8000 BUG: shift operator cycles, fixes #2449 by jaimefrio · Pull Request #7473 · numpy/numpy · GitHub
[go: up one dir, main page]

Skip to content

BUG: shift operator cycles, fixes #2449 #7473

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
BUG: shift operator cycles, fixes #2449
  • Loading branch information
jaimefrio committed Mar 27, 2016
commit 24b2a2d36a7e8356310cd16dbe60abd9d0e682dc
114 changes: 109 additions & 5 deletions numpy/core/src/umath/loops.c.src
Original file line number Diff line number Diff line change
Expand Up @@ -772,6 +772,7 @@ BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
* npy_long, npy_ulong, npy_longlong, npy_ulonglong#
* #ftype = npy_float, npy_float, npy_float, npy_float, npy_double, npy_double,
* npy_double, npy_double, npy_double, npy_double#
* #issigned = 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
*/

#define @TYPE@_floor_divide @TYPE@_divide
Expand Down Expand Up @@ -824,15 +825,15 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void

/**begin repeat1
* Arithmetic
* #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor,
* left_shift, right_shift#
* #OP = +, -,*, &, |, ^, <<, >>#
* #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor#
* #OP = +, -,*, &, |, ^#
*/

NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps,
void *NPY_UNUSED(func))
{
if(IS_BINARY_REDUCE) {
if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
io1 @OP@= *(@type@ *)ip2;
}
Expand All @@ -845,6 +846,109 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void

/**end repeat1**/

/*
* Arithmetic bit shift operations.
*
* Intel hardware masks bit shift values, so large shifts wrap around
* and can produce surprising results. The special handling ensures that
* behavior is independent of compiler or hardware.
* TODO: We could implement consistent behavior for negative shifts,
* which is undefined in C.
*/

#define LEFT_SHIFT_OP \
do { \
if (NPY_LIKELY(in2 < sizeof(@type@) * 8)) { \
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think there are a number of bits macros defined somewhere... yep, in npy_common.h. They have the form NPY_BITSOF_LONGLONG etc.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably already included here.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please, kill the macro ;)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this actually buy speed over ...? ... : ...?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A cleaner version modulo signedness might be

#define LEFT_SHIFT = do {*out = NPY_LIKELY(in2 < NPY_BITSOF_@TYPE@) ? in1 << in2 : 0} while(0)

Although I don't think we actually need the do ... while(0) bit here.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Macro removed in #13739. Kept the if statements because I think they're clearer. Did not use NPY_BITSOF_LONG and friends because unsigned counterparts such as NPY_BITSOF_ULONG don't exist.

*out = in1 << in2; \
} \
else { \
*out = 0; \
} \
} while (0)


NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_left_shift(char **args, npy_intp *dimensions, npy_intp *steps,
void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
@type@ ip2_val = *(@type@ *)ip2;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could remove the reduce specialization, I suspect reduce using the shift operators would be rare and BINARY_LOOP_FAST should have adequate performance.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done in #13739


if (NPY_LIKELY(ip2_val < sizeof(@type@) * 8)) {
io1 <<= ip2_val;
}
else {
io1 = 0;
}
}
*((@type@ *)iop1) = io1;
}
else {
BINARY_LOOP_FAST(@type@, @type@, LEFT_SHIFT_OP);
}
}

#undef LEFT_SHIFT_OP

/*
 * Signed right shift. `in1 >> in2` with in2 >= the bit width of @type@
 * is undefined behavior in C, so over-wide shifts saturate instead:
 * -1 for negative in1 (the arithmetic-shift limit), 0 otherwise.
 */
#define RIGHT_SHIFT_OP_SIGNED                                   \
    do {                                                        \
        *out = NPY_LIKELY(in2 < sizeof(@type@) * 8) ?           \
               (in1 >> in2) : ((in1 < 0) ? -1 : 0);             \
    } while (0)

/*
 * Unsigned right shift. Shifts by >= the bit width of @type@ (undefined
 * behavior in C) yield 0, since every bit is shifted out.
 */
#define RIGHT_SHIFT_OP_UNSIGNED                                 \
    do {                                                        \
        *out = NPY_LIKELY(in2 < sizeof(@type@) * 8) ?           \
               (in1 >> in2) : 0;                                \
    } while (0)

/*
 * Elementwise right shift inner loop for @TYPE@ (standard ufunc
 * signature: args = {in1, in2, out} pointers, dimensions[0] = length,
 * steps = byte strides).
 * Shift counts >= the bit width of @type@ are undefined behavior in C,
 * so they are handled explicitly: negative signed inputs saturate to -1
 * (sign extension), everything else to 0.
 */
NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_right_shift(char **args, npy_intp *dimensions, npy_intp *steps,
void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE) {
/* Reduction: fold every shift count in ip2 into the accumulator io1. */
BINARY_REDUCE_LOOP(@type@) {
@type@ ip2_val = *(@type@ *)ip2;

if (NPY_LIKELY(ip2_val < sizeof(@type@) * 8)) {
io1 >>= ip2_val;
}
/* @issigned@ is 1 for the signed types of this repeat block, so the
 * sign-extending branch only exists for them. */
#if @issigned@
else if (io1 < 0) {
io1 = -1;
}
#endif
else {
io1 = 0;
}
}
*((@type@ *)iop1) = io1;
}
else {
/* General strided loop; the OP macros handle over-wide shifts. */
#if @issigned@
BINARY_LOOP_FAST(@type@, @type@, RIGHT_SHIFT_OP_SIGNED);
#else
BINARY_LOOP_FAST(@type@, @type@, RIGHT_SHIFT_OP_UNSIGNED);
#endif
}
}

#undef RIGHT_SHIFT_OP_SIGNED
#undef RIGHT_SHIFT_OP_UNSIGNED


/**begin repeat1
* #kind = equal, not_equal, greater, greater_equal, less, less_equal,
* logical_and, logical_or#
Expand Down
45 changes: 39 additions & 6 deletions numpy/core/src/umath/scalarmath.c.src
Original file line number Diff line number Diff line change
Expand Up @@ -245,25 +245,58 @@ static void
/**end repeat**/



/* QUESTION: Should we check for overflow / underflow in (l,r)shift? */

/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong#
* #issigned = 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
*/

/**begin repeat1
* #oper = and, xor, or, lshift, rshift#
* #op = &, ^, |, <<, >>#
* #oper = and, xor, or#
* #op = &, ^, |#
*/

#define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2)

/**end repeat1**/

/*
 * Scalar left shift. Shifting by >= the bit width of @type@ is undefined
 * behavior in C, so such shifts produce 0 (all bits are shifted out).
 */
#define @name@_ctype_lshift(arg1, arg2, out)                    \
    do {                                                        \
        *(out) = NPY_LIKELY((arg2) < sizeof(@type@) * 8) ?      \
                 ((arg1) << (arg2)) : 0;                        \
    } while (0)

/*
 * Scalar right shift. Shift counts >= the bit width of @type@ are
 * undefined behavior in C; emulate the arithmetic-shift limit instead:
 * negative signed values saturate to -1, everything else to 0.
 * Two variants are selected at template-expansion time via @issigned@.
 */
#if @issigned@
#define @name@_ctype_rshift(arg1, arg2, out) \
do { \
if (NPY_LIKELY((arg2) < sizeof(@type@) * 8)) { \
*(out) = (arg1) >> (arg2); \
} \
else if ((arg1) < 0) { \
*(out) = -1; \
} \
else { \
*(out) = 0; \
} \
} while (0)
#else
#define @name@_ctype_rshift(arg1, arg2, out) \
do { \
if (NPY_LIKELY((arg2) < sizeof(@type@) * 8)) { \
*(out) = (arg1) >> (arg2); \
} \
else { \
*(out) = 0; \
} \
} while (0)
#endif

/**end repeat**/

/**begin repeat
Expand Down Expand Up @@ -575,7 +608,7 @@ static void
* 1) Convert the types to the common type if both are scalars (0 return)
* 2) If both are not scalars use ufunc machinery (-2 return)
* 3) If both are scalars but cannot be cast to the right type
* return NotImplmented (-1 return)
* return NotImplemented (-1 return)
*
* 4) Perform the function on the C-type.
* 5) If an error condition occurred, check to see
Expand Down
32 changes: 32 additions & 0 deletions numpy/core/tests/test_scalarmath.py
Original file line number Diff line number Diff line change
Expand Up @@ -525,5 +525,37 @@ def test_numpy_abs(self):
self._test_abs_func(np.abs)


class TestBitShifts(TestCase):
    """Regression tests for gh-2449.

    Shifting an integer by its full bit width (or more) must give a
    well-defined result (0, or -1 for an arithmetic right shift of a
    negative value) instead of the hardware-dependent wraparound that C
    leaves undefined.
    """

    def _make_operands(self, dt):
        # Build the [5, -5] pair in dtype dt.  astype() is used instead of
        # np.array([5, -5], dtype=dt) so that unsigned dtypes receive the
        # two's-complement reinterpretation of -5 rather than raising an
        # OverflowError for the negative Python int.
        return np.array([5, -5]).astype(dt)

    def test_left_shift(self):
        # gh-2449
        for dt in np.typecodes['AllInteger']:
            arr = self._make_operands(dt)
            scl_pos, scl_neg = arr
            bits = arr.dtype.itemsize * 8
            # Any shift >= the bit width must yield 0, not wrap around.
            for shift in np.array([bits, 2 * bits], dtype=dt):
                res_pos = scl_pos << shift
                res_neg = scl_neg << shift
                assert_equal(res_pos, 0)
                assert_equal(res_neg, 0)
                # Result on scalars should be the same as on arrays
                assert_array_equal(arr << shift, [res_pos, res_neg],
                                   err_msg="dtype %s, shift %d" % (dt, shift))

    def test_right_shift(self):
        # gh-2449
        for dt in np.typecodes['AllInteger']:
            arr = self._make_operands(dt)
            scl_pos, scl_neg = arr
            bits = arr.dtype.itemsize * 8
            for shift in np.array([bits, 2 * bits], dtype=dt):
                res_pos = scl_pos >> shift
                res_neg = scl_neg >> shift
                # Positive values saturate to 0; negative signed values
                # saturate to -1, mimicking an arithmetic shift.
                assert_equal(res_pos, 0)
                if dt in np.typecodes['UnsignedInteger']:
                    assert_equal(res_neg, 0)
                else:
                    assert_equal(res_neg, -1)
                # Result on scalars should be the same as on arrays
                assert_array_equal(arr >> shift, [res_pos, res_neg],
                                   err_msg="dtype %s, shift %d" % (dt, shift))


if __name__ == "__main__":
run_module_suite()
0