@@ -847,35 +847,35 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void

 /* Libdivide only supports 32 and 64 bit types
  * We try to pick the best possible one */
-/**begin repeat1
- * #kind = t, gen, do#
- */
 #if NPY_BITSOF_@TYPE@ <= 32
-#define libdivide_@type@_@kind@ libdivide_s32_@kind@
+#define libdivide_@type@_t libdivide_s32_t
+#define libdivide_@type@_gen libdivide_s32_gen
+#define libdivide_@type@_do libdivide_s32_do
 #else
-#define libdivide_@type@_@kind@ libdivide_s64_@kind@
+#define libdivide_@type@_t libdivide_s64_t
+#define libdivide_@type@_gen libdivide_s64_gen
+#define libdivide_@type@_do libdivide_s64_do
 #endif
-/**end repeat1**/

 NPY_NO_EXPORT void
 @TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
 {
-    BINARY_LOOP_BASE
+    BINARY_DEFS

     /* When the divisor is a constant, use libdivide for faster division */
     if (steps[1] == 0) {
         const @type@ in2 = *(@type@ *)ip2;

         /* If divisor is 0, we need not compute anything */
         if (in2 == 0) {
-            BINARY_LOOP_ZERO {
+            BINARY_LOOP_SLIDING {
                 npy_set_floatstatus_divbyzero();
                 *((@type@ *)op1) = 0;
             }
         }
         else {
             struct libdivide_@type@_t fast_d = libdivide_@type@_gen(in2);
-            BINARY_LOOP_FIXED {
+            BINARY_LOOP_SLIDING {
                 const @type@ in1 = *(@type@ *)ip1;
                 /*
                  * FIXME: On x86 at least, dividing the smallest representable integer
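For context on the constant-divisor fast path above: libdivide's C API is a two-step affair. A divider struct is generated once from the divisor, and every element is then divided against that struct. The standalone sketch below is illustrative only; it is not part of the patch and assumes only that libdivide.h is on the include path. It mirrors the libdivide_@type@_gen / libdivide_@type@_do pair that the macros at the top of this hunk map onto libdivide_s32_* or libdivide_s64_*.

#include <stdint.h>
#include <stdio.h>
#include "libdivide.h"

int main(void)
{
    const int64_t divisor = 7;              /* the steps[1] == 0 constant-divisor case */
    int64_t src[4] = {10, 21, -7, 1000};
    int64_t dst[4];

    /* One-time precomputation, analogous to libdivide_@type@_gen(in2) above */
    struct libdivide_s64_t fast_d = libdivide_s64_gen(divisor);

    for (int i = 0; i < 4; i++) {
        /* Per-element division, analogous to the libdivide_@type@_do call the macros map onto */
        dst[i] = libdivide_s64_do(src[i], &fast_d);
    }

    for (int i = 0; i < 4; i++) {
        printf("%lld / %lld = %lld\n",
               (long long)src[i], (long long)divisor, (long long)dst[i]);
    }
    return 0;
}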
@@ -1412,22 +1412,22 @@ TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const
 NPY_NO_EXPORT void
 TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
 {
-    BINARY_LOOP_BASE
+    BINARY_DEFS

     /* When the divisor is a constant, use libdivide for faster division */
     if (steps[1] == 0) {
         const npy_int64 in2 = *(npy_int64 *)ip2;

         /* If divisor is 0, we need not compute anything */
         if (in2 == 0) {
-            BINARY_LOOP_ZERO {
+            BINARY_LOOP_SLIDING {
                 npy_set_floatstatus_divbyzero();
                 *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
             }
         }
         else {
             struct libdivide_s64_t fast_d = libdivide_s64_gen(in2);
-            BINARY_LOOP_FIXED {
+            BINARY_LOOP_SLIDING {
                 const npy_timedelta in1 = *(npy_timedelta *)ip1;
                 if (in1 == NPY_DATETIME_NAT) {
                     *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
@@ -1520,28 +1520,28 @@ NPY_NO_EXPORT void
 TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
 {
     /* NOTE: This code is similar to array floor divide */
-    BINARY_LOOP_BASE
+    BINARY_DEFS

     /* When the divisor is a constant, use libdivide for faster division */
     if (steps[1] == 0) {
         const npy_timedelta in2 = *(npy_timedelta *)ip2;

         /* If divisor is 0 or NAT, we need not compute anything */
         if (in2 == 0) {
-            BINARY_LOOP_ZERO {
+            BINARY_LOOP_SLIDING {
                 npy_set_floatstatus_divbyzero();
                 *((npy_int64 *)op1) = 0;
             }
         }
         else if (in2 == NPY_DATETIME_NAT) {
-            BINARY_LOOP_ZERO {
+            BINARY_LOOP_SLIDING {
                 npy_set_floatstatus_invalid();
                 *((npy_int64 *)op1) = 0;
             }
         }
         else {
             struct libdivide_s64_t fast_d = libdivide_s64_gen(in2);
-            BINARY_LOOP_FIXED {
+            BINARY_LOOP_SLIDING {
                 const npy_timedelta in1 = *(npy_timedelta *)ip1;
                 if (in1 == NPY_DATETIME_NAT) {
                     npy_set_floatstatus_invalid();
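The BINARY_DEFS and BINARY_LOOP_SLIDING macros that replace BINARY_LOOP_BASE, BINARY_LOOP_FIXED and BINARY_LOOP_ZERO are not defined in these hunks. Judging from how they are used, BINARY_DEFS appears to declare the ip1/ip2/op1 pointers plus the strides and element count, and BINARY_LOOP_SLIDING the per-element loop that advances all three pointers by their strides. The toy program below is a guess at that shape for illustration only; the macro bodies and the npy_intp stand-in are assumptions, not the definitions from loops.h.src.

#include <stdint.h>
#include <stdio.h>

typedef intptr_t npy_intp;   /* stand-in for NumPy's npy_intp typedef */

/* Hypothetical stand-ins inferred from how the macros are used in the
 * hunks above; the real definitions may differ. */
#define BINARY_DEFS \
    char *ip1 = args[0], *ip2 = args[1], *op1 = args[2]; \
    npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2]; \
    npy_intp n = dimensions[0]; \
    npy_intp i;

#define BINARY_LOOP_SLIDING \
    for (i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1)

/* A toy int64 divide loop shaped like @TYPE@_divide above, without the
 * libdivide fast path or floating-point status flags. */
static void toy_divide(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
    BINARY_DEFS
    BINARY_LOOP_SLIDING {
        const int64_t in1 = *(int64_t *)ip1;
        const int64_t in2 = *(int64_t *)ip2;
        *(int64_t *)op1 = (in2 != 0) ? in1 / in2 : 0;
    }
}

int main(void)
{
    int64_t a[3] = {10, 21, 9}, b = 3, out[3];
    char *args[3] = {(char *)a, (char *)&b, (char *)out};
    npy_intp dims[1] = {3};
    /* steps[1] == 0 models the 0-strided constant divisor handled in the patch */
    npy_intp steps[3] = {sizeof(int64_t), 0, sizeof(int64_t)};

    toy_divide(args, dims, steps);
    for (int i = 0; i < 3; i++) {
        printf("%lld\n", (long long)out[i]);
    }
    return 0;
}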