ENH: Allow an arbitrary number of operands in nditer by seberg · Pull Request #28080 · numpy/numpy · GitHub

ENH: Allow an arbitrary number of operands in nditer #28080

Merged
merged 5 commits on Jan 6, 2025
1 change: 1 addition & 0 deletions doc/release/upcoming_changes/28080.c_api.rst
@@ -0,0 +1 @@
* ``NpyIter`` now has no limit on the number of operands it supports.
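
A rough sketch of what the relaxed limit allows at the C level (not code from this PR; `N_OPS`, `iterate_many`, and the `ops` array are invented for illustration, and a real extension would also need `import_array()` and error handling around it):

```c
/*
 * Rough sketch only (not code from this PR): build an iterator over more
 * operands than the old compile-time NPY_MAXARGS bound allowed. Assumes a
 * C extension in which import_array() has run and `ops` points to N_OPS
 * valid, broadcast-compatible arrays; N_OPS and iterate_many are invented.
 */
#include <Python.h>
#include <numpy/arrayobject.h>

#define N_OPS 128

static int
iterate_many(PyArrayObject **ops)
{
    npy_uint32 op_flags[N_OPS];
    for (int i = 0; i < N_OPS; i++) {
        op_flags[i] = NPY_ITER_READONLY;
    }

    NpyIter *iter = NpyIter_MultiNew(N_OPS, ops,
            NPY_ITER_BUFFERED | NPY_ITER_EXTERNAL_LOOP,
            NPY_KEEPORDER, NPY_SAFE_CASTING, op_flags, NULL);
    if (iter == NULL) {
        /* Before this change, nop > NPY_MAXARGS failed here with ValueError. */
        return -1;
    }
    /* ... iterate with NpyIter_GetIterNext()/NpyIter_GetDataPtrArray() ... */
    return NpyIter_Deallocate(iter) == NPY_SUCCEED ? 0 : -1;
}
```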
2 changes: 2 additions & 0 deletions doc/release/upcoming_changes/28080.improvement.rst
@@ -0,0 +1,2 @@
* ``np.nditer`` now has no limit on the number of supported operands
  (the count is limited only by the C ``int`` type).
34 changes: 18 additions & 16 deletions numpy/_core/src/multiarray/nditer_constr.c
@@ -50,7 +50,7 @@ npyiter_prepare_operands(int nop,
PyArray_Descr **op_dtype,
npy_uint32 flags,
npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
-npy_int8 *out_maskop);
+int *out_maskop);
static int
npyiter_check_casting(int nop, PyArrayObject **op,
PyArray_Descr **op_dtype,
@@ -100,7 +100,7 @@ npyiter_get_priority_subtype(int nop, PyArrayObject **op,
static int
npyiter_allocate_transfer_functions(NpyIter *iter);

-static void
+static int
npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize);

/*NUMPY_API
@@ -158,13 +158,6 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,

NPY_IT_TIME_POINT(c_start);

-if (nop > NPY_MAXARGS) {
-PyErr_Format(PyExc_ValueError,
-"Cannot construct an iterator with more than %d operands "
-"(%d were requested)", NPY_MAXARGS, nop);
-return NULL;
-}

/*
* Before 1.8, if `oa_ndim == 0`, this meant `op_axes != NULL` was an error.
* With 1.8, `oa_ndim == -1` takes this role, while op_axes in that case
@@ -433,7 +426,10 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,

/* If buffering is set prepare it */
if (itflags & NPY_ITFLAG_BUFFER) {
-npyiter_find_buffering_setup(iter, buffersize);
+if (npyiter_find_buffering_setup(iter, buffersize) < 0) {
+NpyIter_Deallocate(iter);
+return NULL;
+}

/*
* Initialize for use in FirstVisit, which may be called before
@@ -1168,10 +1164,10 @@ npyiter_prepare_operands(int nop, PyArrayObject **op_in,
PyArray_Descr **op_dtype,
npy_uint32 flags,
npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
-npy_int8 *out_maskop)
+int *out_maskop)
{
int iop, i;
-npy_int8 maskop = -1;
+int maskop = -1;
int any_writemasked_ops = 0;

/*
@@ -2001,22 +1997,27 @@ operand_different_than_broadcast: {
* In theory, the reduction could also span multiple axes if other operands
* are buffered. We do not try to discover this.
*/
-static void
+static int
npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize)
{
int nop = iter->nop;
int ndim = iter->ndim;
npy_uint32 itflags = iter->itflags;
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);

+/* Per operand space; could also reuse an iterator field initialized later */
+NPY_ALLOC_WORKSPACE(dim_scratch_space, int, 10, 2 * nop);
+if (dim_scratch_space == NULL) {
+return -1;
+}
/*
* We check two things here, first how many operand dimensions can be
* iterated using a single stride (all dimensions are consistent),
* and second, whether we found a reduce dimension for the operand.
* That is an outer dimension a reduce would have to take place on.
*/
-int op_single_stride_dims[NPY_MAXARGS];
-int op_reduce_outer_dim[NPY_MAXARGS];
+int *op_single_stride_dims = dim_scratch_space;
+int *op_reduce_outer_dim = dim_scratch_space + nop;

npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
@@ -2312,7 +2313,8 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize)
/* Core starts at 0 initially, if needed it is set in goto index. */
NIT_BUFFERDATA(iter)->coreoffset = 0;

-return;
+npy_free_workspace(dim_scratch_space);
+return 0;
}


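For context on the new allocation above: `NPY_ALLOC_WORKSPACE` and `npy_free_workspace` are NumPy-internal helpers that replace the fixed `NPY_MAXARGS`-sized stack arrays with a scratch buffer sized by `nop`; as used here they appear to prefer a small on-stack buffer and fall back to the heap for larger operand counts. A minimal standalone sketch of that stack-or-heap idea (not NumPy's actual macro; `FIXED_SCRATCH` and `process_operands` are invented):

```c
/*
 * Minimal sketch of the stack-or-heap scratch pattern used above; this is
 * NOT NumPy's actual NPY_ALLOC_WORKSPACE macro, just the same idea spelled
 * out. FIXED_SCRATCH and process_operands are invented for illustration.
 */
#include <stdlib.h>

#define FIXED_SCRATCH 10   /* elements that can live on the stack */

static int
process_operands(int nop)
{
    int stack_buf[FIXED_SCRATCH];
    size_t needed = 2 * (size_t)nop;            /* two ints per operand */
    int *scratch = (needed <= FIXED_SCRATCH)
                       ? stack_buf
                       : malloc(needed * sizeof(int));
    if (scratch == NULL) {
        return -1;                              /* mirrors the new error path */
    }

    int *op_single_stride_dims = scratch;       /* first nop entries */
    int *op_reduce_outer_dim = scratch + nop;   /* second nop entries */
    (void)op_single_stride_dims;
    (void)op_reduce_outer_dim;
    /* ... per-operand bookkeeping would go here ... */

    if (scratch != stack_buf) {                 /* free only the heap case */
        free(scratch);
    }
    return 0;
}
```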
4 changes: 2 additions & 2 deletions numpy/_core/src/multiarray/nditer_impl.h
Original file line number Diff line number Diff line change
@@ -152,8 +152,8 @@
struct NpyIter_InternalOnly {
/* Initial fixed position data */
npy_uint32 itflags;
-npy_uint8 ndim, nop;
-npy_int8 maskop;
+npy_uint8 ndim;
+int nop, maskop;
seberg (Member, Author) commented:

So I think effectively, this means we need 64 bits more space here (on typical systems). A bit unfortunate, but even if we limited these fields to int16, we would lose 32 bits here anyway. (See the layout sketch after this hunk.)

npy_intp itersize, iterstart, iterend;
/* iterindex is only used if RANGED or BUFFERED is set */
npy_intp iterindex;
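To make the space cost discussed in the review comment concrete, the following standalone sketch mirrors only the leading header fields (illustrative stand-ins, not NumPy's real `NpyIter_InternalOnly` definition) and shows the 8-byte (64-bit) growth on a typical LP64 system:

```c
/*
 * Illustrative stand-ins only, not NumPy's real NpyIter_InternalOnly
 * layout: mirror just the leading header fields to show why widening
 * nop/maskop costs 8 bytes (64 bits) on a typical LP64 platform.
 */
#include <stdint.h>
#include <stdio.h>

struct old_header {
    uint32_t itflags;
    uint8_t ndim, nop;
    int8_t maskop;       /* 4 + 1 + 1 + 1 = 7 bytes, padded to 8 */
    intptr_t itersize;   /* 8-byte aligned -> sizeof == 16 */
};

struct new_header {
    uint32_t itflags;
    uint8_t ndim;        /* 3 bytes of padding before nop */
    int nop, maskop;     /* 4 + 4 bytes */
    intptr_t itersize;   /* sizeof == 24 on LP64 */
};

int main(void)
{
    /* Typically prints: old: 16 bytes, new: 24 bytes */
    printf("old: %zu bytes, new: %zu bytes\n",
           sizeof(struct old_header), sizeof(struct new_header));
    return 0;
}
```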