BUG,ENH: Fix generic scalar infinite recursion issues by seberg · Pull Request #26904 · numpy/numpy · GitHub

BUG,ENH: Fix generic scalar infinite recursion issues #26904

Merged
merged 7 commits on Jul 24, 2024
Changes from 1 commit
TST,MAINT: Rename scalar operator list for clarity
seberg committed Jul 22, 2024
commit 3af87c4c52168969f55f68e272fa6e551dea89e3
21 changes: 12 additions & 9 deletions numpy/_core/tests/test_scalarmath.py
@@ -29,11 +29,14 @@

objecty_things = [object(), None]

-reasonable_operators_for_scalars = [
+binary_operators_for_scalars = [
operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
operator.gt, operator.add, operator.floordiv, operator.mod,
operator.mul, operator.pow, operator.sub, operator.truediv
]
+binary_operators_for_scalar_ints = binary_operators_for_scalars + [
+    operator.xor, operator.or_, operator.and_
+]


# This compares scalarmath against ufuncs.
@@ -109,7 +112,7 @@ def check_ufunc_scalar_equivalence(op, arr1, arr2):

@pytest.mark.slow
@settings(max_examples=10000, deadline=2000)
-@given(sampled_from(reasonable_operators_for_scalars),
+@given(sampled_from(binary_operators_for_scalars),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
@@ -122,7 +125,7 @@ def test_array_scalar_ufunc_equivalence(op, arr1, arr2):


@pytest.mark.slow
-@given(sampled_from(reasonable_operators_for_scalars),
+@given(sampled_from(binary_operators_for_scalars),
hynp.scalar_dtypes(), hynp.scalar_dtypes())
def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
# Same as above, but don't worry about sampling weird values so that we
@@ -865,7 +868,7 @@ def recursionlimit(n):


@given(sampled_from(objecty_things),
-sampled_from(reasonable_operators_for_scalars + [operator.xor]),
+sampled_from(binary_operators_for_scalar_ints),
sampled_from(types + [rational]))
def test_operator_object_left(o, op, type_):
try:
@@ -876,7 +879,7 @@ def test_operator_object_left(o, op, type_):


@given(sampled_from(objecty_things),
-sampled_from(reasonable_operators_for_scalars + [operator.xor]),
+sampled_from(binary_operators_for_scalar_ints),
sampled_from(types + [rational]))
def test_operator_object_right(o, op, type_):
try:
@@ -886,7 +889,7 @@ def test_operator_object_right(o, op, type_):
pass


-@given(sampled_from(reasonable_operators_for_scalars),
+@given(sampled_from(binary_operators_for_scalars),
sampled_from(types),
sampled_from(types))
def test_operator_scalars(op, type1, type2):
@@ -896,7 +899,7 @@ def test_operator_scalars(op, type1, type2):
pass


@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
@pytest.mark.parametrize("op", binary_operators_for_scalars)
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
def test_longdouble_operators_with_obj(sctype, op):
# This is/used to be tricky, because NumPy generally falls back to
@@ -931,7 +934,7 @@ def test_longdouble_with_arrlike(sctype, op):
assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3))

seberg (Member, Author) commented:
So yes, the new path aligns longdouble with the others here. I could see an argument that this isn't right, in which case the array conversion step would be removed.

(It may mean deciding that one test in gh-26905 just can't pass, because we simply cannot deal with subclasses of Python floats even if they get converted to a float array later. Although it can go either way, since you can still convert but only allow the was_scalar path.)
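
As a standalone illustration (not part of the PR diff), the operator.add case of test_longdouble_with_arrlike can be reproduced roughly as below, assuming a NumPy build that includes this change:

```python
# Minimal sketch: a plain Python list combined with a longdouble scalar
# should give the same values as operating on the converted array.
import operator

import numpy as np
from numpy.testing import assert_array_equal

op = operator.add
sctype = np.longdouble

# list + longdouble scalar: the list's __add__ returns NotImplemented,
# so the scalar's reflected op runs and converts the list to an array.
left = op([1, 2], sctype(3))

# Explicit array version used as the reference in the test.
right = op(np.array([1, 2]), 3)

# Values match ([4, 5]); dtypes may differ (longdouble vs. default int).
assert_array_equal(left, right)
```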



@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
@pytest.mark.parametrize("op", binary_operators_for_scalars)
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
@np.errstate(all="ignore")
def test_longdouble_operators_with_large_int(sctype, op):
@@ -1121,7 +1124,7 @@ def test_truediv_int():
@pytest.mark.slow
@pytest.mark.parametrize("op",
# TODO: Power is a bit special, but here mostly bools seem to behave oddly
-[op for op in reasonable_operators_for_scalars if op is not operator.pow])
+[op for op in binary_operators_for_scalars if op is not operator.pow])
@pytest.mark.parametrize("sctype", types)
@pytest.mark.parametrize("other_type", [float, int, complex])
@pytest.mark.parametrize("rop", [True, False])