MAINT: Spellcheck some files. by charris · Pull Request #4844 · numpy/numpy · GitHub

MAINT: Spellcheck some files. #4844

Merged · 1 commit merged on Jul 6, 2014
18 changes: 9 additions & 9 deletions numpy/core/src/multiarray/mapping.c
@@ -1224,7 +1224,7 @@ array_assign_boolean_subscript(PyArrayObject *self,

if (needs_api) {
/*
- * FIXME?: most assignment operations stop after the first occurance
+ * FIXME?: most assignment operations stop after the first occurrence
* of an error. Boolean does not currently, but should at least
* report the error. (This is only relevant for things like str->int
* casts which call into python)
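For context, the surrounding C function implements boolean-mask assignment; a minimal Python-level sketch of the operation the comment refers to (my sketch, not part of the diff):

    import numpy as np

    a = np.arange(6)
    mask = a % 2 == 0
    a[mask] = -1      # boolean-subscript assignment handled by this C function
    assert list(a) == [-1, 1, -1, 3, -1, 5]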
@@ -1439,7 +1439,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
/*
* TODO: Should this be a view or not? The only reason not would be
* optimization (i.e. of array[...] += 1) I think.
- * Before, it was just self for a single Ellipis.
+ * Before, it was just self for a single ellipsis.
*/
result = PyArray_View(self, NULL, NULL);
/* A single ellipsis, so no need to decref */
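Since this path goes through PyArray_View, a bare ellipsis index returns a fresh view rather than the array object itself; a short illustration (my sketch, not part of the diff):

    import numpy as np

    a = np.arange(5)
    v = a[...]           # a view onto a, not a itself
    v[0] = 99
    assert a[0] == 99    # writes through the view are visible in a
    assert v is not a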
@@ -1664,7 +1664,7 @@ array_assign_item(PyArrayObject *self, Py_ssize_t i, PyObject *op)
/*
* This fallback takes the old route of `arr.flat[index] = values`
* for one dimensional `arr`. The route can sometimes fail slightly
- * different (ValueError instead of IndexError), in which case we
+ * differently (ValueError instead of IndexError), in which case we
* warn users about the change. But since it does not actually care *at all*
* about shapes, it should only fail for out of bound indexes or
* casting errors.
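The fallback described above mirrors `arr.flat[index] = values` for one-dimensional arrays; roughly, at the Python level (illustrative sketch only):

    import numpy as np

    a = np.zeros(4)
    b = np.zeros(4)
    a[[1, 3]] = [10, 20]        # regular fancy-index assignment
    b.flat[[1, 3]] = [10, 20]   # the old flat-iterator route
    assert (a == b).all()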
@@ -2448,7 +2448,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
NPY_BEGIN_THREADS_DEF;

if (mit->size == 0) {
- /* All indices got broadcasted away, do *not* check as it always was */
+ /* All indices got broadcast away, do *not* check as it always was */
return 0;
}

@@ -2671,7 +2671,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
* 1. No subspace iteration is necessary, so the extra_op can
* be included into the index iterator (it will be buffered)
* 2. Subspace iteration is necessary, so the extra op is iterated
- * independendly, and the iteration order is fixed at C (could
+ * independently, and the iteration order is fixed at C (could
* also use Fortran order if the array is Fortran order).
* In this case the subspace iterator is not buffered.
*
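"Subspace" here refers to the axes not covered by the fancy indices; a rough Python-level picture of that situation (the terminology mapping is my reading of the code, not stated in the diff):

    import numpy as np

    a = np.arange(12).reshape(4, 3)
    idx = np.array([0, 2])      # fancy index over axis 0
    # The trailing ':' axis is the subspace; the assigned values (the extra
    # operand) are broadcast over the index result plus that subspace.
    a[idx, :] = np.arange(6).reshape(2, 3)
    assert a[0, 2] == 2 and a[2, 0] == 3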
@@ -2864,7 +2864,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
NPY_ITER_GROWINNER;

/*
- * For a single 1-d operand, guarantee itertion order
+ * For a single 1-d operand, guarantee iteration order
* (scipy used this). Note that subspace may be used.
*/
if ((mit->numiter == 1) && (PyArray_NDIM(index_arrays[0]) == 1)) {
@@ -3076,7 +3076,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,

fail:
/*
- * Check whether the operand was not broadcastable and replace the error
+ * Check whether the operand could not be broadcast and replace the error
* in that case. This should however normally be found early with a
* direct goto to broadcast_error
*/
@@ -3091,7 +3091,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
/* (j < 0 is currently impossible, extra_op is reshaped) */
j >= 0 &&
PyArray_DIM(extra_op, i) != mit->dimensions[j]) {
- /* extra_op cannot be broadcasted to the indexing result */
+ /* extra_op cannot be broadcast to the indexing result */
goto broadcast_error;
}
}
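The broadcast_error path above is what surfaces when assigned values cannot be broadcast to the shape selected by the index; a minimal example of that failure mode (exception type as I understand it, message omitted):

    import numpy as np

    a = np.zeros(5)
    try:
        a[np.array([0, 1, 2])] = np.arange(2)   # shape (2,) does not broadcast to (3,)
    except ValueError:
        pass   # reported as a shape-mismatch / broadcast error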
@@ -3151,7 +3151,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
* that most of this public API is currently not guaranteed
* to stay the same between versions. If you plan on using
* it, please consider adding more utility functions here
- * to accomodate new features.
+ * to accommodate new features.
*/
NPY_NO_EXPORT PyObject *
PyArray_MapIterArray(PyArrayObject * a, PyObject * index)
28 changes: 14 additions & 14 deletions numpy/core/tests/test_indexing.py
@@ -147,7 +147,7 @@ def test_boolean_indexing_onedim(self):

def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
- # cannot be broadcasted to the subscription. (see also gh-3458)
+ # cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
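The assertions exercising f() fall outside this hunk; the failure being tested looks roughly like this at the Python level (a sketch, not the test code itself):

    import numpy as np

    a = np.arange(4)
    try:
        a[a > -1] = np.ones(3)   # 3 values for 4 selected elements
    except ValueError:
        pass                     # shape mismatch is raised as ValueError (see gh-3458)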
@@ -188,12 +188,12 @@ def test_reverse_strides_and_subspace_bufferinit(self):
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)

- # This also tests that the subspace buffer is initiliazed:
+ # This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])

def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
@@ -346,7 +346,7 @@ def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
refcount = sys.getrefcount(np.dtype(np.intp))
- # item setting always checks indices in seperate function:
+ # item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
@@ -537,11 +537,11 @@ class TestMultiIndexingAutomated(TestCase):
These test use code to mimic the C-Code indexing for selection.

NOTE: * This still lacks tests for complex item setting.
- * If you change behavoir of indexing, you might want to modify
+ * If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
- * Only tuple indicies are supported by the mimicing code.
+ * Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
@@ -564,7 +564,7 @@ def setUp(self):
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
- np.empty((0, 1, 1), dtype=np.intp), # empty broadcastable
+ np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
@@ -611,7 +611,7 @@ def _get_multi_index(self, arr, indices):
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
- # allowed if the broadcasted fancy-indices are 0-sized.
+ # allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False

@@ -656,7 +656,7 @@ def _get_multi_index(self, arr, indices):
if arr.ndim - ndim < 0:
# we can't take more dimensions then we have, not even for 0-d arrays.
# since a[()] makes sense, but not a[(),]. We will raise an error
- # lateron, unless a broadcasting error occurs first.
+ # later on, unless a broadcasting error occurs first.
raise IndexError

if ndim == 0 and not None in in_indices:
@@ -668,7 +668,7 @@

for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
- # convert to an index array anways:
+ # convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
@@ -701,7 +701,7 @@ def _get_multi_index(self, arr, indices):
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
- # make sense (even outide the 0-d indexed array case)
+ # make sense (even outside the 0-d indexed array case)
# Note that originally this is could be interpreted as
# integer in the full integer special case.
raise IndexError
@@ -753,7 +753,7 @@ def _get_multi_index(self, arr, indices):
arr = arr.transpose(*(fancy_axes + axes))

# We only have one 'f' index now and arr is transposed accordingly.
- # Now handle newaxes by reshaping...
+ # Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
@@ -771,7 +771,7 @@
res = np.broadcast(*indx[1:]) # raises ValueError...
else:
res = indx[1]
- # unfortunatly the indices might be out of bounds. So check
+ # unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
@@ -909,7 +909,7 @@ def test_multidim(self):
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
- # index, when running the file seperatly.
+ # index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
24 changes: 12 additions & 12 deletions numpy/core/tests/test_regression.py
@@ -181,7 +181,7 @@ def test_endian_bool_indexing(self,level=rlevel):
assert_(np.all(b[yb] > 0.5))

def test_endian_where(self,level=rlevel):
"""GitHuB issue #369"""
"""GitHub issue #369"""
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
@@ -290,7 +290,7 @@ def test_unicode_string_comparison(self,level=rlevel):

def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
- # Create discontiguous Fortran-ordered array
+ # Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))

@@ -311,7 +311,7 @@ def bfb(): x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfb)

def test_nonarray_assignment(self):
- # See also Issue gh-2870, test for nonarray assignment
+ # See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
@@ -560,15 +560,15 @@ def test_reshape_zero_strides(self, level=rlevel):
assert_(a.reshape(5, 1).strides[0] == 0)

def test_reshape_zero_size(self, level=rlevel):
"""Github Issue #2700, setting shape failed for 0-sized arrays"""
"""GitHub Issue #2700, setting shape failed for 0-sized arrays"""
a = np.ones((0, 2))
a.shape = (-1, 2)

# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
- # Github issue gh-2949, bad strides for trailing ones of new shape
+ # GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
@@ -756,9 +756,9 @@ def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v): x[(s>0)]=v
- # After removing deprecation, the following is are ValueErrors.
+ # After removing deprecation, the following are ValueErrors.
# This might seem odd as compared to the value error below. This
- # is due to the fact that the new code always use "nonzero" logic
+ # is due to the fact that the new code always uses "nonzero" logic
# and the boolean special case is not taken.
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
@@ -848,7 +848,7 @@ def test_object_array_refcounting(self, level=rlevel):
cnt0_b = cnt(b)
cnt0_c = cnt(c)

- # -- 0d -> 1d broadcasted slice assignment
+ # -- 0d -> 1-d broadcast slice assignment

arr = np.zeros(5, dtype=np.object_)

@@ -865,7 +865,7 @@

del arr

- # -- 1d -> 2d broadcasted slice assignment
+ # -- 1-d -> 2-d broadcast slice assignment

arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
@@ -884,7 +884,7 @@

del arr, arr0

- # -- 2d copying + flattening
+ # -- 2-d copying + flattening

arr = np.zeros((5, 2), dtype=np.object_)

@@ -1029,8 +1029,8 @@ def test_compress_small_type(self, level=rlevel):
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
- raise AssertionError("compress with an out which cannot be " \
- "safely casted should not return "\
+ raise AssertionError("compress with an out which cannot be "
+ "safely casted should not return "
"successfully")
except TypeError:
pass