@@ -883,28 +883,33 @@ def copy(a, order='K'):
883
883
# Basic operations
884
884
885
885
886
- def gradient(f, *varargs):
886
+ def gradient(f, *varargs, **kwargs):
887
887
"""
888
888
Return the gradient of an N-dimensional array.
889
-
889
+
890
890
The gradient is computed using second order accurate central differences
891
- in the interior and second order accurate one-sides (forward or backwards)
892
- differences at the boundaries. The returned gradient hence has the same
893
- shape as the input array.
891
+ in the interior and either first differences or second order accurate
892
+ one-sides (forward or backwards) differences at the boundaries. The
893
+ returned gradient hence has the same shape as the input array.
894
894
895
895
Parameters
896
896
----------
897
897
f : array_like
898
- An N-dimensional array containing samples of a scalar function.
899
- `*varargs` : scalars
900
- 0, 1, or N scalars specifying the sample distances in each direction,
901
- that is: `dx`, `dy`, `dz`, ... The default distance is 1.
898
+ An N-dimensional array containing samples of a scalar function.
899
+ varargs : list of scalar, optional
900
+ N scalars specifying the sample distances for each dimension,
901
+ i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
902
+ edge_order : {1, 2}, optional
903
+ Gradient is calculated using N\ :sup:`th` order accurate differences
904
+ at the boundaries. Default: 1.
905
+
906
+ .. versionadded:: 1.9.1
902
907
903
908
Returns
904
909
-------
905
910
gradient : ndarray
906
- N arrays of the same shape as `f` giving the derivative of `f` with
907
- respect to each dimension.
911
+ N arrays of the same shape as `f` giving the derivative of `f` with
912
+ respect to each dimension.
908
913
909
914
Examples
910
915
--------
@@ -916,15 +921,14 @@ def gradient(f, *varargs):
916
921
917
922
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
918
923
[array([[ 2., 2., -1.],
919
- [ 2., 2., -1.]]),
920
- array([[ 1. , 2.5, 4. ],
921
- [ 1. , 1. , 1. ]])]
924
+ [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
925
+ [ 1. , 1. , 1. ]])]
922
926
923
- >>> x = np.array([0,1,2,3, 4])
924
- >>> dx = gradient(x)
927
+ >>> x = np.array([0, 1, 2, 3, 4])
928
+ >>> dx = np.gradient(x)
925
929
>>> y = x**2
926
- >>> gradient(y,dx)
927
- array([0., 2., 4., 6., 8.])
930
+ >>> np.gradient(y, dx, edge_order=2)
931
+ array([-0., 2., 4., 6., 8.])
928
932
"""
929
933
f = np.asanyarray(f)
930
934
N = len(f.shape)  # number of dimensions
@@ -939,6 +943,13 @@ def gradient(f, *varargs):
939
943
raise SyntaxError(
940
944
"invalid number of arguments")
941
945
946
+ edge_order = kwargs.pop('edge_order', 1)
947
+ if kwargs:
948
+ raise TypeError('"{}" are not valid keyword arguments.'.format(
949
+ '", "'.join(kwargs.keys())))
950
+ if edge_order > 2:
951
+ raise ValueError("'edge_order' greater than 2 not supported")
952
+
942
953
# use central differences on interior and one-sided differences on the
943
954
# endpoints. This preserves second order-accuracy over the full domain.
944
955
@@ -978,7 +989,7 @@ def gradient(f, *varargs):
978
989
"at least two elements are required.")
979
990
980
991
# Numerical differentiation: 1st order edges, 2nd order interior
981
- if y.shape[axis] == 2:
992
+ if y.shape[axis] == 2 or edge_order == 1:
982
993
# Use first order differences for time data
983
994
out = np.empty_like(y, dtype=otype)
984
995
0 commit comments