DOC docstring (shape= -> of shape) (#14640) · scikit-learn/scikit-learn@0eebade · GitHub

Commit 0eebade

adrinjalali authored and thomasjpfan committed
DOC docstring (shape= -> of shape) (#14640)
1 parent 6127b4e commit 0eebade


65 files changed (+468 / -469 lines)
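The entire commit is a mechanical notation change in numpydoc docstrings: "array-like, shape = [n_samples, n_features]" becomes "array-like of shape (n_samples, n_features)", and optional sample_weight parameters gain an explicit "default=None". As a hedged illustration (the function below is hypothetical and not part of the commit), a parameter section written in the updated convention looks like this:

# Illustrative sketch only: a hypothetical function whose docstring follows
# the "of shape (...)" convention this commit standardizes on.
import numpy as np


def weighted_accuracy(y_true, y_pred, sample_weight=None):
    """Compute the (optionally weighted) fraction of correct predictions.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground-truth labels.

    y_pred : array-like of shape (n_samples,)
        Predicted labels.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    score : float
        Weighted fraction of samples where y_true == y_pred.
    """
    correct = (np.asarray(y_true) == np.asarray(y_pred)).astype(float)
    return float(np.average(correct, weights=sample_weight))


print(weighted_accuracy([0, 1, 1], [0, 1, 0]))  # 0.666...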

sklearn/base.py

Lines changed: 7 additions & 7 deletions
@@ -337,13 +337,13 @@ def score(self, X, y, sample_weight=None):

    Parameters
    ----------
-   X : array-like, shape = (n_samples, n_features)
+   X : array-like of shape (n_samples, n_features)
        Test samples.

-   y : array-like, shape = (n_samples) or (n_samples, n_outputs)
+   y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        True labels for X.

-   sample_weight : array-like, shape = [n_samples], optional
+   sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
@@ -373,16 +373,16 @@ def score(self, X, y, sample_weight=None):

    Parameters
    ----------
-   X : array-like, shape = (n_samples, n_features)
+   X : array-like of shape (n_samples, n_features)
        Test samples. For some estimators this may be a
        precomputed kernel matrix instead, shape = (n_samples,
        n_samples_fitted], where n_samples_fitted is the number of
        samples used in the fitting for the estimator.

-   y : array-like, shape = (n_samples) or (n_samples, n_outputs)
+   y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        True values for X.

-   sample_weight : array-like, shape = [n_samples], optional
+   sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
@@ -565,7 +565,7 @@ def score(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = (n_samples, n_features)
+   X : array-like of shape (n_samples, n_features)

    Returns
    -------

sklearn/calibration.py

Lines changed: 4 additions & 4 deletions
@@ -122,7 +122,7 @@ def fit(self, X, y, sample_weight=None):
    y : array-like, shape (n_samples,)
        Target values.

-   sample_weight : array-like, shape = [n_samples] or None
+   sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
@@ -326,7 +326,7 @@ def fit(self, X, y, sample_weight=None):
    y : array-like, shape (n_samples,)
        Target values.

-   sample_weight : array-like, shape = [n_samples] or None
+   sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
@@ -413,7 +413,7 @@ def _sigmoid_calibration(df, y, sample_weight=None):
    y : ndarray, shape (n_samples,)
        The targets.

-   sample_weight : array-like, shape = [n_samples] or None
+   sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
@@ -487,7 +487,7 @@ def fit(self, X, y, sample_weight=None):
    y : array-like, shape (n_samples,)
        Training target.

-   sample_weight : array-like, shape = [n_samples] or None
+   sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns

sklearn/cluster/_feature_agglomeration.py

Lines changed: 2 additions & 2 deletions
@@ -27,7 +27,7 @@ def transform(self, X):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features] or [n_features]
+   X : array-like of shape (n_samples, n_features) or (n_samples,)
        A M by N array of M observations in N dimensions or a length
        M array of M one-dimensional observations.

@@ -62,7 +62,7 @@ def inverse_transform(self, Xred):

    Parameters
    ----------
-   Xred : array-like, shape=[n_samples, n_clusters] or [n_clusters,]
+   Xred : array-like of shape (n_samples, n_clusters) or (n_clusters,)
        The values to be assigned to each cluster of samples

    Returns

sklearn/cluster/hierarchical.py

Lines changed: 1 addition & 1 deletion
@@ -1025,7 +1025,7 @@ def fit(self, X, y=None, **params):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        The data

    y : Ignored

sklearn/cluster/k_means_.py

Lines changed: 7 additions & 7 deletions
@@ -987,7 +987,7 @@ def fit_predict(self, X, y=None, sample_weight=None):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = [n_samples, n_features]
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        New data to transform.

    y : Ignored
@@ -1011,7 +1011,7 @@ def fit_transform(self, X, y=None, sample_weight=None):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = [n_samples, n_features]
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        New data to transform.

    y : Ignored
@@ -1041,7 +1041,7 @@ def transform(self, X):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = [n_samples, n_features]
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        New data to transform.

    Returns
@@ -1067,7 +1067,7 @@ def predict(self, X, sample_weight=None):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = [n_samples, n_features]
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        New data to predict.

    sample_weight : array-like, shape (n_samples,), optional
@@ -1091,7 +1091,7 @@ def score(self, X, y=None, sample_weight=None):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = [n_samples, n_features]
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        New data.

    y : Ignored
@@ -1666,7 +1666,7 @@ def partial_fit(self, X, y=None, sample_weight=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Coordinates of the data points to cluster. It must be noted that
        X will be copied if it is not C-contiguous.

@@ -1737,7 +1737,7 @@ def predict(self, X, sample_weight=None):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = [n_samples, n_features]
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        New data to predict.

    sample_weight : array-like, shape (n_samples,), optional

sklearn/cluster/mean_shift_.py

Lines changed: 6 additions & 6 deletions
@@ -35,7 +35,7 @@ def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,

    Parameters
    ----------
-   X : array-like, shape=[n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Input points.

    quantile : float, default 0.3
@@ -115,7 +115,7 @@ def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
    Parameters
    ----------

-   X : array-like, shape=[n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Input data.

    bandwidth : float, optional
@@ -126,7 +126,7 @@ def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.

-   seeds : array-like, shape=[n_seeds, n_features] or None
+   seeds : array-like of shape (n_seeds, n_features) or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.
@@ -256,7 +256,7 @@ def get_bin_seeds(X, bin_size, min_bin_freq=1):
    Parameters
    ----------

-   X : array-like, shape=[n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Input points, the same points that will be used in mean_shift.

    bin_size : float
@@ -272,7 +272,7 @@ def get_bin_seeds(X, bin_size, min_bin_freq=1):

    Returns
    -------
-   bin_seeds : array-like, shape=[n_samples, n_features]
+   bin_seeds : array-like of shape (n_samples, n_features)
        Points used as initial kernel positions in clustering.mean_shift.
    """
@@ -408,7 +408,7 @@ def fit(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape=[n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Samples to cluster.

    y : Ignored

sklearn/compose/_target.py

Lines changed: 1 addition & 1 deletion
@@ -210,7 +210,7 @@ def predict(self, X):

    Parameters
    ----------
-   X : {array-like, sparse matrix}, shape = (n_samples, n_features)
+   X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Samples.

    Returns

sklearn/covariance/empirical_covariance_.py

Lines changed: 4 additions & 4 deletions
@@ -179,7 +179,7 @@ def fit(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training data, where n_samples is the number of samples and
        n_features is the number of features.

@@ -208,7 +208,7 @@ def score(self, X_test, y=None):

    Parameters
    ----------
-   X_test : array-like, shape = [n_samples, n_features]
+   X_test : array-like of shape (n_samples, n_features)
        Test data of which we compute the likelihood, where n_samples is
        the number of samples and n_features is the number of features.
        X_test is assumed to be drawn from the same distribution than
@@ -239,7 +239,7 @@ def error_norm(self, comp_cov, norm='frobenius', scaling=True,

    Parameters
    ----------
-   comp_cov : array-like, shape = [n_features, n_features]
+   comp_cov : array-like of shape (n_features, n_features)
        The covariance to compare with.

    norm : str
@@ -289,7 +289,7 @@ def mahalanobis(self, X):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        The observations, the Mahalanobis distances of the which we
        compute. Observations are assumed to be drawn from the same
        distribution than the data used in fit.

sklearn/covariance/robust_covariance.py

Lines changed: 1 addition & 1 deletion
@@ -624,7 +624,7 @@ def fit(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

sklearn/covariance/shrunk_covariance_.py

Lines changed: 3 additions & 3 deletions
@@ -131,7 +131,7 @@ def fit(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

@@ -406,7 +406,7 @@ def fit(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    y
@@ -561,7 +561,7 @@ def fit(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    y

sklearn/cross_decomposition/pls_.py

Lines changed: 13 additions & 13 deletions
@@ -241,11 +241,11 @@ def fit(self, X, Y):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

-   Y : array-like, shape = [n_samples, n_targets]
+   Y : array-like of shape (n_samples, n_targets)
        Target vectors, where n_samples is the number of samples and
        n_targets is the number of response variables.
    """
@@ -387,11 +387,11 @@ def transform(self, X, Y=None, copy=True):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

-   Y : array-like, shape = [n_samples, n_targets]
+   Y : array-like of shape (n_samples, n_targets)
        Target vectors, where n_samples is the number of samples and
        n_targets is the number of response variables.

@@ -425,7 +425,7 @@ def predict(self, X, copy=True):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

@@ -450,11 +450,11 @@ def fit_transform(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

-   y : array-like, shape = [n_samples, n_targets]
+   y : array-like of shape (n_samples, n_targets)
        Target vectors, where n_samples is the number of samples and
        n_targets is the number of response variables.

@@ -818,11 +818,11 @@ def fit(self, X, Y):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

-   Y : array-like, shape = [n_samples, n_targets]
+   Y : array-like of shape (n_samples, n_targets)
        Target vectors, where n_samples is the number of samples and
        n_targets is the number of response variables.
    """
@@ -868,11 +868,11 @@ def transform(self, X, Y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

-   Y : array-like, shape = [n_samples, n_targets]
+   Y : array-like of shape (n_samples, n_targets)
        Target vectors, where n_samples is the number of samples and
        n_targets is the number of response variables.
    """
@@ -893,11 +893,11 @@ def fit_transform(self, X, y=None):

    Parameters
    ----------
-   X : array-like, shape = [n_samples, n_features]
+   X : array-like of shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of predictors.

-   y : array-like, shape = [n_samples, n_targets]
+   y : array-like of shape (n_samples, n_targets)
        Target vectors, where n_samples is the number of samples and
        n_targets is the number of response variables.
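Because the old notation is easy to locate mechanically, a change like this can be applied and checked file by file. The snippet below is an illustrative helper only, not part of the commit; it assumes it is run from the root of a scikit-learn checkout and that the regex covers the old "shape = [...]" / "shape = (...)" spellings.

# Illustrative only: flag docstring annotations still using the old
# "shape = ..." notation instead of "of shape (...)".
import re
from pathlib import Path

# Matches e.g. "X : array-like, shape = [n_samples, n_features]" but not the
# new "X : array-like of shape (n_samples, n_features)" form (no "=" sign).
OLD_STYLE = re.compile(r":\s*(?:\{?array-like|ndarray|sparse matrix).*shape\s*=")

for path in Path("sklearn").rglob("*.py"):  # assumes a scikit-learn checkout
    for lineno, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
        if OLD_STYLE.search(line):
            print(f"{path}:{lineno}: {line.strip()}")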

0 commit comments