@@ -1975,8 +1975,8 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
         Whether or not the training data should be shuffled after each epoch.
         Defaults to True.
 
-    verbose : integer, optional
-        The verbosity level
+    verbose : int, optional
+        The verbosity level.
 
     random_state : int, RandomState instance or None, optional (default=None)
         The seed of the pseudo random number generator to use when shuffling
@@ -1985,7 +1985,7 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
         generator; If None, the random number generator is the RandomState
         instance used by `np.random`.
 
-    learning_rate : string, optional
+    learning_rate : str, optional
         The learning rate schedule to use with `fit`. (If using `partial_fit`,
         learning rate must be controlled directly).
 
@@ -2059,6 +2059,17 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
 
         .. versionadded:: 1.0
 
+    See Also
+    --------
+    sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
+
+    Notes
+    -----
+    This estimator has a linear complexity in the number of training samples
+    and is thus better suited than the `sklearn.svm.OneClassSVM`
+    implementation for datasets with a large number of training samples (say
+    > 10,000).
+
     Examples
     --------
     >>> import numpy as np
@@ -2070,17 +2081,6 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
 
     >>> print(clf.predict([[4, 4]]))
     [1]
-
-    See also
-    --------
-    sklearn.svm.OneClassSVM
-
-    Notes
-    -----
-    This estimator has a linear complexity in the number of training samples
-    and is thus better suited than the `sklearn.svm.OneClassSVM`
-    implementation for datasets with a large number of training samples (say
-    > 10,000).
     """
 
     loss_functions = {"hinge": (Hinge, 1.0)}
@@ -2295,14 +2295,17 @@ def partial_fit(self, X, y=None, sample_weight=None):
         ----------
         X : {array-like, sparse matrix}, shape (n_samples, n_features)
             Subset of the training data.
+        y : Ignored
+            Not used, present for API consistency by convention.
 
         sample_weight : array-like, shape (n_samples,), optional
             Weights applied to individual samples.
             If not provided, uniform weights are assumed.
 
         Returns
         -------
-        self : returns an instance of self.
+        self : object
+            Returns a fitted instance of self.
         """
 
         alpha = self.nu / 2
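Because `y` is now documented as `Ignored`, `partial_fit` can be driven with data alone, which suits out-of-core use. A sketch under assumed inputs (the batch shape, `nu`, and random data are illustrative, not from the diff):

```python
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

clf = SGDOneClassSVM(nu=0.1, random_state=42)
rng = np.random.RandomState(0)

# Stream mini-batches; no y is passed because the estimator is
# unsupervised (y is the Ignored parameter documented above).
for _ in range(10):
    batch = rng.randn(100, 5)
    clf.partial_fit(batch)

print(clf.predict(rng.randn(3, 5)))  # array of +1 / -1 labels
```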
@@ -2383,6 +2386,8 @@ def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
         ----------
         X : {array-like, sparse matrix}, shape (n_samples, n_features)
             Training data.
+        y : Ignored
+            Not used, present for API consistency by convention.
 
         coef_init : array, shape (n_classes, n_features)
             The initial coefficients to warm-start the optimization.
@@ -2398,7 +2403,8 @@ def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
 
         Returns
         -------
-        self : returns an instance of self.
+        self : object
+            Returns a fitted instance of self.
         """
 
         alpha = self.nu / 2
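The `coef_init` and `offset_init` parameters documented in these `fit` hunks allow warm-starting the optimization from a previous solution. A sketch under the assumption that the fitted `coef_` (shape `(1, n_features)`) and `offset_` attributes can be fed back in directly; the data here is arbitrary:

```python
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(0)
X_old = rng.randn(500, 3)
X_new = rng.randn(500, 3)  # e.g. a fresh batch arriving later

clf = SGDOneClassSVM(random_state=42).fit(X_old)

# Warm-start a second fit from the previously learned solution.
clf2 = SGDOneClassSVM(random_state=42)
clf2.fit(X_new, coef_init=clf.coef_, offset_init=clf.offset_)
print(clf2.predict(X_new[:3]))
```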