DOC Fix doc of defaults in sklearn.linear_model._sag.py (#18098) · sstalley/scikit-learn@3dd2e94 · GitHub
[go: up one dir, main page]

Skip to content

Commit 3dd2e94

Browse files
authored
DOC Fix doc of defaults in sklearn.linear_model._sag.py (scikit-learn#18098)
1 parent 709ffc3 commit 3dd2e94

File tree

1 file changed

+24
-25
lines changed

1 file changed

+24
-25
lines changed

sklearn/linear_model/_sag.py

Lines changed: 24 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
2121
n_samples=None,
2222
is_saga=False):
23-
"""Compute automatic step size for SAG solver
23+
"""Compute automatic step size for SAG solver.
2424
2525
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
2626
the max sum of squares for over all samples.
@@ -34,17 +34,17 @@ def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
3434
Constant that multiplies the regularization term, scaled by
3535
1. / n_samples, the number of samples.
3636
37-
loss : string, in {"log", "squared"}
37+
loss : {'log', 'squared', 'multinomial'}
3838
The loss function used in SAG solver.
3939
4040
fit_intercept : bool
4141
Specifies if a constant (a.k.a. bias or intercept) will be
4242
added to the decision function.
4343
44-
n_samples : int, optional
44+
n_samples : int, default=None
4545
Number of rows in X. Useful if is_saga=True.
4646
47-
is_saga : boolean, optional
47+
is_saga : bool, default=False
4848
Whether to return step size for the SAGA algorithm or the SAG
4949
algorithm.
5050
@@ -91,7 +91,7 @@ def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
9191
check_input=True, max_squared_sum=None,
9292
warm_start_mem=None,
9393
is_saga=False):
94-
"""SAG solver for Ridge and LogisticRegression
94+
"""SAG solver for Ridge and LogisticRegression.
9595
9696
SAG stands for Stochastic Average Gradient: the gradient of the loss is
9797
estimated each sample at a time and the model is updated along the way with
@@ -113,17 +113,17 @@ def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
113113
114114
Parameters
115115
----------
116-
X : {array-like, sparse matrix}, shape (n_samples, n_features)
117-
Training data
116+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
117+
Training data.
118118
119-
y : numpy array, shape (n_samples,)
119+
y : ndarray of shape (n_samples,)
120120
Target values. With loss='multinomial', y must be label encoded
121121
(see preprocessing.LabelEncoder).
122122
123-
sample_weight : array-like, shape (n_samples,), optional
123+
sample_weight : array-like of shape (n_samples,), default=None
124124
Weights applied to individual samples (1. for unweighted).
125125
126-
loss : 'log' | 'squared' | 'multinomial'
126+
loss : {'log', 'squared', 'multinomial'}, default='log'
127127
Loss function that will be optimized:
128128
-'log' is the binary logistic loss, as used in LogisticRegression.
129129
-'squared' is the squared loss, as used in Ridge.
@@ -133,40 +133,39 @@ def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
133133
.. versionadded:: 0.18
134134
*loss='multinomial'*
135135
136-
alpha : float, optional
136+
alpha : float, default=1.
137137
L2 regularization term in the objective function
138-
``(0.5 * alpha * || W ||_F^2)``. Defaults to 1.
138+
``(0.5 * alpha * || W ||_F^2)``.
139139
140-
beta : float, optional
140+
beta : float, default=0.
141141
L1 regularization term in the objective function
142142
``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
143-
Defaults to 0.
144143
145-
max_iter : int, optional
144+
max_iter : int, default=1000
146145
The max number of passes over the training data if the stopping
147-
criteria is not reached. Defaults to 1000.
146+
criteria is not reached.
148147
149-
tol : double, optional
148+
tol : double, default=0.001
150149
The stopping criteria for the weights. The iterations will stop when
151-
max(change in weights) / max(weights) < tol. Defaults to .001
150+
max(change in weights) / max(weights) < tol.
152151
153-
verbose : integer, optional
152+
verbose : int, default=0
154153
The verbosity level.
155154
156-
random_state : int, RandomState instance, default=None
155+
random_state : int or RandomState instance, default=None
157156
Used when shuffling the data. Pass an int for reproducible output
158157
across multiple function calls.
159158
See :term:`Glossary <random_state>`.
160159
161-
check_input : bool, default True
160+
check_input : bool, default=True
162161
If False, the input arrays X and y will not be checked.
163162
164-
max_squared_sum : float, default None
163+
max_squared_sum : float, default=None
165164
Maximum squared sum of X over samples. If None, it will be computed,
166165
going through all the samples. The value should be precomputed
167166
to speed up cross validation.
168167
169-
warm_start_mem : dict, optional
168+
warm_start_mem : dict, default=None
170169
The initialization parameters used for warm starting. Warm starting is
171170
currently used in LogisticRegression but not in Ridge.
172171
It contains:
@@ -180,13 +179,13 @@ def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
180179
- 'seen': array of boolean describing the seen samples.
181180
- 'num_seen': the number of seen samples.
182181
183-
is_saga : boolean, optional
182+
is_saga : bool, default=False
184183
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
185184
better in the first epochs, and allow for l1 regularisation.
186185
187186
Returns
188187
-------
189-
coef_ : array, shape (n_features)
188+
coef_ : ndarray of shape (n_features,)
190189
Weight vector.
191190
192191
n_iter_ : int

0 commit comments

Comments (0)