@@ -601,15 +601,16 @@ def _update_no_improvement_count(self, early_stopping, X_val, y_val):
         self.best_loss_ = self.loss_curve_[-1]

     def fit(self, X, y):
-        """Fit the model to data matrix X and target y.
+        """Fit the model to data matrix X and target(s) y.

         Parameters
         ----------
-        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        X : array-like or sparse matrix, shape (n_samples, n_features)
             The input data.

-        y : array-like, shape (n_samples,)
-            The target values.
+        y : array-like, shape (n_samples,) or (n_samples, n_outputs)
+            The target values (class labels in classification, real numbers in
+            regression).

         Returns
         -------
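For readers skimming the updated docstring, here is a minimal usage sketch of the shapes it now documents. The data and hyperparameters are illustrative only and not part of this change; toy data like this may emit a convergence warning.

# Illustrative sketch only; values and hyperparameters are arbitrary.
import numpy as np
from scipy import sparse
from sklearn.neural_network import MLPClassifier

rng = np.random.RandomState(0)
X = rng.rand(20, 5)                  # (n_samples, n_features)
y = rng.randint(0, 2, 20)            # (n_samples,) class labels

clf = MLPClassifier(hidden_layer_sizes=(10,), max_iter=300, random_state=0)
clf.fit(sparse.csr_matrix(X), y)     # sparse X is accepted

# A multilabel indicator target of shape (n_samples, n_outputs) also works
# for the classifier, matching the new "(n_samples, n_outputs)" wording.
Y = rng.randint(0, 2, (20, 3))
MLPClassifier(hidden_layer_sizes=(10,), max_iter=300, random_state=0).fit(X, Y)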
@@ -818,17 +819,17 @@ class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):

     Attributes
     ----------
-    `classes_` : array or list of array of shape (n_classes,)
+    classes_ : array or list of array of shape (n_classes,)
         Class labels for each output.

-    `loss_` : float
+    loss_ : float
         The current loss computed with the loss function.

-    `coefs_` : list, length n_layers - 1
+    coefs_ : list, length n_layers - 1
         The ith element in the list represents the weight matrix corresponding
         to layer i.

-    `intercepts_` : list, length n_layers - 1
+    intercepts_ : list, length n_layers - 1
         The ith element in the list represents the bias vector corresponding to
         layer i + 1.
@@ -838,10 +839,10 @@ class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
     n_layers_ : int
         Number of layers.

-    `n_outputs_` : int
+    n_outputs_ : int
         Number of outputs.

-    `out_activation_` : string
+    out_activation_ : string
         Name of the output activation function.

     Notes
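A small hedged sketch of how the attributes listed above look on a fitted MLPClassifier; the shapes in the comments assume the toy setup shown and are not part of this change.

# Illustrative sketch only; toy data may trigger a convergence warning.
import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.RandomState(0)
X, y = rng.rand(30, 4), rng.randint(0, 3, 30)
clf = MLPClassifier(hidden_layer_sizes=(8,), max_iter=500, random_state=0).fit(X, y)

print(clf.classes_)                          # class labels, shape (n_classes,)
print(clf.n_layers_)                         # input + hidden + output -> 3 here
print(len(clf.coefs_))                       # n_layers_ - 1 weight matrices
print([W.shape for W in clf.coefs_])         # e.g. [(4, 8), (8, 3)]
print([b.shape for b in clf.intercepts_])    # e.g. [(8,), (3,)]
print(clf.loss_)                             # current loss, a float
print(clf.n_outputs_, clf.out_activation_)   # e.g. 3, 'softmax' for multiclass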
@@ -1163,14 +1164,14 @@ class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):

     Attributes
     ----------
-    `loss_` : float
+    loss_ : float
         The current loss computed with the loss function.

-    `coefs_` : list, length n_layers - 1
+    coefs_ : list, length n_layers - 1
         The ith element in the list represents the weight matrix corresponding
         to layer i.

-    `intercepts_` : list, length n_layers - 1
+    intercepts_ : list, length n_layers - 1
         The ith element in the list represents the bias vector corresponding to
         layer i + 1.
@@ -1180,10 +1181,10 @@ class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
     n_layers_ : int
         Number of layers.

-    `n_outputs_` : int
+    n_outputs_ : int
         Number of outputs.

-    `out_activation_` : string
+    out_activation_ : string
         Name of the output activation function.

     Notes
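Likewise for MLPRegressor, a hedged sketch assuming a two-column target; values and hyperparameters are illustrative only and not part of this change.

# Illustrative sketch only; toy data may trigger a convergence warning.
import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.RandomState(0)
X = rng.rand(40, 5)
Y = rng.rand(40, 2)                          # (n_samples, n_outputs) target

reg = MLPRegressor(hidden_layer_sizes=(16,), max_iter=800, random_state=0).fit(X, Y)
print(reg.n_outputs_)                        # 2
print(reg.out_activation_)                   # 'identity' for regression
print(reg.loss_)                             # current loss, a float
print([W.shape for W in reg.coefs_])         # e.g. [(5, 16), (16, 2)]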