@@ -1091,7 +1091,6 @@ def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                            scoring=scoring)
     # We clone the estimator to make sure that all the folds are
     # independent, and that it is pickle-able.
-    fit_params = fit_params if fit_params is not None else {}
     parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                         pre_dispatch=pre_dispatch)
     scores = parallel(
@@ -1104,15 +1103,15 @@ def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
 def _cross_val_score(estimator, X, y, scorer, train, test, verbose,
                      fit_params):
     """Inner loop for cross validation"""
-    # TODO replace with grid_search.fit_grid_point()
     n_samples = _num_samples(X)
+    fit_params = fit_params if fit_params is not None else {}
     fit_params = dict([(k, np.asarray(v)[train]  # TODO why is this necessary?
                         if hasattr(v, '__len__') and len(v) == n_samples else v)
                        for k, v in fit_params.items()])

     X_train, y_train = _split(estimator, X, y, train)
     X_test, y_test = _split(estimator, X, y, test, train)
-    estimator.fit(X_train, y_train, **fit_params)
+    _fit(estimator.fit, X_train, y_train, **fit_params)
     score = _score(estimator, X_test, y_test, scorer)

     if verbose > 1:
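Note on the `fit_params` handling above: the dict comprehension slices any fit parameter that looks per-sample (i.e. has a length equal to `n_samples`) down to the training indices, so something like `sample_weight` stays aligned with `X_train`, while scalar parameters pass through untouched. A standalone illustration of that behavior (the variable names here are made up for the example):

```python
import numpy as np

n_samples = 6
train = np.array([0, 2, 3, 5])            # indices of the training fold
fit_params = {
    "sample_weight": np.ones(n_samples),  # per-sample: gets sliced to the fold
    "max_iter": 100,                      # scalar: passed through unchanged
}

sliced = dict(
    (k, np.asarray(v)[train]
        if hasattr(v, "__len__") and len(v) == n_samples else v)
    for k, v in fit_params.items()
)
assert sliced["sample_weight"].shape == (4,)
assert sliced["max_iter"] == 100
```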
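The `_fit` helper that replaces the direct `estimator.fit(...)` call is not defined in these hunks. Presumably its job is to tolerate `y=None` so that unsupervised estimators, whose `fit` takes only `X`, work through the same code path; a minimal sketch under that assumption:

```python
def _fit(fit_function, X_train, y_train, **fit_params):
    """Fit an estimator on the training split.

    Sketch only -- assumes the real helper's role is to skip the ``y``
    argument when it is None, so unsupervised estimators fit cleanly.
    """
    if y_train is None:
        fit_function(X_train, **fit_params)
    else:
        fit_function(X_train, y_train, **fit_params)
```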