diff --git a/sklearn/metrics/scorer.py b/sklearn/metrics/scorer.py
index c13d59a8daa2b..4cde38bfa0179 100644
--- a/sklearn/metrics/scorer.py
+++ b/sklearn/metrics/scorer.py
@@ -19,7 +19,6 @@
 # License: Simplified BSD
 
 from abc import ABCMeta, abstractmethod
-from collections import Iterable
 import warnings
 
 import numpy as np
@@ -27,8 +26,8 @@
 from . import (r2_score, median_absolute_error, mean_absolute_error,
                mean_squared_error, mean_squared_log_error, accuracy_score,
                f1_score, roc_auc_score, average_precision_score,
-               precision_score, recall_score, log_loss, balanced_accuracy_score,
-               explained_variance_score, brier_score_loss)
+               precision_score, recall_score, log_loss,
+               explained_variance_score)
 
 from .cluster import adjusted_rand_score
 from .cluster import homogeneity_score
@@ -136,10 +135,7 @@ def __call__(self, clf, X, y, sample_weight=None):
         """
         super(_ProbaScorer, self).__call__(clf, X, y,
                                            sample_weight=sample_weight)
-        y_type = type_of_target(y)
         y_pred = clf.predict_proba(X)
-        if y_type == "binary":
-            y_pred = y_pred[:, 1]
         if sample_weight is not None:
             return self._sign * self._score_func(y, y_pred,
                                                  sample_weight=sample_weight,
@@ -212,7 +208,6 @@ def __call__(self, clf, X, y, sample_weight=None):
 
     def _factory_args(self):
         return ", needs_threshold=True"
 
-
 def get_scorer(scoring):
     """Get a scorer from string
@@ -236,8 +231,8 @@ def get_scorer(scoring):
             valid = False  # Don't raise here to make the error message elegant
         if not valid:
             raise ValueError('%r is not a valid scoring value. '
-                             'Valid options are %s'
-                             % (scoring, sorted(scorers)))
+                             'For valid options use sorted(SCORERS.keys())'
+                             % (scoring))
     else:
         scorer = scoring
     return scorer
@@ -301,10 +296,6 @@ def check_scoring(estimator, scoring=None, allow_none=False):
                 "If no scoring is specified, the estimator passed should "
                 "have a 'score' method. The estimator %r does not."
                 % estimator)
-    elif isinstance(scoring, Iterable):
-        raise ValueError("For evaluating multiple scores, use "
-                         "sklearn.model_selection.cross_validate instead. "
-                         "{0} was passed.".format(scoring))
     else:
         raise ValueError("scoring value should either be a callable, string or"
                          " None. %r was passed" % scoring)
@@ -505,7 +496,6 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False,
 # Standard Classification Scores
 accuracy_scorer = make_scorer(accuracy_score)
 f1_scorer = make_scorer(f1_score)
-balanced_accuracy_scorer = make_scorer(balanced_accuracy_score)
 
 # Score functions that need decision values
 roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
@@ -523,9 +513,6 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False,
 log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
                               needs_proba=True)
 log_loss_scorer._deprecation_msg = deprecation_msg
-brier_score_loss_scorer = make_scorer(brier_score_loss,
-                                      greater_is_better=False,
-                                      needs_proba=True)
 
 
 # Clustering scores
@@ -549,11 +536,9 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                mean_absolute_error=mean_absolute_error_scorer,
                mean_squared_error=mean_squared_error_scorer,
                accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
-               balanced_accuracy=balanced_accuracy_scorer,
                average_precision=average_precision_scorer,
                log_loss=log_loss_scorer,
                neg_log_loss=neg_log_loss_scorer,
-               brier_score_loss=brier_score_loss_scorer,
                # Cluster metrics that use supervised evaluation
                adjusted_rand_score=adjusted_rand_scorer,
                homogeneity_score=homogeneity_scorer,
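
Note (not part of the patch): the `_ProbaScorer.__call__` hunk above means score functions registered with `needs_proba=True` now receive the full `(n_samples, n_classes)` output of `predict_proba`, instead of the scorer slicing out the positive-class column for binary targets. A minimal sketch of the difference, using `log_loss`, which accepts either form for binary problems; the toy data is illustrative only:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss

# Tiny binary problem, just to produce probabilities.
X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X, y)

proba = clf.predict_proba(X)  # shape (n_samples, 2)

# log_loss handles the full probability matrix, which is what the
# scorer forwards after this change...
full = log_loss(y, proba)
# ...and, for binary y, also the positive-class column that the
# removed `y_pred = y_pred[:, 1]` branch used to slice out.
column = log_loss(y, proba[:, 1])
assert np.isclose(full, column)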
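
Likewise not part of the patch: the reworded `get_scorer` error directs users to `sorted(SCORERS.keys())` rather than embedding the option list in the message itself. A quick usage sketch, assuming the public `sklearn.metrics` imports available in this version of scikit-learn:

from sklearn.metrics import SCORERS, get_scorer

print(sorted(SCORERS.keys()))      # enumerate the valid scoring strings
accuracy = get_scorer('accuracy')  # look a scorer up by name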