MNT Adds CurveDisplayMixin _get_response_values by glemaitre · Pull Request #20999 · scikit-learn/scikit-learn

Closed · wants to merge 22 commits into from
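The PR is a maintenance refactor: it pulls the logic that extracts prediction scores from a fitted binary classifier into a shared `_get_response_values` helper behind a display mixin. As a rough illustration of the behavior such a helper needs (the name, signature, and defaults below are assumptions for this sketch, not the merged implementation):

import numpy as np

def get_response_values_sketch(estimator, X, response_method, pos_label=None):
    # Illustrative stand-in for the helper named in the PR title.
    # Resolve and call the requested response method, then reduce the
    # output to the score of the positive class for binary problems.
    y_pred = getattr(estimator, response_method)(X)
    if pos_label is None:
        pos_label = estimator.classes_[-1]  # assumed default: the last class
    if response_method == "predict_proba":
        # keep only the probability column of the requested positive class
        col = np.flatnonzero(estimator.classes_ == pos_label)[0]
        y_pred = y_pred[:, col]
    elif response_method == "decision_function" and pos_label == estimator.classes_[0]:
        y_pred = -y_pred  # flip the sign when the "negative" class is requested
    return y_pred, pos_label

This matches the call site removed in the diff below, where `_get_response(X, estimator, response_method="predict_proba", pos_label=pos_label)` returns the pair `(y_prob, pos_label)`.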
34 changes: 17 additions & 17 deletions sklearn/calibration.py
@@ -25,14 +25,14 @@
     RegressorMixin,
     clone,
     MetaEstimatorMixin,
-    is_classifier,
 )
+from .metrics._base import _check_pos_label_consistency
+from .metrics._plot.base import BinaryClassifierCurveDisplayMixin
 from .preprocessing import label_binarize, LabelEncoder
 from .utils import (
     column_or_1d,
     deprecated,
     indexable,
-    check_matplotlib_support,
 )

 from .utils.multiclass import check_classification_targets
@@ -47,8 +47,6 @@
 from .isotonic import IsotonicRegression
 from .svm import LinearSVC
 from .model_selection import check_cv, cross_val_predict
-from .metrics._base import _check_pos_label_consistency
-from .metrics._plot.base import _get_response


 class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
@@ -987,7 +985,7 @@ def calibration_curve(
     return prob_true, prob_pred


-class CalibrationDisplay:
+class CalibrationDisplay(BinaryClassifierCurveDisplayMixin):
     """Calibration curve (also known as reliability diagram) visualization.

     It is recommended to use
@@ -1098,7 +1096,7 @@ def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
         display : :class:`~sklearn.calibration.CalibrationDisplay`
             Object that stores computed values.
         """
-        check_matplotlib_support("CalibrationDisplay.plot")
+        super().plot()
         import matplotlib.pyplot as plt

         if ax is None:
@@ -1234,17 +1232,15 @@ def from_estimator(
         >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
         >>> plt.show()
         """
-        method_name = f"{cls.__name__}.from_estimator"
-        check_matplotlib_support(method_name)
-
-        if not is_classifier(estimator):
-            raise ValueError("'estimator' should be a fitted classifier.")
-
-        y_prob, pos_label = _get_response(
-            X, estimator, response_method="predict_proba", pos_label=pos_label
+        y_prob, pos_label, name = super().from_estimator(
+            estimator,
+            X,
+            y,
+            response_method="predict_proba",
+            pos_label=pos_label,
+            name=name,
         )

-        name = name if name is not None else estimator.__class__.__name__
         return cls.from_predictions(
             y,
             y_prob,
@@ -1354,8 +1350,12 @@ def from_predictions(
         >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
         >>> plt.show()
         """
-        method_name = f"{cls.__name__}.from_estimator"
-        check_matplotlib_support(method_name)
+        pos_label, name = super().from_predictions(
+            y_true,
+            y_prob,
+            pos_label=pos_label,
+            name=name,
+        )

         prob_true, prob_pred = calibration_curve(
             y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label
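Taken together, the calibration.py changes move four repeated steps behind the mixin: the matplotlib availability check, the is-it-a-classifier check, response extraction, and default naming. A hypothetical reconstruction of the `BinaryClassifierCurveDisplayMixin` entry points the calls above rely on (reusing `get_response_values_sketch` from the earlier sketch; the real mixin's internals may differ):

from sklearn.base import is_classifier
# available from sklearn.utils in the sklearn version this PR targets
from sklearn.utils import check_matplotlib_support


class BinaryClassifierCurveDisplayMixinSketch:
    # Hypothetical reconstruction: shared validation for binary-classifier
    # curve displays such as CalibrationDisplay.

    def plot(self):
        # every display calls super().plot() first, so the shared step is
        # presumably just the matplotlib availability check
        check_matplotlib_support(f"{self.__class__.__name__}.plot")

    @classmethod
    def from_estimator(
        cls, estimator, X, y, *, response_method, pos_label=None, name=None
    ):
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        if not is_classifier(estimator):
            raise ValueError("'estimator' should be a fitted classifier.")
        y_pred, pos_label = get_response_values_sketch(
            estimator, X, response_method, pos_label=pos_label
        )
        name = name if name is not None else estimator.__class__.__name__
        return y_pred, pos_label, name

    @classmethod
    def from_predictions(cls, y_true, y_pred, *, pos_label=None, name=None):
        check_matplotlib_support(f"{cls.__name__}.from_predictions")
        # the newly added import of _check_pos_label_consistency suggests the
        # mixin also validates pos_label against y_true at this point
        return pos_label, name

The return signatures mirror the tuple unpacking at the two call sites in the diff: `(y_prob, pos_label, name)` from `from_estimator` and `(pos_label, name)` from `from_predictions`.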
30 changes: 13 additions & 17 deletions sklearn/ensemble/_stacking.py
@@ -30,8 +30,11 @@
 from ..utils import Bunch
 from ..utils.metaestimators import if_delegate_has_method
 from ..utils.multiclass import check_classification_targets
-from ..utils.validation import check_is_fitted
-from ..utils.validation import column_or_1d
+from ..utils.validation import (
+    _check_response_method,
+    check_is_fitted,
+    column_or_1d,
+)
 from ..utils.fixes import delayed


@@ -104,21 +107,14 @@ def _concatenate_predictions(self, X, predictions):
     def _method_name(name, estimator, method):
         if estimator == "drop":
             return None
-        if method == "auto":
-            if getattr(estimator, "predict_proba", None):
-                return "predict_proba"
-            elif getattr(estimator, "decision_function", None):
-                return "decision_function"
-            else:
-                return "predict"
-        else:
-            if not hasattr(estimator, method):
-                raise ValueError(
-                    "Underlying estimator {} does not implement the method {}.".format(
-                        name, method
-                    )
-                )
-            return method
+        method = None if method == "auto" else method
+        try:
+            method_name = _check_response_method(estimator, method).__name__
+        except AttributeError as e:
+            raise ValueError(
+                f"Underlying estimator {name} does not implement the method {method}."
+            ) from e
+        return method_name

     def fit(self, X, y, sample_weight=None):
         """Fit the estimators.
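In _stacking.py, the hand-rolled "auto" resolution is replaced by `_check_response_method`, which returns the bound method or raises AttributeError. A minimal sketch of the contract `_method_name` now relies on, assuming that passing None triggers the usual predict_proba, then decision_function, then predict precedence:

def check_response_method_sketch(estimator, response_method):
    # Illustrative stand-in for sklearn.utils.validation._check_response_method:
    # return the bound method, or raise AttributeError if none is available.
    if response_method is None:  # the refactor maps "auto" to None
        candidates = ["predict_proba", "decision_function", "predict"]
    else:
        candidates = [response_method]
    for name in candidates:
        if hasattr(estimator, name):
            return getattr(estimator, name)
    raise AttributeError(
        f"{estimator.__class__.__name__} has none of: {', '.join(candidates)}"
    )

With this contract, `_method_name` keeps its public behavior: a classifier exposing predict_proba still resolves "auto" to "predict_proba", while requesting a method the estimator lacks surfaces as the same ValueError as before, now re-raised from the helper's AttributeError.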