DOC fix dollar sign to euro sign (#29020) · scikit-learn/scikit-learn@f8be06c · GitHub

Commit f8be06c

adrinjalali and glemaitre authored and committed
DOC fix dollar sign to euro sign (#29020)
Co-authored-by: Guillaume Lemaitre <guillaume@probabl.ai>
1 parent 1434bb1 commit f8be06c

File tree: 1 file changed (+11, -11 lines)


examples/model_selection/plot_cost_sensitive_learning.py

Lines changed: 11 additions & 11 deletions
@@ -489,7 +489,7 @@ def plot_roc_pr_curves(vanilla_model, tuned_model, *, title):
 _, ax = plt.subplots()
 ax.hist(amount_fraud, bins=100)
 ax.set_title("Amount of fraud transaction")
-_ = ax.set_xlabel("Amount ($)")
+_ = ax.set_xlabel("Amount (€)")

 # %%
 # Addressing the problem with a business metric
@@ -501,8 +501,8 @@ def plot_roc_pr_curves(vanilla_model, tuned_model, *, title):
 # transaction result in a loss of the amount of the transaction. As stated in [2]_, the
 # gain and loss related to refusals (of fraudulent and legitimate transactions) are not
 # trivial to define. Here, we define that a refusal of a legitimate transaction is
-# estimated to a loss of $5 while the refusal of a fraudulent transaction is estimated
-# to a gain of $50 dollars and the amount of the transaction. Therefore, we define the
+# estimated to a loss of 5€ while the refusal of a fraudulent transaction is estimated
+# to a gain of 50€ and the amount of the transaction. Therefore, we define the
 # following function to compute the total benefit of a given decision:
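Note: the hunk above only fixes the currency symbol; the cost model itself is unchanged: refusing a legitimate transaction costs 5€, refusing a fraudulent one gains 50€ plus the transaction amount, and an accepted fraudulent transaction loses its amount. As a rough, hedged sketch only (not the example's actual business_metric, whose bookkeeping may differ, e.g. by also crediting a commission on accepted legitimate transactions), such a metric could look like this:

import numpy as np


def business_metric_sketch(y_true, y_pred, amount):
    """Total benefit in euros; class 1 = fraud, prediction 1 = refuse the transaction.

    Illustrative sketch based on the prose in the diff, not the example's exact function.
    """
    y_true, y_pred, amount = (np.asarray(a) for a in (y_true, y_pred, amount))
    refused_fraud = (y_true == 1) & (y_pred == 1)        # caught fraud: +50€ + amount
    accepted_fraud = (y_true == 1) & (y_pred == 0)       # missed fraud: -amount
    refused_legitimate = (y_true == 0) & (y_pred == 1)   # false alarm: -5€
    gain = 50 * refused_fraud.sum() + amount[refused_fraud].sum()
    loss = amount[accepted_fraud].sum() + 5 * refused_legitimate.sum()
    return gain - loss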

@@ -557,22 +557,22 @@ def business_metric(y_true, y_pred, amount):
 benefit_cost = business_scorer(
     easy_going_classifier, data_test, target_test, amount=amount_test
 )
-print(f"Benefit/cost of our easy-going classifier: ${benefit_cost:,.2f}")
+print(f"Benefit/cost of our easy-going classifier: {benefit_cost:,.2f}€")

 # %%
 # A classifier that predict all transactions as legitimate would create a profit of
-# around $220,000. We make the same evaluation for a classifier that predicts all
+# around 220,000€. We make the same evaluation for a classifier that predicts all
 # transactions as fraudulent.
 intolerant_classifier = DummyClassifier(strategy="constant", constant=1)
 intolerant_classifier.fit(data_train, target_train)
 benefit_cost = business_scorer(
     intolerant_classifier, data_test, target_test, amount=amount_test
 )
-print(f"Benefit/cost of our intolerant classifier: ${benefit_cost:,.2f}")
+print(f"Benefit/cost of our intolerant classifier: {benefit_cost:,.2f}€")

 # %%
-# Such a classifier create a loss of around $670,000. A predictive model should allow
-# us to make a profit larger than $220,000. It is interesting to compare this business
+# Such a classifier create a loss of around 670,000€. A predictive model should allow
+# us to make a profit larger than 220,000€. It is interesting to compare this business
 # metric with another "standard" statistical metric such as the balanced accuracy.
 from sklearn.metrics import get_scorer
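Note: the business_scorer called in this hunk accepts an extra amount argument, and a balanced_accuracy_scorer is used as the "standard" point of comparison further down. A hedged sketch of how both scorers and the easy-going baseline could be set up, assuming scikit-learn's metadata routing is enabled and reusing the example's business_metric, data_train and target_train (the example's actual setup may differ):

import sklearn
from sklearn.dummy import DummyClassifier
from sklearn.metrics import get_scorer, make_scorer

# Metadata routing lets a scorer receive extra data (here the per-transaction amount).
sklearn.set_config(enable_metadata_routing=True)
# `business_metric` is the function defined in the example (see the @@ hunk headers).
business_scorer = make_scorer(business_metric).set_score_request(amount=True)

# Stock scorer used as the statistical point of comparison.
balanced_accuracy_scorer = get_scorer("balanced_accuracy")

# "Easy-going" baseline: accept every transaction (always predict the legitimate class 0).
easy_going_classifier = DummyClassifier(strategy="constant", constant=0)
easy_going_classifier.fit(data_train, target_train)

With such a scorer, a call like business_scorer(clf, data_test, target_test, amount=amount_test), as shown in the hunk above, forwards the transaction amounts to the metric.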

@@ -607,7 +607,7 @@ def business_metric(y_true, y_pred, amount):

 print(
     "Benefit/cost of our logistic regression: "
-    f"${business_scorer(model, data_test, target_test, amount=amount_test):,.2f}"
+    f"{business_scorer(model, data_test, target_test, amount=amount_test):,.2f}€"
 )
 print(
     "Balanced accuracy of our logistic regression: "
@@ -645,7 +645,7 @@ def business_metric(y_true, y_pred, amount):
 # %%
 print(
     "Benefit/cost of our logistic regression: "
-    f"${business_scorer(tuned_model, data_test, target_test, amount=amount_test):,.2f}"
+    f"{business_scorer(tuned_model, data_test, target_test, amount=amount_test):,.2f}€"
 )
 print(
     "Balanced accuracy of our logistic regression: "
@@ -691,7 +691,7 @@ def business_metric(y_true, y_pred, amount):
 business_score = business_scorer(
     model_fixed_threshold, data_test, target_test, amount=amount_test
 )
-print(f"Benefit/cost of our logistic regression: ${business_score:,.2f}")
+print(f"Benefit/cost of our logistic regression: {business_score:,.2f}€")
 print(
     "Balanced accuracy of our logistic regression: "
     f"{balanced_accuracy_scorer(model_fixed_threshold, data_test, target_test):.3f}"
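Note: the names tuned_model and model_fixed_threshold in the last hunks point to decision-threshold handling. A hedged sketch of how such models could be built with scikit-learn's post-fit threshold tools (model, data_train, target_train, amount_train and the 0.9 cut-off are assumptions for illustration, not taken from this diff):

# Requires scikit-learn >= 1.5 for the threshold estimators.
from sklearn.model_selection import FixedThresholdClassifier, TunedThresholdClassifierCV

# Search the decision cut-off that maximizes the business scorer via cross-validation;
# with metadata routing enabled, `amount` is forwarded to the scorer during the search.
tuned_model = TunedThresholdClassifierCV(model, scoring=business_scorer)
tuned_model.fit(data_train, target_train, amount=amount_train)

# Alternatively, pin an explicitly chosen cut-off instead of the default 0.5
# (0.9 is an arbitrary placeholder, not the value used in the example).
model_fixed_threshold = FixedThresholdClassifier(model, threshold=0.9)
model_fixed_threshold.fit(data_train, target_train)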

0 commit comments
