Merge branch 'pr/3395' · scikit-learn/scikit-learn@f38c1ca · GitHub

Commit f38c1ca

Merge branch 'pr/3395'

2 parents b186348 + 15be75e · commit f38c1ca

File tree: 9 files changed, +213 −153 lines

benchmarks/bench_multilabel_metrics.py (6 additions, 1 deletion)

@@ -12,11 +12,13 @@
 import sys
 
 import matplotlib.pyplot as plt
+import scipy.sparse as sp
 import numpy as np
 
 from sklearn.datasets import make_multilabel_classification
 from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
                              jaccard_similarity_score)
+from sklearn.utils.testing import ignore_warnings
 
 
 METRICS = {
@@ -30,9 +32,12 @@
 FORMATS = {
     'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
     'dense': lambda y: y,
+    'csr': lambda y: sp.csr_matrix(y),
+    'csc': lambda y: sp.csc_matrix(y),
 }
 
 
+@ignore_warnings
 def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
               formats=tuple(v for k, v in sorted(FORMATS.items())),
               samples=1000, classes=4, density=.2,
@@ -109,7 +114,7 @@ def _tabulate(results, metrics, formats):
 
 
 def _plot(results, metrics, formats, title, x_ticks, x_label,
-          format_markers=('x', '|', 'o'),
+          format_markers=('x', '|', 'o', '+'),
           metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
     """
     Plot the results by metric, format and some other variable given by
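
The new 'csr' and 'csc' FORMATS entries extend the benchmark to scipy.sparse label representations, and @ignore_warnings presumably silences deprecation noise from the legacy 'sequences' format. Below is a minimal standalone sketch of what the sparse formats exercise, assuming (as the diff implies) that the multilabel metrics accept sparse label-indicator matrices; the random data and the metric pair are illustrative, not taken from the benchmark itself:

import numpy as np
import scipy.sparse as sp

from sklearn.metrics import accuracy_score, hamming_loss

# Random 0/1 label-indicator matrices: 100 samples, 4 labels.
rng = np.random.RandomState(0)
y_true = (rng.rand(100, 4) > .8).astype(int)
y_pred = (rng.rand(100, 4) > .8).astype(int)

# The same data in dense, CSR and CSC form should score identically.
for to_format in (lambda y: y, sp.csr_matrix, sp.csc_matrix):
    print(accuracy_score(to_format(y_true), to_format(y_pred)),
          hamming_loss(to_format(y_true), to_format(y_pred)))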

doc/modules/model_evaluation.rst (4 additions, 4 deletions)

@@ -260,7 +260,7 @@ where :math:`1(x)` is the `indicator function
 
 In the multilabel case with binary label indicators: ::
 
-  >>> accuracy_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+  >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
   0.5
 
 .. topic:: Example:
@@ -373,7 +373,7 @@ where :math:`1(x)` is the `indicator function
 
 In the multilabel case with binary label indicators: ::
 
-  >>> hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.zeros((2, 2)))
+  >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
   0.75
 
 .. note::
@@ -425,7 +425,7 @@ score is equal to the classification accuracy.
 
 In the multilabel case with binary label indicators: ::
 
-  >>> jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+  >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
   0.75
 
 .. _precision_recall_f_measure_metrics:
@@ -889,7 +889,7 @@ where :math:`1(x)` is the `indicator function
 
 In the multilabel case with binary label indicators: ::
 
-  >>> zero_one_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+  >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
   0.5