@@ -26,7 +26,8 @@
 from . import (r2_score, median_absolute_error, mean_absolute_error,
                mean_squared_error, mean_squared_log_error, accuracy_score,
                f1_score, roc_auc_score, average_precision_score,
-               precision_score, recall_score, log_loss)
+               precision_score, recall_score, log_loss,
+               fowlkes_mallows_score, calinski_harabaz_score)
 from .cluster import adjusted_rand_score
 from ..utils.multiclass import type_of_target
 from ..externals import six
@@ -406,7 +407,9 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                average_precision=average_precision_scorer,
                log_loss=log_loss_scorer,
                neg_log_loss=neg_log_loss_scorer,
-               adjusted_rand_score=adjusted_rand_scorer)
+               adjusted_rand_score=adjusted_rand_scorer,
+               fowlkes_mallows_score=fowlkes_mallows_score,
+               calinski_harabaz_score=calinski_harabaz_score)
 
 for name, metric in [('precision', precision_score),
                      ('recall', recall_score), ('f1', f1_score)]:
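Note that, unlike the neighbouring entries (for example adjusted_rand_score=adjusted_rand_scorer), the two new SCORERS keys are bound to the raw metric functions rather than to make_scorer-wrapped scorer objects. A minimal sketch of the usual wrapping, assuming fowlkes_mallows_score is importable from sklearn.metrics and using the hypothetical name fowlkes_mallows_scorer:

    # Sketch only: mirrors the adjusted_rand_scorer pattern used elsewhere in
    # this module; fowlkes_mallows_scorer is a hypothetical name, not part of
    # the diff above.
    from sklearn.metrics import make_scorer, fowlkes_mallows_score

    fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)

    # The resulting scorer is called as scorer(estimator, X, y_true): it
    # predicts cluster labels with the estimator and scores them against
    # y_true, e.g.
    #   cross_val_score(KMeans(n_clusters=3), X, y,
    #                   scoring=fowlkes_mallows_scorer)

calinski_harabaz_score is left out of the sketch because it takes the data matrix and the predicted labels rather than a (labels_true, labels_pred) pair, so make_scorer does not wrap it in the same way.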