Commit 26144bd

update version in svdd docs to 1.1, relocate from 1.0 to 1.1 in whats_new
add backticks (scikit-learn#20914), deprecate **params in fit (scikit-learn#20843), add feature_names_in_ (scikit-learn#20787); uncompromisingly reformat plot_oneclass_vs_svdd with black
1 parent bbedeef commit 26144bd

File tree: 4 files changed, +64 −42 lines changed

doc/whats_new/v1.0.rst

Lines changed: 0 additions & 7 deletions
@@ -1110,13 +1110,6 @@ Changelog
   now deprecated. Use `scipy.sparse.csgraph.shortest_path` instead. :pr:`20531`
   by `Tom Dupre la Tour`_.
 
-:mod:`sklearn.svm`
-..................
-
-- |Feature| Added the :class:`svm.SVDD` class for novelty detection based
-  on soft minimal volume hypersphere around the sample data. :pr:`7910`
-  by :user:`Ivan Nazarov <ivannz>`.
-
 Code and Documentation Contributors
 -----------------------------------
 

doc/whats_new/v1.1.rst

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line number 10000 Diff line change
@@ -181,6 +181,10 @@ Changelog
181181
parameters in `fit` instead of `__init__`.
182182
:pr:`21436` by :user:`Haidar Almubarak <Haidar13 >`.
183183

184+
- |Feature| Added the :class:`svm.SVDD` class for novelty detection based
185+
on soft minimal volume hypersphere around the sample data. :pr:`7910`
186+
by :user:`Ivan Nazarov <ivannz>`.
187+
184188
:mod:`sklearn.utils`
185189
....................
186190

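For orientation, here is a minimal novelty-detection sketch using the class added above. This is illustrative code, not part of the diff: it assumes this fork is installed and that `SVDD` mirrors the `svm.OneClassSVM` API (as the example script changed below suggests); the toy data and `gamma` value are made up.

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X_train = 0.3 * rng.randn(100, 2)  # regular observations around the origin

# Fit a soft minimal-volume hypersphere around the training sample.
clf = svm.SVDD(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)

X_new = np.array([[0.1, -0.2], [3.5, 3.5]])
print(clf.predict(X_new))            # +1 for inliers, -1 for outliers
print(clf.decision_function(X_new))  # positive inside the learned frontier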
examples/svm/plot_oneclass_vs_svdd.py

Lines changed: 49 additions & 31 deletions
@@ -41,22 +41,26 @@
 X_outliers = random_state.uniform(low=-4, high=4, size=(20, 2))
 
 # Define the models
-nu = .1
-kernels = [("RBF", dict(kernel="rbf", gamma=0.1)),
-           ("Poly", dict(kernel="poly", degree=2, coef0=1.0)),
-           ]
+nu = 0.1
+kernels = [
+    ("RBF", dict(kernel="rbf", gamma=0.1)),
+    ("Poly", dict(kernel="poly", degree=2, coef0=1.0)),
+]
 
 for kernel_name, kernel in kernels:
 
     # Use low tolerance to ensure better precision of the SVM
     # optimization procedure.
-    classifiers = [("OCSVM", svm.OneClassSVM(nu=nu, tol=1e-8, **kernel)),
-                   ("SVDD", svm.SVDD(nu=nu, tol=1e-8, **kernel)),
-                   ]
+    classifiers = [
+        ("OCSVM", svm.OneClassSVM(nu=nu, tol=1e-8, **kernel)),
+        ("SVDD", svm.SVDD(nu=nu, tol=1e-8, **kernel)),
+    ]
 
     fig = plt.figure(figsize=(12, 5))
-    fig.suptitle("One-Class SVM versus SVDD "
-                 "(error train, error novel regular, error novel abnormal)")
+    fig.suptitle(
+        "One-Class SVM versus SVDD "
+        "(error train, error novel regular, error novel abnormal)"
+    )
 
     for i, (model_name, clf) in enumerate(classifiers):
         clf.fit(X_train)
@@ -74,32 +78,46 @@
         Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
         Z = Z.reshape(xx.shape)
 
-        ax.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7),
-                    cmap=plt.cm.PuBu, zorder=-99)
-        ax.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred',
-                    zorder=-98)
-        a = ax.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred',
-                       zorder=-97)
+        ax.contourf(
+            xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu, zorder=-99
+        )
+        ax.contourf(xx, yy, Z, levels=[0, Z.max()], colors="palevioletred", zorder=-98)
+        a = ax.contour(
+            xx, yy, Z, levels=[0], linewidths=2, colors="darkred", zorder=-97
+        )
 
         s = 40
-        b1 = ax.scatter(X_train[:, 0], X_train[:, 1], s=s,
-                        c='white', edgecolors='k')
-        b2 = ax.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
-        c = ax.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
-        ax.axis('tight')
+        b1 = ax.scatter(X_train[:, 0], X_train[:, 1], s=s, c="white", edgecolors="k")
+        b2 = ax.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s)
+        c = ax.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s)
+        ax.axis("tight")
         ax.set_xlim((-6, 6))
         ax.set_ylim((-6, 6))
 
-        ax.set_title("%s %s (%d/%d, %d/%d, %d/%d)"
-                     % (model_name, kernel_name,
-                        n_error_train, len(X_train),
-                        n_error_test, len(X_test),
-                        n_error_outliers, len(X_outliers)))
-
-        ax.legend([a.collections[0], b1, b2, c],
-                  ["learned frontier", "training observations",
-                   "new regular observations", "new abnormal observations"],
-                  loc="lower right",
-                  prop=matplotlib.font_manager.FontProperties(size=10))
+        ax.set_title(
+            "%s %s (%d/%d, %d/%d, %d/%d)"
+            % (
+                model_name,
+                kernel_name,
+                n_error_train,
+                len(X_train),
+                n_error_test,
+                len(X_test),
+                n_error_outliers,
+                len(X_outliers),
+            )
+        )
+
+        ax.legend(
+            [a.collections[0], b1, b2, c],
+            [
+                "learned frontier",
+                "training observations",
+                "new regular observations",
+                "new abnormal observations",
+            ],
+            loc="lower right",
+            prop=matplotlib.font_manager.FontProperties(size=10),
+        )
 
 plt.show()

sklearn/svm/_classes.py

Lines changed: 11 additions & 4 deletions
@@ -1713,7 +1713,7 @@ class SVDD(OutlierMixin, BaseLibSVM):
 
     Read more in the :ref:`User Guide <svm_outlier_detection>`.
 
-    ..versionadded: 1.0
+    ..versionadded: 1.1
 
     Parameters
     ----------
@@ -1788,7 +1788,9 @@ class SVDD(OutlierMixin, BaseLibSVM):
     n_features_in_ : int
         Number of features seen during :term:`fit`.
 
-        .. versionadded:: 0.24
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
 
     n_support_ : ndarray of shape (n_classes,), dtype=int32
         Number of support vectors for each class.
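A short illustration of the attribute documented in the hunk above (a sketch, assuming the fork is installed; the frame and its column names are made up, and `feature_names_in_` is only set when `X` carries string column names, e.g. a pandas DataFrame):

import pandas as pd
from sklearn import svm

X = pd.DataFrame({"x1": [0.0, 0.1, -0.1, 0.2], "x2": [1.0, 0.9, 1.1, 1.0]})
clf = svm.SVDD(nu=0.5).fit(X)
print(clf.n_features_in_)     # 2
print(clf.feature_names_in_)  # ['x1' 'x2']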
@@ -1877,8 +1879,8 @@ def fit(self, X, y=None, sample_weight=None, **params):
         Parameters
         ----------
         X : {array-like, sparse matrix} of shape (n_samples, n_features)
-            Set of samples, where n_samples is the number of samples and
-            n_features is the number of features.
+            Set of samples, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
 
         y : Ignored
             Not used, present for API consistency by convention.
@@ -1890,6 +1892,11 @@ def fit(self, X, y=None, sample_weight=None, **params):
         **params : dict
             Additional fit parameters.
 
+            .. deprecated:: 1.0
+               The `fit` method will not longer accept extra keyword
+               parameters in 1.2. These keyword parameters were
+               already discarded.
+
         Returns
         -------
         self : object

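For context, a minimal sketch of how a deprecation like the one documented above is typically surfaced at the top of `fit`. This is hypothetical code, not taken from this commit; the actual handling, if any, would live in `BaseLibSVM.fit`:

import warnings

def fit(self, X, y=None, sample_weight=None, **params):
    """Fit the model (sketch: only the deprecation handling is shown)."""
    if params:
        # The extra keywords were always discarded; warn now, remove in 1.2.
        warnings.warn(
            "Passing additional keyword parameters to fit is deprecated "
            f"in 1.0 and will raise an error in 1.2; got {sorted(params)}.",
            FutureWarning,
        )
    # ... the usual libsvm fitting path follows ...
    return self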