@@ -176,9 +176,10 @@ which produces a new array that contains all but
the last entry of ``digits.data``::

  >>> clf.fit(digits.data[:-1], digits.target[:-1]) # doctest: +NORMALIZE_WHITESPACE
-  SVC(C=100.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-      gamma=0.001, kernel='rbf', max_iter=-1, probability=False,
-      random_state=None, shrinking=True, tol=0.001, verbose=False)
+  SVC(C=100.0, cache_size=200, class_weight=None, coef0=0.0,
+      decision_function_shape=None, degree=3, gamma=0.001, kernel='rbf',
+      max_iter=-1, probability=False, random_state=None, shrinking=True,
+      tol=0.001, verbose=False)

Now you can predict new values; in particular, we can ask the
classifier what is the digit of our last image in the ``digits`` dataset,
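The sentence above continues past this hunk with the prediction itself. A minimal sketch of that step, assuming the ``clf`` fitted in the block above (the exact label returned depends on the trained model)::

  >>> clf.predict(digits.data[-1:])   # slicing with [-1:] keeps the 2-D shape predict expects
  array([8])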
@@ -214,9 +215,10 @@ persistence model, namely `pickle <http://docs.python.org/library/pickle.html>`_
  >>> iris = datasets.load_iris()
  >>> X, y = iris.data, iris.target
  >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0,
-      kernel='rbf', max_iter=-1, probability=False, random_state=None,
-      shrinking=True, tol=0.001, verbose=False)
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+      decision_function_shape=None, degree=3, gamma=0.0, kernel='rbf',
+      max_iter=-1, probability=False, random_state=None, shrinking=True,
+      tol=0.001, verbose=False)

  >>> import pickle
  >>> s = pickle.dumps(clf)
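To round out the persistence example, a minimal sketch of the reverse direction, assuming the ``s`` produced above: ``pickle.loads`` rebuilds the estimator, which then predicts like the original (the label shown is illustrative)::

  >>> clf2 = pickle.loads(s)      # restore the fitted estimator from the byte string
  >>> clf2.predict(X[0:1])        # the restored model is immediately usable
  array([0])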
@@ -287,18 +289,20 @@ maintained::

  >>> iris = datasets.load_iris()
  >>> clf = SVC()
-  >>> clf.fit(iris.data, iris.target)
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0,
-      kernel='rbf', max_iter=-1, probability=False, random_state=None,
-      shrinking=True, tol=0.001, verbose=False)
+  >>> clf.fit(iris.data, iris.target) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+      decision_function_shape=None, degree=3, gamma=0.0, kernel='rbf',
+      max_iter=-1, probability=False, random_state=None, shrinking=True,
+      tol=0.001, verbose=False)

  >>> list(clf.predict(iris.data[:3]))
  [0, 0, 0]

-  >>> clf.fit(iris.data, iris.target_names[iris.target])
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0,
-      kernel='rbf', max_iter=-1, probability=False, random_state=None,
-      shrinking=True, tol=0.001, verbose=False)
+  >>> clf.fit(iris.data, iris.target_names[iris.target]) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+      decision_function_shape=None, degree=3, gamma=0.0, kernel='rbf',
+      max_iter=-1, probability=False, random_state=None, shrinking=True,
+      tol=0.001, verbose=False)

  >>> list(clf.predict(iris.data[:3])) # doctest: +NORMALIZE_WHITESPACE
  ['setosa', 'setosa', 'setosa']
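The string predictions come from the labels passed to ``fit``: ``iris.target_names[iris.target]`` uses NumPy integer (fancy) indexing to replace each integer class with its name, so ``predict`` returns strings as well. A minimal sketch of that mapping (the dtype shown may differ across NumPy builds)::

  >>> iris.target_names
  array(['setosa', 'versicolor', 'virginica'], dtype='<U10')
  >>> iris.target_names[iris.target][:3]   # integer labels 0, 0, 0 mapped to their names
  array(['setosa', 'setosa', 'setosa'], dtype='<U10')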
@@ -324,17 +328,19 @@ more than once will overwrite what was learned by any previous ``fit()``::
  >>> X_test = rng.rand(5, 10)

  >>> clf = SVC()
-  >>> clf.set_params(kernel='linear').fit(X, y)
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0,
-      kernel='linear', max_iter=-1, probability=False, random_state=None,
-      shrinking=True, tol=0.001, verbose=False)
+  >>> clf.set_params(kernel='linear').fit(X, y) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+      decision_function_shape=None, degree=3, gamma=0.0, kernel='linear',
+      max_iter=-1, probability=False, random_state=None, shrinking=True,
+      tol=0.001, verbose=False)
  >>> clf.predict(X_test)
  array([1, 0, 1, 1, 0])

-  >>> clf.set_params(kernel='rbf').fit(X, y)
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0,
-      kernel='rbf', max_iter=-1, probability=False, random_state=None,
-      shrinking=True, tol=0.001, verbose=False)
+  >>> clf.set_params(kernel='rbf').fit(X, y) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+      decision_function_shape=None, degree=3, gamma=0.0, kernel='rbf',
+      max_iter=-1, probability=False, random_state=None, shrinking=True,
+      tol=0.001, verbose=False)
  >>> clf.predict(X_test)
  array([0, 0, 0, 1, 0])
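``set_params`` has a read-only counterpart, ``get_params``, which every estimator provides and which returns the current hyper-parameters as a dict; after the second call above it reflects the ``rbf`` kernel. A minimal sketch::

  >>> clf.get_params()['kernel']   # inspect the value set by set_params
  'rbf'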