@@ -176,9 +176,10 @@ which produces a new array that contains all but
 the last entry of ``digits.data``::

   >>> clf.fit(digits.data[:-1], digits.target[:-1]) # doctest: +NORMALIZE_WHITESPACE
-  SVC(C=100.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-    gamma=0.001, kernel='rbf', max_iter=-1, probability=False,
-    random_state=None, shrinking=True, tol=0.001, verbose=False)
+  SVC(C=100.0, cache_size=200, class_weight=None, coef0=0.0,
+    decision_function_shape=None, degree=3, gamma=0.001, kernel='rbf',
+    max_iter=-1, probability=False, random_state=None, shrinking=True,
+    tol=0.001, verbose=False)

 Now you can predict new values, in particular, we can ask to the
 classifier what is the digit of our last image in the ``digits`` dataset,
@@ -214,9 +215,10 @@ persistence model, namely `pickle <http://docs.python.org/library/pickle.html>`_
   >>> iris = datasets.load_iris()
   >>> X, y = iris.data, iris.target
   >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-    gamma='auto', kernel='rbf', max_iter=-1, probability=False,
-    random_state=None, shrinking=True, tol=0.001, verbose=False)
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+    decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
+    max_iter=-1, probability=False, random_state=None, shrinking=True,
+    tol=0.001, verbose=False)

   >>> import pickle
   >>> s = pickle.dumps(clf)
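The hunk stops at ``pickle.dumps``; the round trip this example is building up to continues roughly as below, reusing the ``clf``, ``X`` and ``y`` from the surrounding context (a sketch, not part of the diff)::

    >>> clf2 = pickle.loads(s)     # rebuild an equivalent estimator from the byte string
    >>> clf2.predict(X[0:1])       # the unpickled model predicts like the original
    array([0])
    >>> y[0]
    0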
@@ -286,18 +288,20 @@ maintained::
   >>> from sklearn.svm import SVC
   >>> iris = datasets.load_iris()
   >>> clf = SVC()
-  >>> clf.fit(iris.data, iris.target)
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-    gamma='auto', kernel='rbf', max_iter=-1, probability=False,
-    random_state=None, shrinking=True, tol=0.001, verbose=False)
+  >>> clf.fit(iris.data, iris.target) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+    decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
+    max_iter=-1, probability=False, random_state=None, shrinking=True,
+    tol=0.001, verbose=False)

   >>> list(clf.predict(iris.data[:3]))
   [0, 0, 0]

-  >>> clf.fit(iris.data, iris.target_names[iris.target])
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-    gamma='auto', kernel='rbf', max_iter=-1, probability=False,
-    random_state=None, shrinking=True, tol=0.001, verbose=False)
+  >>> clf.fit(iris.data, iris.target_names[iris.target]) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+    decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
+    max_iter=-1, probability=False, random_state=None, shrinking=True,
+    tol=0.001, verbose=False)

   >>> list(clf.predict(iris.data[:3])) # doctest: +NORMALIZE_WHITESPACE
   ['setosa', 'setosa', 'setosa']
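The second ``fit`` call above works because ``iris.target_names[iris.target]`` is plain NumPy integer-array indexing: every integer code in ``target`` is replaced by its string name, so the classifier is trained on, and therefore predicts, strings. A rough sketch of that indexing step (the dtype shown in the repr depends on the NumPy and Python versions)::

    >>> import numpy as np
    >>> names = np.array(['setosa', 'versicolor', 'virginica'])
    >>> names[np.array([0, 2, 1, 0])]   # each code is mapped to the corresponding name
    array(['setosa', 'virginica', 'versicolor', 'setosa'], dtype='<U10')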
@@ -323,17 +327,19 @@ more than once will overwrite what was learned by any previous ``fit()``::
   >>> X_test = rng.rand(5, 10)

   >>> clf = SVC()
-  >>> clf.set_params(kernel='linear').fit(X, y)
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-    gamma='auto', kernel='linear', max_iter=-1, probability=False,
-    random_state=None, shrinking=True, tol=0.001, verbose=False)
+  >>> clf.set_params(kernel='linear').fit(X, y) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+    decision_function_shape=None, degree=3, gamma='auto', kernel='linear',
+    max_iter=-1, probability=False, random_state=None, shrinking=True,
+    tol=0.001, verbose=False)
   >>> clf.predict(X_test)
   array([1, 0, 1, 1, 0])

-  >>> clf.set_params(kernel='rbf').fit(X, y)
-  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
-    gamma='auto', kernel='rbf', max_iter=-1, probability=False,
-    random_state=None, shrinking=True, tol=0.001, verbose=False)
+  >>> clf.set_params(kernel='rbf').fit(X, y) # doctest: +NORMALIZE_WHITESPACE
+  SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
+    decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
+    max_iter=-1, probability=False, random_state=None, shrinking=True,
+    tol=0.001, verbose=False)
   >>> clf.predict(X_test)
   array([0, 0, 0, 1, 0])

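All of the expected ``SVC(...)`` blocks in this file change for the same reason: ``SVC`` gained a ``decision_function_shape`` parameter (introduced around scikit-learn 0.17), and these doctests compare against the estimator's ``repr``, which lists every constructor parameter. A minimal way to sanity-check this against the installed version, assuming a release that already ships the parameter::

    >>> from sklearn.svm import SVC
    >>> 'decision_function_shape' in SVC().get_params()   # present from 0.17 on, hence the new repr
    True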