|
7 | 7 |
|
8 | 8 | from . import libsvm, liblinear |
9 | 9 | from . import libsvm_sparse |
10 | | -from ..base import BaseEstimator, ClassifierMixin, RegressorMixin |
| 10 | +from ..base import BaseEstimator, ClassifierMixin |
11 | 11 | from ..preprocessing import LabelEncoder |
12 | 12 | from ..utils import check_array, check_random_state, column_or_1d |
13 | 13 | from ..utils import ConvergenceWarning, compute_class_weight |
@@ -70,7 +70,7 @@ def __init__(self, impl, kernel, degree, gamma, coef0, |
70 | 70 | tol, C, nu, epsilon, shrinking, probability, cache_size, |
71 | 71 | class_weight, verbose, max_iter, random_state): |
72 | 72 |
|
73 | | - if not impl in LIBSVM_IMPL: # pragma: no cover |
| 73 | + if impl not in LIBSVM_IMPL: # pragma: no cover |
74 | 74 | raise ValueError("impl should be one of %s, %s was given" % ( |
75 | 75 | LIBSVM_IMPL, impl)) |
76 | 76 |
|
@@ -384,7 +384,7 @@ def decision_function(self, X): |
384 | 384 |
|
385 | 385 | def _validate_for_predict(self, X): |
386 | 386 | check_is_fitted(self, 'support_') |
387 | | - |
| 387 | + |
388 | 388 | X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C") |
389 | 389 | if self._sparse and not sp.isspmatrix(X): |
390 | 390 | X = sp.csr_matrix(X) |
@@ -615,10 +615,10 @@ def _get_liblinear_solver_type(multi_class, penalty, loss, dual): |
615 | 615 | 'logistic_regression': { |
616 | 616 | 'l1': {False: 6}, |
617 | 617 | 'l2': {False: 0, True: 7}}, |
618 | | - 'hinge' : { |
619 | | - 'l2' : {True: 3}}, |
| 618 | + 'hinge': { |
| 619 | + 'l2': {True: 3}}, |
620 | 620 | 'squared_hinge': { |
621 | | - 'l1': {False : 5}, |
| 621 | + 'l1': {False: 5}, |
622 | 622 | 'l2': {False: 2, True: 1}}, |
623 | 623 | 'epsilon_insensitive': { |
624 | 624 | 'l2': {True: 13}}, |
@@ -652,15 +652,15 @@ def _get_liblinear_solver_type(multi_class, penalty, loss, dual): |
652 | 652 | % (penalty, loss, dual)) |
653 | 653 | else: |
654 | 654 | return solver_num |
655 | | - |
| 655 | + |
656 | 656 | raise ValueError(('Unsupported set of arguments: %s, ' |
657 | 657 | 'Parameters: penalty=%r, loss=%r, dual=%r') |
658 | 658 | % (error_string, penalty, loss, dual)) |
659 | 659 |
|
660 | 660 |
|
661 | 661 | def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight, |
662 | 662 | penalty, dual, verbose, max_iter, tol, |
663 | | - random_state=None, multi_class='ovr', |
| 663 | + random_state=None, multi_class='ovr', |
664 | 664 | loss='logistic_regression', epsilon=0.1): |
665 | 665 | """Used by Logistic Regression (and CV) and LinearSVC. |
666 | 666 |
|
@@ -722,7 +722,7 @@ def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight, |
722 | 722 | If `crammer_singer` is chosen, the options loss, penalty and dual will |
723 | 723 | be ignored. |
724 | 724 |
|
725 | | - loss : str, {'logistic_regression', 'hinge', 'squared_hinge', |
| 725 | + loss : str, {'logistic_regression', 'hinge', 'squared_hinge', |
726 | 726 | 'epsilon_insensitive', 'squared_epsilon_insensitive'} |
727 | 727 | The loss function used to fit the model. |
728 | 728 |
|
@@ -788,7 +788,7 @@ def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight, |
788 | 788 | # LibLinear wants targets as doubles, even for classification |
789 | 789 | y_ind = np.asarray(y_ind, dtype=np.float64).ravel() |
790 | 790 | solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual) |
791 | | - raw_coef_, n_iter_ = liblinear.train_wrap( |
| 791 | + raw_coef_, n_iter_ = liblinear.train_wrap( |
792 | 792 | X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C, |
793 | 793 | class_weight_, max_iter, rnd.randint(np.iinfo('i').max), |
794 | 794 | epsilon |
|
0 commit comments