
Commit 754c2d9

COSMIT pep8

1 parent dbc4689 commit 754c2d9

File tree: 101 files changed (+1003, -1001 lines)
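Most hunks below apply one of two PEP 8 fixes: wrapped lines are re-aligned with the opening delimiter, and backslash continuations give way to implicit continuation inside parentheses. A minimal before/after sketch of both conventions (illustrative values, not taken from the diff):

import numpy as np

# Before: backslash continuation with an arbitrary hanging indent.
low_rank = (1 - 0.5) * \
    np.exp(-1.0)

# After: parentheses make the continuation implicit, and the wrapped
# line aligns just past the opening delimiter.
low_rank = ((1 - 0.5) *
            np.exp(-1.0))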


sklearn/covariance/outlier_detection.py

Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@
 # License: BSD Style.

 import numpy as np
-import scipy as sp
+import scipy as sp
 from . import MinCovDet
 from ..utils import deprecated
 from ..base import ClassifierMixin

(The removed and added lines render identically here, so the change is
whitespace-only, e.g. trailing whitespace stripped.)

sklearn/datasets/base.py

Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ def get_data_home(data_home=None):
     """
     if data_home is None:
         data_home = environ.get('SCIKIT_LEARN_DATA',
-                        join('~', 'scikit_learn_data'))
+                                join('~', 'scikit_learn_data'))
     data_home = expanduser(data_home)
     if not exists(data_home):
         makedirs(data_home)

sklearn/datasets/lfw.py

Lines changed: 6 additions & 6 deletions

@@ -167,7 +167,7 @@ def _load_imgs(file_paths, slice_, color, resize):
 #

 def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
-        min_faces_per_person=0):
+                      min_faces_per_person=0):
     """Perform the actual data loading for the lfw people dataset

     This operation is meant to be cached by a joblib wrapper.

@@ -208,9 +208,9 @@ def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,


 def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
-        min_faces_per_person=None, color=False,
-        slice_=(slice(70, 195), slice(78, 172)),
-        download_if_missing=True):
+                     min_faces_per_person=None, color=False,
+                     slice_=(slice(70, 195), slice(78, 172)),
+                     download_if_missing=True):
     """Loader for the Labeled Faces in the Wild (LFW) people dataset

     This dataset is a collection of JPEG pictures of famous people

@@ -283,7 +283,7 @@ def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,


 def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
-        color=False, resize=None):
+                     color=False, resize=None):
     """Perform the actual data loading for the LFW pairs dataset

     This operation is meant to be cached by a joblib wrapper.

@@ -339,7 +339,7 @@ def load_lfw_people(download_if_missing=False, **kwargs):


 def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
-        color=False, slice_=(slice(70, 195), slice(78, 172)),
+                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
     """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

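All six lfw.py changes re-indent wrapped def signatures so the continued parameters sit directly under the first argument, one of the continuation layouts PEP 8 endorses. A small sketch with a hypothetical loader (fetch_example is not part of the diff):

# Continued parameters align under the first argument after '('.
def fetch_example(data_home=None, funneled=True, resize=0.5,
                  min_faces_per_person=None, color=False):
    """Stand-in loader; just echoes its arguments."""
    return data_home, funneled, resize, min_faces_per_person, color

print(fetch_example(resize=0.25))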

sklearn/datasets/olivetti_faces.py

Lines changed: 2 additions & 2 deletions

@@ -83,8 +83,8 @@ def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
     if not exists(data_home):
         makedirs(data_home)
     if not exists(join(data_home, TARGET_FILENAME)):
-        print 'downloading Olivetti faces from %s to %s' % (DATA_URL,
-                                                            data_home)
+        print('downloading Olivetti faces from %s to %s'
+              % (DATA_URL, data_home))
         fhandle = urllib2.urlopen(DATA_URL)
         buf = StringIO(fhandle.read())
         mfile = loadmat(buf)
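Besides re-wrapping, this hunk trades the Python 2 print statement for the call form: the call's own parentheses carry the line break, so the message wraps into 79 columns without a backslash, and the same syntax happens to be valid Python 3. A standalone sketch (placeholder URL and path, not the module's real constants):

DATA_URL = "http://example.invalid/olivettifaces.mat"  # placeholder
data_home = "~/scikit_learn_data"  # placeholder

# The parentheses of the call allow the wrap; no '\' needed.
print('downloading Olivetti faces from %s to %s'
      % (DATA_URL, data_home))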

sklearn/datasets/samples_generator.py

Lines changed: 14 additions & 13 deletions

@@ -112,13 +112,14 @@ def make_classification(n_samples=100, n_features=20, n_informative=2,
     # Count features, clusters and samples
     if n_informative + n_redundant + n_repeated > n_features:
         raise ValueError("Number of informative, redundant and repeated "
-            "features must sum to less than the number of total features")
+                         "features must sum to less than the number of total"
+                         " features")
     if 2 ** n_informative < n_classes * n_clusters_per_class:
         raise ValueError("n_classes * n_clusters_per_class must"
-            "be smaller or equal 2 ** n_informative")
+                         "be smaller or equal 2 ** n_informative")
     if weights and len(weights) not in [n_classes, n_classes - 1]:
         raise ValueError("Weights specified but incompatible with number "
-            "of classes.")
+                         "of classes.")

     n_useless = n_features - n_informative - n_redundant - n_repeated
     n_clusters = n_classes * n_clusters_per_class

@@ -186,7 +187,7 @@ def make_classification(n_samples=100, n_features=20, n_informative=2,
     if n_redundant > 0:
         B = 2 * generator.rand(n_informative, n_redundant) - 1
         X[:, n_informative:n_informative + n_redundant] = \
-                np.dot(X[:, :n_informative], B)
+            np.dot(X[:, :n_informative], B)

     # Repeat some features
     if n_repeated > 0:

@@ -485,7 +486,7 @@ def make_regression(n_samples=100, n_features=100, n_informative=10,


 def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
-        factor=.8):
+                 factor=.8):
     """Make a large circle containing a smaller circle in 2d.

     A simple toy dataset to visualize clustering and classification

@@ -526,8 +527,8 @@ def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
     inner_circ_x = outer_circ_x * factor
     inner_circ_y = outer_circ_y * factor

-    X = np.vstack((np.append(outer_circ_x, inner_circ_x),\
-                   np.append(outer_circ_y, inner_circ_y))).T
+    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
+                   np.append(outer_circ_y, inner_circ_y))).T
     y = np.hstack([np.zeros(n_samples / 2), np.ones(n_samples / 2)])
     if shuffle:
         X, y = util_shuffle(X, y, random_state=generator)

@@ -574,8 +575,8 @@ def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
     inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
     inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

-    X = np.vstack((np.append(outer_circ_x, inner_circ_x),\
-                   np.append(outer_circ_y, inner_circ_y))).T
+    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
+                   np.append(outer_circ_y, inner_circ_y))).T
     y = np.hstack([np.zeros(n_samples_in), np.ones(n_samples_out)])

     if shuffle:

@@ -793,7 +794,7 @@ def make_friedman2(n_samples=100, noise=0.0, random_state=None):
     X[:, 3] += 1

     y = (X[:, 0] ** 2
-            + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+         + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
         + noise * generator.randn(n_samples)

     return X, y

@@ -924,8 +925,8 @@ def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
     singular_ind = np.arange(n, dtype=np.float64)

     # Build the singular profile by assembling signal and noise components
-    low_rank = (1 - tail_strength) * \
-            np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
+    low_rank = ((1 - tail_strength) *
+                np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
     tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
     s = np.identity(n) * (low_rank + tail)

@@ -1105,7 +1106,7 @@ def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
     aux[aux < alpha] = 0
     aux[aux > alpha] = (smallest_coef
                         + (largest_coef - smallest_coef)
-                          * random_state.rand(np.sum(aux > alpha)))
+                        * random_state.rand(np.sum(aux > alpha)))
     aux = np.tril(aux, k=-1)

     # Permute the lines: we don't want to have assymetries in the final
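The ValueError hunks rely on adjacent string literal concatenation: side-by-side fragments are joined at compile time, so a long message wraps without '+' or a backslash as long as a separating space survives at one fragment's edge. (As an aside, the commit is deliberately cosmetic: the second message's fragments still join as "mustbe smaller", since only the indentation changed.) A minimal sketch:

# Adjacent string literals are concatenated at compile time; keep the
# separating space at the edge of one fragment.
msg = ("Number of informative, redundant and repeated "
       "features must sum to less than the number of total"
       " features")
assert "total features" in msg
print(msg)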

sklearn/datasets/species_distributions.py

Lines changed: 1 addition & 1 deletion

@@ -57,7 +57,7 @@


 def _load_coverage(F, header_length=6,
-        dtype=np.int16):
+                   dtype=np.int16):
     """
     load a coverage file.
     This will return a numpy array of the given dtype

sklearn/datasets/svmlight_format.py

Lines changed: 2 additions & 2 deletions

@@ -205,8 +205,8 @@ def load_svmlight_files(files, n_features=None, dtype=np.float64,
     r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
          for f in files]

-    if zero_based is False \
-            or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r):
+    if (zero_based is False
+            or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
         for ind in r:
             indices = ind[1]
             indices -= 1
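Here a backslash-continued condition becomes a parenthesized one. The added outer parentheses only enable the line break; they regroup nothing, because 'and' already binds tighter than 'or', so 'A or B and C' parses as 'A or (B and C)'. A runnable sketch with stand-in data:

import numpy as np

zero_based = "auto"
r = [(None, np.array([1, 3, 5])), (None, np.array([2, 4]))]  # stand-ins

# Same shape of condition as the hunk, wrapped in parentheses, not '\'.
if (zero_based is False
        or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
    print("indices look one-based; shifting to zero-based")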

sklearn/datasets/tests/test_20news.py

Lines changed: 4 additions & 6 deletions

@@ -11,16 +11,14 @@

 def test_20news():
     try:
-        data = datasets.fetch_20newsgroups(subset='all',
-                                            download_if_missing=False,
-                                            shuffle=False)
+        data = datasets.fetch_20newsgroups(
+            subset='all', download_if_missing=False, shuffle=False)
     except IOError:
         raise SkipTest("Download 20 newsgroups to run this test")

     # Extract a reduced dataset
-    data2cats = datasets.fetch_20newsgroups(subset='all',
-                                            categories=data.target_names[-1:-3:-1],
-                                            shuffle=False)
+    data2cats = datasets.fetch_20newsgroups(
+        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
     # Check that the ordering of the target_names is the same
     # as the ordering in the full dataset
     assert_equal(data2cats.target_names,
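Rather than aligning each argument under the opening parenthesis, these two calls now break immediately after it and indent the arguments one level, a hanging-indent style PEP 8 also allows; it keeps the long categories=... argument within 79 columns. A sketch with a stand-in fetcher:

def fetch_20newsgroups(subset=None, categories=None,
                       download_if_missing=True, shuffle=True):
    """Stand-in for the real fetcher; just echoes its arguments."""
    return subset, categories, shuffle

# Hanging indent: break right after '(' and indent one extra level.
data = fetch_20newsgroups(
    subset='all', download_if_missing=False, shuffle=False)
print(data)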

sklearn/datasets/tests/test_lfw.py

Lines changed: 2 additions & 2 deletions

@@ -141,8 +141,8 @@ def test_load_fake_lfw_people():
     assert_array_equal(lfw_people.target,
                        [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
     assert_array_equal(lfw_people.target_names,
-            ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
-             'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
+                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
+                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])


 @raises(ValueError)

sklearn/datasets/tests/test_samples_generator.py

Lines changed: 9 additions & 7 deletions

@@ -78,7 +78,8 @@ def test_make_regression_multitarget():
     assert_equal(X.shape, (100, 10), "X shape mismatch")
     assert_equal(y.shape, (100, 3), "y shape mismatch")
     assert_equal(c.shape, (10, 3), "coef shape mismatch")
-    assert_array_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
+    assert_array_equal(sum(c != 0.0), 3,
+                       "Unexpected number of informative features")

     # Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
     assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)

@@ -102,7 +103,7 @@ def test_make_friedman1():
     assert_equal(y.shape, (5,), "y shape mismatch")

     assert_array_almost_equal(y, 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
-                              + 20 * (X[:, 2] - 0.5) ** 2 \
+                              + 20 * (X[:, 2] - 0.5) ** 2
                               + 10 * X[:, 3] + 5 * X[:, 4])


@@ -112,9 +113,10 @@ def test_make_friedman2():
     assert_equal(X.shape, (5, 4), "X shape mismatch")
     assert_equal(y.shape, (5,), "y shape mismatch")

-    assert_array_almost_equal(y, (X[:, 0] ** 2
-                              + (X[:, 1] * X[:, 2]
-                              - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
+    assert_array_almost_equal(y,
+                              (X[:, 0] ** 2
+                               + (X[:, 1] * X[:, 2] - 1
+                                  / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)


 def test_make_friedman3():

@@ -141,8 +143,8 @@ def test_make_low_rank_matrix():


 def test_make_sparse_coded_signal():
     Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
-            n_features=10, n_nonzero_coefs=3,
-            random_state=0)
+                                       n_features=10, n_nonzero_coefs=3,
+                                       random_state=0)
     assert_equal(Y.shape, (10, 5), "Y shape mismatch")
     assert_equal(D.shape, (10, 8), "D shape mismatch")
     assert_equal(X.shape, (8, 5), "X shape mismatch")
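The first hunk here is a plain long-line (E501) fix: the assert wraps after an argument, with the continuation aligned under the opening parenthesis. A self-contained version using numpy's own test helper and stand-in data:

import numpy as np

c = np.array([[1.0, 0.0, 2.0], [0.5, 0.0, 0.0]])  # stand-in coefficients

# Wrapped to stay under 79 columns; the continuation aligns with the
# character just past the opening parenthesis.
np.testing.assert_array_equal(np.sum(c != 0.0, axis=0), [2, 0, 1],
                              "Unexpected number of informative features")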
