
Commit 3ed7200

lukeyeager authored and apaszke committed
[pep8] Fix most remaining lint manually
1 parent e7c1e6a commit 3ed7200


41 files changed: +179 −154 lines

test/test_autograd.py

Lines changed: 1 addition & 1 deletion
@@ -1079,7 +1079,7 @@ def check(name):
         try:
             check(inplace_name)
         except Exception as e:
-            if not 'only supports scalar' in e.args[0]:
+            if 'only supports scalar' not in e.args[0]:
                 raise
 
     assert not hasattr(TestAutograd, test_name), 'Two tests have the same name: ' + test_name
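
This is pep8's E713 ("test for membership should be 'not in x'"): `not x in y` parses as `not (x in y)`, so both spellings are equivalent, but `not in` reads as the single comparison operator it is. A minimal standalone sketch (the message text is illustrative, not taken from the test):

msg = 'this operation only supports scalar values'
# E713 would flag the next line: the `not` negates the whole membership
# test, but visually it looks like it applies to the string alone
old_style = not 'scalar' in msg
new_style = 'scalar' not in msg   # same semantics, clearer
assert old_style == new_style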

test/test_legacy_nn.py

Lines changed: 7 additions & 4 deletions
@@ -927,8 +927,10 @@ def test_DepthConcat(self):
         gradInputConcat = concat.backward(input, gradOutput)
         # the spatial dims are the largest, the nFilters is the sum
         output = torch.Tensor(2, int(outputSize.sum()), 12, 12).zero_()  # zero for padding
-        narrows = ((slice(None), slice(0, 5), slice(None), slice(None)), (slice(None), slice(5, 11), slice(1, 11), slice(
-            1, 11)), (slice(None), slice(11, 18), slice(1, 10), slice(1, 10)), (slice(None), slice(18, 26), slice(2, 10), slice(2, 10)))
+        narrows = ((slice(None), slice(0, 5), slice(None), slice(None)),
+                   (slice(None), slice(5, 11), slice(1, 11), slice(1, 11)),
+                   (slice(None), slice(11, 18), slice(1, 10), slice(1, 10)),
+                   (slice(None), slice(18, 26), slice(2, 10), slice(2, 10)))
         gradInput = input.clone().zero_()
         for i in range(4):
             conv = concat.get(i)
@@ -1120,8 +1122,9 @@ def test_ParallelCriterion(self):
         pc = nn.ParallelCriterion().add(nll, 0.5).add(mse)
         pc2 = nn.ParallelCriterion().add(nll2, 0.4).add(pc)
         output = pc2.forward(input, target)
-        output2 = nll2.forward(input[0], target[0]) * 0.4 + nll.forward(input[1][0],
-                                                                        target[1][0]) / 2 + mse.forward(input[1][1], target[1][1])
+        output2 = (nll2.forward(input[0], target[0]) * 0.4 +
+                   nll.forward(input[1][0], target[1][0]) / 2 +
+                   mse.forward(input[1][1], target[1][1]))
         self.assertEqual(output, output2)
         gradInput2 = [
             nll2.backward(input[0], target[0]).clone().mul(0.4),
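
Both hunks above are the usual fix for over-long lines (E501): wrap the right-hand side in parentheses so Python continues the expression implicitly, and break at term boundaries rather than mid-call. A standalone sketch with made-up operands:

first, second, third = 1.0, 2.0, 3.0
# implicit continuation inside parentheses -- no backslashes, one term per line
total = (first * 0.4 +
         second / 2 +
         third)
assert abs(total - 4.4) < 1e-9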

tools/cwrap/cwrap.py

Lines changed: 3 additions & 3 deletions
@@ -98,10 +98,10 @@ def wrap_declarations(self, declarations):
     def set_declaration_defaults(self, declaration):
         declaration.setdefault('arguments', [])
         declaration.setdefault('return', 'void')
-        if not 'cname' in declaration:
+        if 'cname' not in declaration:
             declaration['cname'] = declaration['name']
         # Simulate multiple dispatch, even if it's not necessary
-        if not 'options' in declaration:
+        if 'options' not in declaration:
             declaration['options'] = [{'arguments': declaration['arguments']}]
             del declaration['arguments']
         # Parse arguments (some of them can be strings)
@@ -183,7 +183,7 @@ def map_selected_arguments(self, base_fn_name, plugin_fn_name, option, arguments
 
     def generate_option(self, option, is_first):
         checked_args = list(filter(
-            lambda arg: not 'ignore_check' in arg or not arg['ignore_check'],
+            lambda arg: 'ignore_check' not in arg or not arg['ignore_check'],
             option['arguments']))
         option['num_checked_args'] = len(checked_args)
         idx_args = list(filter(

tools/cwrap/plugins/CuDNNPlugin.py

Lines changed: 2 additions & 1 deletion
@@ -124,7 +124,8 @@ def process_declarations(self, declarations):
 
     def filter_unique_options(self, options):
         def signature(option):
-            return '#'.join(arg['type'] for arg in option['arguments'] if not 'ignore_check' in arg or not arg['ignore_check'])
+            return '#'.join(arg['type'] for arg in option['arguments']
+                            if 'ignore_check' not in arg or not arg['ignore_check'])
         seen_signatures = set()
         unique = []
         for option in options:
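
Here and in tools/cwrap/cwrap.py, the predicate `'ignore_check' not in arg or not arg['ignore_check']` keeps any argument whose ignore_check flag is absent or falsy. Not part of this commit, but worth noting: for a plain dict this is equivalent to `not arg.get('ignore_check', False)`, the form THPPlugin.py below already uses. A quick check:

for arg in ({'type': 'THTensor*'},                    # flag absent
            {'type': 'int', 'ignore_check': False},   # flag falsy
            {'type': 'long', 'ignore_check': True}):  # flag truthy
    keep_long = 'ignore_check' not in arg or not arg['ignore_check']
    keep_short = not arg.get('ignore_check', False)
    assert keep_long == keep_short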

tools/cwrap/plugins/ReturnArguments.py

Lines changed: 2 additions & 1 deletion
@@ -17,4 +17,5 @@ def get_return_wrapper(self, option):
         if len(args) == 1:
             return Template(self.ARGUMENT_RETURN_TEMPLATE.safe_substitute(arg=accessors[0]))
         else:
-            return Template(self.TUPLE_RETURN_TEMPLATE.safe_substitute(num_args=len(args), args=', '.join(accessors)))
+            return Template(self.TUPLE_RETURN_TEMPLATE.safe_substitute(num_args=len(args),
+                                                                       args=', '.join(accessors)))

tools/cwrap/plugins/THPPlugin.py

Lines changed: 8 additions & 8 deletions
@@ -199,8 +199,8 @@ def format_arg(arg, var_args=False):
     def format_args(args, var_args=False):
         option_desc = [format_arg(arg, var_args)
                        for arg in args
-                       if not arg.get('ignore_check', False)
-                       and not arg.get('output')]
+                       if not arg.get('ignore_check', False) and
+                       not arg.get('output')]
         output_args = list(filter(lambda a: a.get('output'), args))
         if output_args:
             if len(output_args) > 1:
@@ -392,12 +392,12 @@ def declare_methods(self, stateless, sparse):
     def process_full_file(self, code):
         # We have to find a place before all undefs
         idx = code.find('// PUT DEFINITIONS IN HERE PLEASE')
-        return (code[:idx]
-                + self.declare_methods(False, False)
-                + self.declare_methods(True, False)
-                + self.declare_methods(False, True)
-                + self.declare_methods(True, True)
-                + code[idx:]
+        return (code[:idx] +
+                self.declare_methods(False, False) +
+                self.declare_methods(True, False) +
+                self.declare_methods(False, True) +
+                self.declare_methods(True, True) +
+                code[idx:]
                 )
 
     def preprocessor_guard(self, code, condition):
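
Both hunks in this file move binary operators (`and`, `+`) from the start of a continuation line to the end of the previous one. The pep8 checker used here evidently flagged breaks before binary operators (W503); the current PEP 8 text actually permits, and even leans toward, breaking before operators, so this is a tool-driven convention rather than a hard rule. A standalone sketch of the resulting style, with illustrative strings:

prefix, body, suffix = '<head>', '<generated>', '<tail>'
# trailing binary operators, implicit continuation via parentheses
full = (prefix +
        body +
        suffix)
assert full == '<head><generated><tail>'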

tools/nnwrap/generate_wrappers.py

Lines changed: 1 addition & 1 deletion
(The removed and added lines below differ only in whitespace; the continuation indentation was realigned.)

@@ -93,7 +93,7 @@ def wrap_function(name, type, arguments):
             declaration += prefix + TYPE_TRANSFORMS[type].get(arg.type, arg.type) + ' ' + arg.name + '\n'
         else:
             t = TYPE_TRANSFORMS[type].get(arg.type, arg.type)
-            declaration += prefix + 'type: ' + t + '\n' + \
+            declaration += prefix + 'type: ' + t + '\n' + \
                 dict_indent + 'name: ' + arg.name + '\n' + \
                 dict_indent + 'nullable: True' + '\n'
     declaration += ']]\n\n\n'

torch/__init__.py

Lines changed: 2 additions & 2 deletions
(Both hunks only strip trailing whitespace; the visible text is unchanged.)

@@ -105,7 +105,7 @@ def get_rng_state():
 
 
 def manual_seed(seed):
-    r"""Sets the seed for generating random numbers. And returns a
+    r"""Sets the seed for generating random numbers. And returns a
     `torch._C.Generator` object.
 
     Args:
@@ -115,7 +115,7 @@ def manual_seed(seed):
 
 
 def initial_seed():
-    r"""Returns the initial seed for generating random numbers as a
+    r"""Returns the initial seed for generating random numbers as a
     python `long`.
     """
     return default_generator.initial_seed()

torch/_torch_docs.py

Lines changed: 23 additions & 23 deletions
(Every hunk in this file strips trailing whitespace from docstrings; each removed/added pair is otherwise identical.)

@@ -1469,13 +1469,13 @@
 r"""
 geqrf(input, out=None) -> (Tensor, Tensor)
 
-This is a low-level function for calling LAPACK directly.
+This is a low-level function for calling LAPACK directly.
 
 You'll generally want to use :func:`torch.qr` instead.
 
-Computes a QR decomposition of :attr:`input`, but without constructing `Q` and `R` as explicit separate matrices.
+Computes a QR decomposition of :attr:`input`, but without constructing `Q` and `R` as explicit separate matrices.
 
-Rather, this directly calls the underlying LAPACK function `?geqrf` which produces a sequence of 'elementary reflectors'.
+Rather, this directly calls the underlying LAPACK function `?geqrf` which produces a sequence of 'elementary reflectors'.
 
 See `LAPACK documentation`_ for further details.
 
@@ -1517,17 +1517,17 @@
 gesv(B, A, out=None) -> (Tensor, Tensor)
 
 `X, LU = torch.gesv(B, A)` returns the solution to the system of linear
-equations represented by :math:`AX = B`
+equations represented by :math:`AX = B`
 
 `LU` contains `L` and `U` factors for LU factorization of `A`.
 
 :attr:`A` has to be a square and non-singular matrix (2D Tensor).
 
-If `A` is an `m x m` matrix and `B` is `m x k`,
+If `A` is an `m x m` matrix and `B` is `m x k`,
 the result `LU` is `m x m` and `X` is `m x k` .
 
-.. note:: Irrespective of the original strides, the returned matrices
-          `X` and `LU` will be transposed, i.e. with strides `(1, m)`
+.. note:: Irrespective of the original strides, the returned matrices
+          `X` and `LU` will be transposed, i.e. with strides `(1, m)`
           instead of `(m, 1)`.
 
 Args:
@@ -3052,19 +3052,19 @@
 """
 qr(input, out=None) -> (Tensor, Tensor)
 
-Computes the QR decomposition of a matrix :attr:`input`: returns matrices
-`q` and `r` such that :math:`x = q * r`, with `q` being an orthogonal matrix
-and `r` being an upper triangular matrix.
+Computes the QR decomposition of a matrix :attr:`input`: returns matrices
+`q` and `r` such that :math:`x = q * r`, with `q` being an orthogonal matrix
+and `r` being an upper triangular matrix.
 
 This returns the thin (reduced) QR factorization.
 
 .. note:: precision may be lost if the magnitudes of the elements of `input` are large
 
-.. note:: while it should always give you a valid decomposition, it may not
-          give you the same one across platforms - it will depend on your
+.. note:: while it should always give you a valid decomposition, it may not
+          give you the same one across platforms - it will depend on your
           LAPACK implementation.
 
-.. note:: Irrespective of the original strides, the returned matrix `q` will be
+.. note:: Irrespective of the original strides, the returned matrix `q` will be
           transposed, i.e. with strides `(1, m)` instead of `(m, 1)`.
 
 Args:
@@ -3708,7 +3708,7 @@
 """
 svd(input, some=True, out=None) -> (Tensor, Tensor, Tensor)
 
-`U, S, V = torch.svd(A)` returns the singular value decomposition of a
+`U, S, V = torch.svd(A)` returns the singular value decomposition of a
 real matrix `A` of size `(n x m)` such that :math:`A = USV'*`.
 
 `U` is of shape `n x n`
@@ -3717,10 +3717,10 @@
 
 `V` is of shape `m x m`.
 
-:attr:`some` represents the number of singular values to be computed.
+:attr:`some` represents the number of singular values to be computed.
 If `some=True`, it computes some and `some=False` computes all.
 
-.. note:: Irrespective of the original strides, the returned matrix `U`
+.. note:: Irrespective of the original strides, the returned matrix `U`
           will be transposed, i.e. with strides `(1, n)` instead of `(n, 1)`.
 
 Args:
@@ -3783,26 +3783,26 @@
 """
 symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)
 
-`e, V = torch.symeig(input)` returns eigenvalues and eigenvectors
+`e, V = torch.symeig(input)` returns eigenvalues and eigenvectors
 of a symmetric real matrix :attr:`input`.
 
 `input` and `V` are `m x m` matrices and `e` is a `m` dimensional vector.
 
-This function calculates all eigenvalues (and vectors) of `input`
+This function calculates all eigenvalues (and vectors) of `input`
 such that `input = V diag(e) V'`
 
-The boolean argument :attr:`eigenvectors` defines computation of
+The boolean argument :attr:`eigenvectors` defines computation of
 eigenvectors or eigenvalues only.
 
-If it is `False`, only eigenvalues are computed. If it is `True`,
+If it is `False`, only eigenvalues are computed. If it is `True`,
 both eigenvalues and eigenvectors are computed.
 
-Since the input matrix `input` is supposed to be symmetric,
-only the upper triangular portion is used by default.
+Since the input matrix `input` is supposed to be symmetric,
+only the upper triangular portion is used by default.
 
 If :attr:`upper` is `False`, then lower triangular portion is used.
 
-Note: Irrespective of the original strides, the returned matrix `V` will
+Note: Irrespective of the original strides, the returned matrix `V` will
 be transposed, i.e. with strides `(1, m)` instead of `(m, 1)`.
 
 Args:

torch/autograd/variable.py

Lines changed: 2 additions & 1 deletion
@@ -152,7 +152,8 @@ def backward(self, gradient=None, retain_variables=False):
         if gradient is None and self.requires_grad:
             if self.data.numel() != 1:
                 raise RuntimeError(
-                    'backward should be called only on a scalar (i.e. 1-element tensor) or with gradient w.r.t. the variable')
+                    'backward should be called only on a scalar (i.e. 1-element tensor) '
+                    'or with gradient w.r.t. the variable')
             gradient = self.data.new().resize_as_(self.data).fill_(1)
         self._execution_engine.run_backward((self,), (gradient,), retain_variables)
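
The fix splits the over-long message with implicit concatenation of adjacent string literals: Python fuses them at compile time, so the runtime string is unchanged and no `+` or backslash is needed. A standalone sketch with a shortened message:

# adjacent literals inside the parentheses become one string; note the
# trailing space at the end of the first piece so the join reads correctly
msg = ('first half of a long error message '
       'and the second half of the same message')
assert msg == 'first half of a long error message and the second half of the same message'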
