Skip to content

Commit 909f317

Browse files
nrbrdsoumith
authored and committed
Add nn.padding to docs fixes pytorch#1127 (pytorch#1808)
* exposed nn.padding modules * using functional
1 parent ea58190 commit 909f317

File tree

2 files changed

+152
-6
lines changed

2 files changed

+152
-6
lines changed

docs/source/nn.rst

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -175,6 +175,40 @@ Pooling Layers
175175
:members:
176176

177177

178+
Padding Layers
179+
--------------
180+
181+
:hidden:`ReflectionPad2d`
182+
~~~~~~~~~~~~~~~~~~~~~~~~~
183+
184+
.. autoclass:: ReflectionPad2d
185+
:members:
186+
187+
:hidden:`ReplicationPad2d`
188+
~~~~~~~~~~~~~~~~~~~~~~~~~~
189+
190+
.. autoclass:: ReplicationPad2d
191+
:members:
192+
193+
:hidden:`ReplicationPad3d`
194+
~~~~~~~~~~~~~~~~~~~~~~~~~~
195+
196+
.. autoclass:: ReplicationPad3d
197+
:members:
198+
199+
:hidden:`ZeroPad2d`
200+
~~~~~~~~~~~~~~~~~~~
201+
202+
.. autoclass:: ZeroPad2d
203+
:members:
204+
205+
:hidden:`ConstantPad2d`
206+
~~~~~~~~~~~~~~~~~~~~~~~
207+
208+
.. autoclass:: ConstantPad2d
209+
:members:
210+
211+
178212
Non-linear Activations
179213
----------------------------------
180214

torch/nn/modules/padding.py

Lines changed: 118 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,71 +1,183 @@
11
from .module import Module
22
from .utils import _quadruple, _ntuple
3-
from .._functions.padding import ConstantPad2d as F_ConstantPad2d
3+
from .. import functional as F
44

55
# TODO: grad_output size asserts in THNN
66

77

88
class ReflectionPad2d(Module):
    r"""Pads the input tensor by reflecting it about its boundary.

    Args:
        padding (int, tuple): size of the padding. An int applies the same
            padding on every side; a 4-tuple is interpreted as
            (paddingLeft, paddingRight, paddingTop, paddingBottom)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + paddingTop + paddingBottom`
          :math:`W_{out} = W_{in} + paddingLeft + paddingRight`

    Examples::

        >>> m = nn.ReflectionPad2d(3)
        >>> input = autograd.Variable(torch.randn(16, 3, 320, 480))
        >>> output = m(input)
        >>> # using different paddings
        >>> m = nn.ReflectionPad2d((3, 3, 6, 6))
        >>> output = m(input)

    """

    def __init__(self, padding):
        super(ReflectionPad2d, self).__init__()
        # Normalize an int or 4-tuple into the canonical
        # (left, right, top, bottom) form expected by F.pad.
        self.padding = _quadruple(padding)

    def forward(self, input):
        # All the work is delegated to the functional interface.
        return F.pad(input, self.padding, 'reflect')

    def __repr__(self):
        name = self.__class__.__name__
        return name + ' ' + str(self.padding)
1941

2042

2143
class ReplicationPad2d(Module):
    r"""Pads the input tensor by replicating its boundary values.

    Args:
        padding (int, tuple): size of the padding. An int applies the same
            padding on every side; a 4-tuple is interpreted as
            (paddingLeft, paddingRight, paddingTop, paddingBottom)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + paddingTop + paddingBottom`
          :math:`W_{out} = W_{in} + paddingLeft + paddingRight`

    Examples::

        >>> m = nn.ReplicationPad2d(3)
        >>> input = autograd.Variable(torch.randn(16, 3, 320, 480))
        >>> output = m(input)
        >>> # using different paddings
        >>> m = nn.ReplicationPad2d((3, 3, 6, 6))
        >>> output = m(input)

    """

    def __init__(self, padding):
        super(ReplicationPad2d, self).__init__()
        # Canonical (left, right, top, bottom) padding tuple.
        self.padding = _quadruple(padding)

    def forward(self, input):
        # Edge values are repeated outward ('replicate' mode).
        return F.pad(input, self.padding, 'replicate')

    def __repr__(self):
        name = self.__class__.__name__
        return name + ' ' + str(self.padding)
3276

3377

3478
class ReplicationPad3d(Module):
    r"""Pads a 5-D input tensor by replicating its boundary values.

    Args:
        padding (int, tuple): size of the padding. An int applies the same
            padding on every side; a 6-tuple is interpreted as
            (paddingLeft, paddingRight, paddingTop, paddingBottom,
            paddingFront, paddingBack)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where
          :math:`D_{out} = D_{in} + paddingFront + paddingBack`
          :math:`H_{out} = H_{in} + paddingTop + paddingBottom`
          :math:`W_{out} = W_{in} + paddingLeft + paddingRight`

    Examples::

        >>> m = nn.ReplicationPad3d(3)
        >>> input = autograd.Variable(torch.randn(16, 3, 8, 320, 480))
        >>> output = m(input)
        >>> # using different paddings
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)

    """

    def __init__(self, padding):
        super(ReplicationPad3d, self).__init__()
        # Six values are needed for 3-D padding: (l, r, t, b, front, back).
        self.padding = _ntuple(6)(padding)

    def forward(self, input):
        return F.pad(input, self.padding, 'replicate')

    def __repr__(self):
        name = self.__class__.__name__
        return name + ' ' + str(self.padding)
45113

46114

47115
class ZeroPad2d(Module):
    r"""Pads the input tensor boundaries with zeros.

    Args:
        padding (int, tuple): size of the padding. An int applies the same
            padding on every side; a 4-tuple is interpreted as
            (paddingLeft, paddingRight, paddingTop, paddingBottom)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + paddingTop + paddingBottom`
          :math:`W_{out} = W_{in} + paddingLeft + paddingRight`

    Examples::

        >>> m = nn.ZeroPad2d(3)
        >>> input = autograd.Variable(torch.randn(16, 3, 320, 480))
        >>> output = m(input)
        >>> # using different paddings
        >>> m = nn.ZeroPad2d((3, 3, 6, 6))
        >>> output = m(input)

    """

    def __init__(self, padding):
        super(ZeroPad2d, self).__init__()
        # Canonical (left, right, top, bottom) padding tuple.
        self.padding = _quadruple(padding)

    def forward(self, input):
        # Zero padding is constant padding with fill value 0.
        return F.pad(input, self.padding, 'constant', 0)

    def __repr__(self):
        name = self.__class__.__name__
        return name + ' ' + str(self.padding)
58148

59149

60150
class ConstantPad2d(Module):
    r"""Pads the input tensor boundaries with a constant value.

    Args:
        padding (int, tuple): the size of the padding. If is int, uses the same
            padding in all boundaries. If a 4-tuple, uses (paddingLeft, paddingRight, paddingTop, paddingBottom)
        value: the constant value with which the padded regions are filled

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + paddingTop + paddingBottom`
          :math:`W_{out} = W_{in} + paddingLeft + paddingRight`

    Examples::

        >>> m = nn.ConstantPad2d(3, 3.5)
        >>> input = autograd.Variable(torch.randn(16, 3, 320, 480))
        >>> output = m(input)
        >>> # using different paddings
        >>> m = nn.ConstantPad2d((3, 3, 6, 6), 3.5)
        >>> output = m(input)

    """

    def __init__(self, padding, value):
        super(ConstantPad2d, self).__init__()
        # Normalize an int or 4-tuple into (left, right, top, bottom).
        self.padding = _quadruple(padding)
        self.value = value

    def forward(self, input):
        # Delegate to the functional interface in 'constant' mode,
        # filling the new border cells with self.value.
        return F.pad(input, self.padding, 'constant', self.value)

    def __repr__(self):
        return self.__class__.__name__ + ' ' + str(self.padding)

0 commit comments

Comments
 (0)