from .batchnorm import _BatchNorm
from .. import functional as F


class _InstanceNorm(_BatchNorm):
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False):
        super(_InstanceNorm, self).__init__(
            num_features, eps, momentum, affine)

    def forward(self, input):
        self._check_input_dim(input)

        b, c = input.size(0), input.size(1)

        # Repeat the stored statistics and affine parameters once per sample,
        # so that every (sample, channel) pair has its own set of statistics
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)

        weight, bias = None, None
        if self.affine:
            weight = self.weight.repeat(b)
            bias = self.bias.repeat(b)

        # Fold the batch dimension into the channel dimension and apply batch
        # norm: with b * c "channels", its per-channel statistics are exactly
        # the per-instance statistics required here
        input_reshaped = input.contiguous().view(1, b * c, *input.size()[2:])

        out = F.batch_norm(
            input_reshaped, running_mean, running_var, weight, bias,
            self.training, self.momentum, self.eps)

        # Average the per-instance running statistics over the batch and write
        # them back into the module buffers
        self.running_mean.copy_(running_mean.view(b, c).mean(0))
        self.running_var.copy_(running_var.view(b, c).mean(0))

        # Reshape back to (b, c, ...)
        return out.view(b, c, *input.size()[2:])

    def eval(self):
        # InstanceNorm keeps using per-instance statistics at evaluation time,
        # so `.eval()` is intentionally a no-op; call `.train(False)` to force
        # normalization with the stored running statistics instead.
        return self
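
# A minimal sketch (not part of the module) of the reshape trick used by
# `_InstanceNorm.forward` above: folding the batch dimension into the channel
# dimension makes batch norm compute per-instance statistics. The tensor sizes
# are arbitrary and the batch_norm call mirrors the one in `forward`:
#
#     >>> import torch
#     >>> from torch.autograd import Variable
#     >>> from torch.nn import functional as F
#     >>> x = Variable(torch.randn(4, 3, 8))            # (N, C, L)
#     >>> b, c = x.size(0), x.size(1)
#     >>> x_reshaped = x.contiguous().view(1, b * c, 8)
#     >>> out = F.batch_norm(x_reshaped, torch.zeros(b * c), torch.ones(b * c),
#     ...                    None, None, True, 0.1, 1e-5)
#     >>> out = out.view(b, c, 8)                       # per-instance normalized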


class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization over a 3d input that is seen as a mini-batch.

    .. math::

        y = \frac{x - mean[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. Gamma and beta are learnable parameter vectors
    of size C (where C is the number of channels).

    During training, this layer keeps a running estimate of its computed mean
    and variance. The running estimates are kept with a default momentum of 0.1.

    At evaluation time (`.eval()`), the default behaviour of the InstanceNorm module
    stays the same, i.e. the running mean/variance is NOT used for normalization.
    One can force the use of the stored mean and variance with the `.train(False)` method.

    Args:
        num_features: num_features from an expected input of size `batch_size x num_features x length`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to `True`, gives the layer learnable affine parameters. Default: `False`

    Shape:
        - Input: :math:`(N, C, L)`
        - Output: :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm1d(100, affine=True)
        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm1d(100)
        >>> input = autograd.Variable(torch.randn(20, 100, 40))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        if input.dim() != 3:
            raise ValueError('expected 3D input (got {}D input)'
                             .format(input.dim()))
        super(InstanceNorm1d, self)._check_input_dim(input)


class InstanceNorm2d(_InstanceNorm):
    r"""Applies Instance Normalization over a 4d input that is seen as a mini-batch of 3d inputs.

    .. math::

        y = \frac{x - mean[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. Gamma and beta are learnable parameter vectors
    of size C (where C is the number of channels).

    During training, this layer keeps a running estimate of its computed mean
    and variance. The running estimates are kept with a default momentum of 0.1.

    At evaluation time (`.eval()`), the default behaviour of the InstanceNorm module
    stays the same, i.e. the running mean/variance is NOT used for normalization.
    One can force the use of the stored mean and variance with the `.train(False)` method.

    Args:
        num_features: num_features from an expected input of size `batch_size x num_features x height x width`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to `True`, gives the layer learnable affine parameters. Default: `False`

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm2d(100, affine=True)
        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm2d(100)
        >>> input = autograd.Variable(torch.randn(20, 100, 35, 45))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
        super(InstanceNorm2d, self)._check_input_dim(input)
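
# An illustrative sketch of the affine option (with the assumption, noted here,
# that the base `_BatchNorm` class registers `weight` and `bias` parameters of
# size `num_features` when `affine=True`); `_InstanceNorm.forward` then repeats
# these once per sample before calling `F.batch_norm`:
#
#     >>> m = InstanceNorm2d(100, affine=True)
#     >>> m.weight.size(), m.bias.size()   # one gamma/beta entry per channel
#     (torch.Size([100]), torch.Size([100]))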


class InstanceNorm3d(_InstanceNorm):
    r"""Applies Instance Normalization over a 5d input that is seen as a mini-batch of 4d inputs.

    .. math::

        y = \frac{x - mean[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. Gamma and beta are learnable parameter vectors
    of size C (where C is the number of channels).

    During training, this layer keeps a running estimate of its computed mean
    and variance. The running estimates are kept with a default momentum of 0.1.

    At evaluation time (`.eval()`), the default behaviour of the InstanceNorm module
    stays the same, i.e. the running mean/variance is NOT used for normalization.
    One can force the use of the stored mean and variance with the `.train(False)` method.

    Args:
        num_features: num_features from an expected input of size `batch_size x num_features x depth x height x width`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to `True`, gives the layer learnable affine parameters. Default: `False`

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm3d(100, affine=True)
        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm3d(100)
        >>> input = autograd.Variable(torch.randn(20, 100, 35, 45, 10))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(InstanceNorm3d, self)._check_input_dim(input)
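
# A minimal usage sketch of the evaluation-time behaviour described in the
# docstrings above (assumes `torch` and `torch.autograd.Variable` are imported;
# module and tensor sizes are arbitrary):
#
#     >>> m = InstanceNorm2d(3)
#     >>> x = Variable(torch.randn(2, 3, 8, 8))
#     >>> y = m.eval()(x)       # no-op eval(): still uses per-instance stats
#     >>> m.train(False)        # force use of the stored running statistics
#     >>> y = m(x)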