
Commit 33e8357

authored by lizz
Fix typo: upsampe_cfg -> upsample_cfg (open-mmlab#449)
* Fix typo: upsampe_cfg -> upsample_cfg
  Signed-off-by: lizz <[email protected]>
* convoluton -> convolution
  Signed-off-by: lizz <[email protected]>
* more
  Signed-off-by: lizz <[email protected]>
* ok
  Signed-off-by: lizz <[email protected]>
1 parent f7a5d53 commit 33e8357

File tree

18 files changed: +57, -58 lines changed

mmseg/apis/test.py

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ def single_gpu_test(model,
     Args:
         model (nn.Module): Model to be tested.
         data_loader (utils.data.Dataloader): Pytorch data loader.
-        show (bool): Whether show results during infernece. Default: False.
+        show (bool): Whether show results during inference. Default: False.
         out_dir (str, optional): If specified, the results will be dumped into
             the directory to save output results.
         efficient_test (bool): Whether save the results as local numpy files to

mmseg/core/evaluation/metrics.py

Lines changed: 1 addition & 1 deletion
@@ -212,7 +212,7 @@ def eval_metrics(results,
     Returns:
         float: Overall accuracy on all images.
         ndarray: Per category accuracy, shape (num_classes, ).
-        ndarray: Per category evalution metrics, shape (num_classes, ).
+        ndarray: Per category evaluation metrics, shape (num_classes, ).
     """
     if isinstance(metrics, str):
         metrics = [metrics]

mmseg/core/seg/sampler/base_pixel_sampler.py

Lines changed: 0 additions & 1 deletion
@@ -10,4 +10,3 @@ def __init__(self, **kwargs):
     @abstractmethod
     def sample(self, seg_logit, seg_label):
         """Placeholder for sample function."""
-        pass
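The dropped `pass` is redundant: in Python, a docstring is itself a valid function body. A minimal sketch of the cleaned-up pattern (the surrounding module is simplified here):

    from abc import ABCMeta, abstractmethod


    class BasePixelSampler(metaclass=ABCMeta):
        """Base class of pixel sampler (simplified)."""

        @abstractmethod
        def sample(self, seg_logit, seg_label):
            """Placeholder for sample function."""
            # No `pass` needed: the docstring already forms the body,
            # and @abstractmethod forces subclasses to override this.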

mmseg/datasets/custom.py

Lines changed: 2 additions & 3 deletions
@@ -214,8 +214,8 @@ def prepare_test_img(self, idx):
             idx (int): Index of data.

         Returns:
-            dict: Testing data after pipeline with new keys intorduced by
-                piepline.
+            dict: Testing data after pipeline with new keys introduced by
+                pipeline.
         """

         img_info = self.img_infos[idx]
@@ -225,7 +225,6 @@ def prepare_test_img(self, idx):

     def format_results(self, results, **kwargs):
         """Place holder to format result to dataset specific output."""
-        pass

     def get_gt_seg_maps(self, efficient_test=False):
         """Get ground truth segmentation maps for evaluation."""

mmseg/datasets/pipelines/transforms.py

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ class Resize(object):
     contains the key "scale", then the scale in the input dict is used,
     otherwise the specified scale in the init method is used.

-    ``img_scale`` can be Nong, a tuple (single-scale) or a list of tuple
+    ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
     (multi-scale). There are 4 multiscale modes:

     - ``ratio_range is not None``:
@@ -89,7 +89,7 @@ def random_sample(img_scales):
         Args:
             img_scales (list[tuple]): Images scale range for sampling.
                 There must be two tuples in img_scales, which specify the lower
-                and uper bound of image scales.
+                and upper bound of image scales.

         Returns:
             (tuple, None): Returns a tuple ``(img_scale, None)``, where
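For reference, the sampling this docstring describes draws the long and short edges independently between the two bound tuples. A sketch consistent with the docstring, mirroring the mmdet-style implementation rather than quoting this file verbatim:

    import numpy as np


    def random_sample(img_scales):
        """Randomly sample an img_scale between the two bound tuples."""
        assert len(img_scales) == 2
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(min(img_scale_long),
                                      max(img_scale_long) + 1)
        short_edge = np.random.randint(min(img_scale_short),
                                       max(img_scale_short) + 1)
        return (long_edge, short_edge), None


    print(random_sample([(640, 480), (1280, 960)]))  # e.g. ((901, 712), None)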

mmseg/models/backbones/cgnet.py

Lines changed: 2 additions & 2 deletions
@@ -13,7 +13,7 @@
 class GlobalContextExtractor(nn.Module):
     """Global Context Extractor for CGNet.

-    This class is employed to refine the joFint feature of both local feature
+    This class is employed to refine the joint feature of both local feature
     and surrounding context.

     Args:
@@ -357,7 +357,7 @@ def init_weights(self, pretrained=None):
             raise TypeError('pretrained must be a str or None')

     def train(self, mode=True):
-        """Convert the model into training mode whill keeping the normalization
+        """Convert the model into training mode will keeping the normalization
         layer freezed."""
         super(CGNet, self).train(mode)
         if mode and self.norm_eval:

mmseg/models/backbones/hrnet.py

Lines changed: 1 addition & 1 deletion
@@ -545,7 +545,7 @@ def forward(self, x):
         return y_list

     def train(self, mode=True):
-        """Convert the model into training mode whill keeping the normalization
+        """Convert the model into training mode will keeping the normalization
         layer freezed."""
         super(HRNet, self).train(mode)
         if mode and self.norm_eval:
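Even after the fix, both `train()` docstrings above read awkwardly; the intended meaning is "while keeping the normalization layers frozen". The pattern itself, reduced to a self-contained sketch (`TinyBackbone` is illustrative, not an mmseg class):

    import torch.nn as nn
    from torch.nn.modules.batchnorm import _BatchNorm


    class TinyBackbone(nn.Module):
        """Illustrative backbone showing the norm_eval train() override."""

        def __init__(self, norm_eval=True):
            super().__init__()
            self.norm_eval = norm_eval
            self.conv = nn.Conv2d(3, 8, 3, padding=1)
            self.bn = nn.BatchNorm2d(8)

        def train(self, mode=True):
            """Switch to training mode while keeping norm layers frozen."""
            super().train(mode)
            if mode and self.norm_eval:
                for m in self.modules():
                    if isinstance(m, _BatchNorm):
                        m.eval()  # keep running mean/var fixed
            return self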

mmseg/models/backbones/mobilenet_v3.py

Lines changed: 3 additions & 3 deletions
@@ -19,7 +19,7 @@ class MobileNetV3(nn.Module):
     <https://ieeexplore.ieee.org/document/9008835>`_.

     Args:
-        arch (str): Architechture of mobilnetv3, from {'small', 'large'}.
+        arch (str): Architecture of mobilnetv3, from {'small', 'large'}.
             Default: 'small'.
         conv_cfg (dict): Config dict for convolution layer.
             Default: None, which means using conv2d.
@@ -28,13 +28,13 @@ class MobileNetV3(nn.Module):
         out_indices (tuple[int]): Output from which layer.
             Default: (0, 1, 12).
         frozen_stages (int): Stages to be frozen (all param fixed).
-            Defualt: -1, which means not freezing any parameters.
+            Default: -1, which means not freezing any parameters.
         norm_eval (bool): Whether to set norm layers to eval mode, namely,
             freeze running stats (mean and var). Note: Effect on Batch Norm
             and its variants only. Default: False.
         with_cp (bool): Use checkpoint or not. Using checkpoint will save
             some memory while slowing down the training speed.
-            Defualt: False.
+            Default: False.
     """
     # Parameters to build each block:
     # [kernel size, mid channels, out channels, with_se, act type, stride]

mmseg/models/backbones/unet.py

Lines changed: 14 additions & 14 deletions
@@ -35,7 +35,7 @@ class BasicConvBlock(nn.Module):
             Default: dict(type='BN').
         act_cfg (dict | None): Config dict for activation layer in ConvModule.
             Default: dict(type='ReLU').
-        dcn (bool): Use deformable convoluton in convolutional layer or not.
+        dcn (bool): Use deformable convolution in convolutional layer or not.
             Default: None.
         plugins (dict): plugins for convolutional layers. Default: None.
     """
@@ -171,7 +171,7 @@ class InterpConv(nn.Module):
         kernel_size (int): Kernel size of the convolutional layer. Default: 1.
         stride (int): Stride of the convolutional layer. Default: 1.
         padding (int): Padding of the convolutional layer. Default: 1.
-        upsampe_cfg (dict): Interpolation config of the upsample layer.
+        upsample_cfg (dict): Interpolation config of the upsample layer.
             Default: dict(
                 scale_factor=2, mode='bilinear', align_corners=False).
     """
@@ -188,7 +188,7 @@ def __init__(self,
                  kernel_size=1,
                  stride=1,
                  padding=0,
-                 upsampe_cfg=dict(
+                 upsample_cfg=dict(
                      scale_factor=2, mode='bilinear', align_corners=False)):
         super(InterpConv, self).__init__()

@@ -202,7 +202,7 @@ def __init__(self,
             conv_cfg=conv_cfg,
             norm_cfg=norm_cfg,
             act_cfg=act_cfg)
-        upsample = nn.Upsample(**upsampe_cfg)
+        upsample = nn.Upsample(**upsample_cfg)
         if conv_first:
             self.interp_upsample = nn.Sequential(conv, upsample)
         else:
@@ -232,17 +232,17 @@ class UNet(nn.Module):
         strides (Sequence[int 1 | 2]): Strides of each stage in encoder.
             len(strides) is equal to num_stages. Normally the stride of the
             first stage in encoder is 1. If strides[i]=2, it uses stride
-            convolution to downsample in the correspondance encoder stage.
+            convolution to downsample in the correspondence encoder stage.
             Default: (1, 1, 1, 1, 1).
         enc_num_convs (Sequence[int]): Number of convolutional layers in the
-            convolution block of the correspondance encoder stage.
+            convolution block of the correspondence encoder stage.
             Default: (2, 2, 2, 2, 2).
         dec_num_convs (Sequence[int]): Number of convolutional layers in the
-            convolution block of the correspondance decoder stage.
+            convolution block of the correspondence decoder stage.
             Default: (2, 2, 2, 2).
         downsamples (Sequence[int]): Whether use MaxPool to downsample the
             feature map after the first stage of encoder
-            (stages: [1, num_stages)). If the correspondance encoder stage use
+            (stages: [1, num_stages)). If the correspondence encoder stage use
             stride convolution (strides[i]=2), it will never use MaxPool to
             downsample, even downsamples[i-1]=True.
             Default: (True, True, True, True).
@@ -263,14 +263,14 @@ class UNet(nn.Module):
         norm_eval (bool): Whether to set norm layers to eval mode, namely,
             freeze running stats (mean and var). Note: Effect on Batch Norm
             and its variants only. Default: False.
-        dcn (bool): Use deformable convoluton in convolutional layer or not.
+        dcn (bool): Use deformable convolution in convolutional layer or not.
            Default: None.
         plugins (dict): plugins for convolutional layers. Default: None.

     Notice:
-        The input image size should be devisible by the whole downsample rate
+        The input image size should be divisible by the whole downsample rate
         of the encoder. More detail of the whole downsample rate can be found
-        in UNet._check_input_devisible.
+        in UNet._check_input_divisible.

     """
@@ -373,7 +373,7 @@ def __init__(self,
             in_channels = base_channels * 2**i

     def forward(self, x):
-        self._check_input_devisible(x)
+        self._check_input_divisible(x)
         enc_outs = []
         for enc in self.encoder:
             x = enc(x)
@@ -395,15 +395,15 @@ def train(self, mode=True):
             if isinstance(m, _BatchNorm):
                 m.eval()

-    def _check_input_devisible(self, x):
+    def _check_input_divisible(self, x):
         h, w = x.shape[-2:]
         whole_downsample_rate = 1
         for i in range(1, self.num_stages):
             if self.strides[i] == 2 or self.downsamples[i - 1]:
                 whole_downsample_rate *= 2
         assert (h % whole_downsample_rate == 0) \
             and (w % whole_downsample_rate == 0),\
-            f'The input image size {(h, w)} should be devisible by the whole '\
+            f'The input image size {(h, w)} should be divisible by the whole '\
             f'downsample rate {whole_downsample_rate}, when num_stages is '\
             f'{self.num_stages}, strides is {self.strides}, and downsamples '\
             f'is {self.downsamples}.'
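Note that `upsampe_cfg` was a public constructor keyword of `InterpConv`, so this rename is breaking for callers that passed it by name. A hedged usage sketch with the corrected keyword (channel counts are illustrative; `in_channels`/`out_channels` are assumed to be the leading constructor arguments, as elsewhere in mmseg):

    from mmseg.models.backbones.unet import InterpConv

    # Upsample 2x with bilinear interpolation, then apply a 1x1 conv.
    up = InterpConv(
        in_channels=64,
        out_channels=32,
        upsample_cfg=dict(  # was `upsampe_cfg` before this commit
            scale_factor=2, mode='bilinear', align_corners=False))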

mmseg/models/decode_heads/apc_head.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ class ACM(nn.Module):

     Args:
         pool_scale (int): Pooling scale used in Adaptive Context
-            Module to extract region fetures.
+            Module to extract region features.
         fusion (bool): Add one conv to fuse residual feature.
         in_channels (int): Input channels.
         channels (int): Channels after modules, before conv_seg.

mmseg/models/decode_heads/dm_head.py

Lines changed: 4 additions & 4 deletions
@@ -59,23 +59,23 @@ def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg,

     def forward(self, x):
         """Forward function."""
-        generted_filter = self.filter_gen_conv(
+        generated_filter = self.filter_gen_conv(
             F.adaptive_avg_pool2d(x, self.filter_size))
         x = self.input_redu_conv(x)
         b, c, h, w = x.shape
         # [1, b * c, h, w], c = self.channels
         x = x.view(1, b * c, h, w)
         # [b * c, 1, filter_size, filter_size]
-        generted_filter = generted_filter.view(b * c, 1, self.filter_size,
-                                               self.filter_size)
+        generated_filter = generated_filter.view(b * c, 1, self.filter_size,
+                                                 self.filter_size)
         pad = (self.filter_size - 1) // 2
         if (self.filter_size - 1) % 2 == 0:
             p2d = (pad, pad, pad, pad)
         else:
             p2d = (pad + 1, pad, pad + 1, pad)
         x = F.pad(input=x, pad=p2d, mode='constant', value=0)
         # [1, b * c, h, w]
-        output = F.conv2d(input=x, weight=generted_filter, groups=b * c)
+        output = F.conv2d(input=x, weight=generated_filter, groups=b * c)
         # [b, c, h, w]
         output = output.view(b, c, h, w)
         if self.norm is not None:
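Beyond the rename, this forward pass is a dynamic-filter trick: filters generated per sample and channel are applied in a single call by folding the batch into the channel axis and using grouped convolution. A self-contained sketch of just that trick (shapes are illustrative values, not DMNet defaults):

    import torch
    import torch.nn.functional as F

    b, c, h, w, filter_size = 2, 4, 8, 8, 3
    x = torch.randn(b, c, h, w)
    # One generated 1-channel filter per (sample, channel) pair.
    generated_filter = torch.randn(b * c, 1, filter_size, filter_size)

    # Fold the batch into the channel axis so one conv2d call applies a
    # different filter to every (sample, channel) pair via groups=b*c.
    x = x.view(1, b * c, h, w)
    pad = (filter_size - 1) // 2
    x = F.pad(x, (pad, pad, pad, pad), mode='constant', value=0)
    out = F.conv2d(x, weight=generated_filter, groups=b * c)
    out = out.view(b, c, h, w)
    print(out.shape)  # torch.Size([2, 4, 8, 8])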

mmseg/models/decode_heads/gc_head.py

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ class GCHead(FCNHead):
         pooling_type (str): The pooling type of context aggregation.
             Options are 'att', 'avg'. Default: 'avg'.
         fusion_types (tuple[str]): The fusion type for feature fusion.
-            Options are 'channel_add', 'channel_mul'. Defautl: ('channel_add',)
+            Options are 'channel_add', 'channel_mul'. Default: ('channel_add',)
     """

     def __init__(self,

mmseg/models/losses/lovasz_loss.py

Lines changed: 3 additions & 3 deletions
@@ -132,7 +132,7 @@ def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
         probs (torch.Tensor): [P, C], class probabilities at each prediction
             (between 0 and 1).
         labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1).
-        classes (str | list[int], optional): Classes choosed to calculate loss.
+        classes (str | list[int], optional): Classes chosen to calculate loss.
             'all' for all classes, 'present' for classes present in labels, or
             a list of classes to average. Default: 'present'.
         class_weight (list[float], optional): The weight for each class.
@@ -183,7 +183,7 @@ def lovasz_softmax(probs,
             prediction (between 0 and 1).
         labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and
             C - 1).
-        classes (str | list[int], optional): Classes choosed to calculate loss.
+        classes (str | list[int], optional): Classes chosen to calculate loss.
             'all' for all classes, 'present' for classes present in labels, or
             a list of classes to average. Default: 'present'.
         per_image (bool, optional): If per_image is True, compute the loss per
@@ -232,7 +232,7 @@ class LovaszLoss(nn.Module):
     Args:
         loss_type (str, optional): Binary or multi-class loss.
             Default: 'multi_class'. Options are "binary" and "multi_class".
-        classes (str | list[int], optional): Classes choosed to calculate loss.
+        classes (str | list[int], optional): Classes chosen to calculate loss.
             'all' for all classes, 'present' for classes present in labels, or
             a list of classes to average. Default: 'present'.
         per_image (bool, optional): If per_image is True, compute the loss per

mmseg/models/utils/inverted_residual.py

Lines changed: 3 additions & 3 deletions
@@ -1,5 +1,5 @@
 from mmcv.cnn import ConvModule
-from torch import nn as nn
+from torch import nn
 from torch.utils import checkpoint as cp

 from .se_layer import SELayer
@@ -101,10 +101,10 @@ class InvertedResidualV3(nn.Module):
         in_channels (int): The input channels of this Module.
         out_channels (int): The output channels of this Module.
         mid_channels (int): The input channels of the depthwise convolution.
-        kernel_size (int): The kernal size of the depthwise convolution.
+        kernel_size (int): The kernel size of the depthwise convolution.
             Default: 3.
         stride (int): The stride of the depthwise convolution. Default: 1.
-        se_cfg (dict): Config dict for se layer. Defaul: None, which means no
+        se_cfg (dict): Config dict for se layer. Default: None, which means no
             se layer.
         with_expand_conv (bool): Use expand conv or not. If set False,
             mid_channels must be the same with in_channels. Default: True.

mmseg/models/utils/se_layer.py

Lines changed: 3 additions & 3 deletions
@@ -15,10 +15,10 @@ class SELayer(nn.Module):
         conv_cfg (None or dict): Config dict for convolution layer.
             Default: None, which means using conv2d.
         act_cfg (dict or Sequence[dict]): Config dict for activation layer.
-            If act_cfg is a dict, two activation layers will be configurated
+            If act_cfg is a dict, two activation layers will be configured
             by this dict. If act_cfg is a sequence of dicts, the first
-            activation layer will be configurated by the first dict and the
-            second activation layer will be configurated by the second dict.
+            activation layer will be configured by the first dict and the
+            second activation layer will be configured by the second dict.
             Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
             divisor=6.0)).
     """

mmseg/models/utils/up_conv_block.py

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ class UpConvBlock(nn.Module):
         high-level feature map is the same as that of skip feature map
         (low-level feature map from encoder), it does not need upsample the
         high-level feature map and the upsample_cfg is None.
-        dcn (bool): Use deformable convoluton in convolutional layer or not.
+        dcn (bool): Use deformable convolution in convolutional layer or not.
             Default: None.
         plugins (dict): plugins for convolutional layers. Default: None.
     """

mmseg/ops/encoding.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import torch
2-
from torch import nn as nn
2+
from torch import nn
33
from torch.nn import functional as F
44

55

@@ -43,14 +43,14 @@ def scaled_l2(x, codewords, scale):
4343
return scaled_l2_norm
4444

4545
@staticmethod
46-
def aggregate(assigment_weights, x, codewords):
46+
def aggregate(assignment_weights, x, codewords):
4747
num_codes, channels = codewords.size()
4848
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
4949
batch_size = x.size(0)
5050

5151
expanded_x = x.unsqueeze(2).expand(
5252
(batch_size, x.size(1), num_codes, channels))
53-
encoded_feat = (assigment_weights.unsqueeze(3) *
53+
encoded_feat = (assignment_weights.unsqueeze(3) *
5454
(expanded_x - reshaped_codewords)).sum(dim=1)
5555
return encoded_feat
5656

@@ -61,10 +61,10 @@ def forward(self, x):
6161
# [batch_size, height x width, channels]
6262
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
6363
# assignment_weights: [batch_size, channels, num_codes]
64-
assigment_weights = F.softmax(
64+
assignment_weights = F.softmax(
6565
self.scaled_l2(x, self.codewords, self.scale), dim=2)
6666
# aggregate
67-
encoded_feat = self.aggregate(assigment_weights, x, self.codewords)
67+
encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
6868
return encoded_feat
6969

7070
def __repr__(self):
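For context, `aggregate` computes soft-assignment-weighted residuals between features and codewords, summed over positions. A self-contained sketch with the corrected name (shapes follow the comments in `forward`; the tensors are random, illustrative values):

    import torch

    batch_size, num_pixels, channels, num_codes = 2, 16, 8, 4
    x = torch.randn(batch_size, num_pixels, channels)
    codewords = torch.randn(num_codes, channels)
    assignment_weights = torch.softmax(
        torch.randn(batch_size, num_pixels, num_codes), dim=2)

    reshaped_codewords = codewords.view(1, 1, num_codes, channels)
    expanded_x = x.unsqueeze(2).expand(
        batch_size, num_pixels, num_codes, channels)
    # Weight each pixel's residual to every codeword, then sum over pixels.
    encoded_feat = (assignment_weights.unsqueeze(3) *
                    (expanded_x - reshaped_codewords)).sum(dim=1)
    print(encoded_feat.shape)  # torch.Size([2, 4, 8])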
