Skip to content

Commit e1b48d5

Browse files
authored
[New Configs] Add mmseg/configs folder & Support loveda, potsdam, schedules, default_runtime new configs (open-mmlab#3542)
# [New Configs] Add mmseg/configs folder & support loveda, potsdam, schedules, default_runtime new configs - As the title states, the new configs path is mmseg/configs/. - The config files for the datasets have been tested. - The purpose of this PR is to let other community members who are migrating to the new config style reference the new config files for schedules and the default runtime. Hoping for a quick merge. - Details of this task can be found at: https://github.com/AI-Tianlong/mmseg-new-config ![image](https://github.com/AI-Tianlong/mmseg-new-config/assets/50650583/04d40057-ff2c-492c-be44-52c6d34d3676)
1 parent 303e754 commit e1b48d5

File tree

10 files changed

+450
-0
lines changed

10 files changed

+450
-0
lines changed
Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms.loading import LoadImageFromFile
from mmcv.transforms.processing import (RandomFlip, RandomResize, Resize,
                                        TestTimeAug)
from mmengine.dataset.sampler import DefaultSampler, InfiniteSampler

from mmseg.datasets.loveda import LoveDADataset
from mmseg.datasets.transforms.formatting import PackSegInputs
from mmseg.datasets.transforms.loading import LoadAnnotations
from mmseg.datasets.transforms.transforms import (PhotoMetricDistortion,
                                                  RandomCrop)
from mmseg.evaluation import IoUMetric

# Dataset settings for LoveDA.
dataset_type = LoveDADataset
data_root = 'data/loveDA'
crop_size = (512, 512)

# Training pipeline: random resize, crop, flip and photometric jitter.
train_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=LoadAnnotations, reduce_zero_label=True),
    dict(
        type=RandomResize,
        scale=(2048, 512),
        ratio_range=(0.5, 2.0),
        keep_ratio=True),
    dict(type=RandomCrop, crop_size=crop_size, cat_max_ratio=0.75),
    dict(type=RandomFlip, prob=0.5),
    dict(type=PhotoMetricDistortion),
    dict(type=PackSegInputs),
]

# Test pipeline: deterministic resize only. Annotations are loaded after
# ``Resize`` because the ground truth must not go through the resize
# transform.
test_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=Resize, scale=(1024, 1024), keep_ratio=True),
    dict(type=LoadAnnotations, reduce_zero_label=True),
    dict(type=PackSegInputs),
]

# Multi-scale + horizontal-flip test-time augmentation.
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
    dict(type=LoadImageFromFile, backend_args=None),
    dict(
        type=TestTimeAug,
        transforms=[
            [
                dict(type=Resize, scale_factor=ratio, keep_ratio=True)
                for ratio in img_ratios
            ],
            [
                dict(type=RandomFlip, prob=0., direction='horizontal'),
                dict(type=RandomFlip, prob=1., direction='horizontal'),
            ],
            [dict(type=LoadAnnotations)],
            [dict(type=PackSegInputs)],
        ]),
]

train_dataloader = dict(
    batch_size=2,
    # NOTE(review): 12 workers differs from the sibling potsdam config's 4 —
    # looks like a machine-specific leftover; confirm against upstream.
    num_workers=12,
    persistent_workers=True,
    sampler=dict(type=InfiniteSampler, shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'),
        pipeline=train_pipeline))

val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type=DefaultSampler, shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'),
        pipeline=test_pipeline))

# Testing reuses the validation loader and evaluator.
test_dataloader = val_dataloader
val_evaluator = dict(type=IoUMetric, iou_metrics=['mIoU'])
test_evaluator = val_evaluator
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms.loading import LoadImageFromFile
from mmcv.transforms.processing import (RandomFlip, RandomResize, Resize,
                                        TestTimeAug)
from mmengine.dataset.sampler import DefaultSampler, InfiniteSampler

from mmseg.datasets.potsdam import PotsdamDataset
from mmseg.datasets.transforms.formatting import PackSegInputs
from mmseg.datasets.transforms.loading import LoadAnnotations
from mmseg.datasets.transforms.transforms import (PhotoMetricDistortion,
                                                  RandomCrop)
from mmseg.evaluation import IoUMetric

# Dataset settings for ISPRS Potsdam.
dataset_type = PotsdamDataset
data_root = 'data/potsdam'
crop_size = (512, 512)

# Training pipeline: random resize, crop, flip and photometric jitter.
train_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=LoadAnnotations, reduce_zero_label=True),
    dict(
        type=RandomResize,
        scale=(512, 512),
        ratio_range=(0.5, 2.0),
        keep_ratio=True),
    dict(type=RandomCrop, crop_size=crop_size, cat_max_ratio=0.75),
    dict(type=RandomFlip, prob=0.5),
    dict(type=PhotoMetricDistortion),
    dict(type=PackSegInputs),
]

# Test pipeline: deterministic resize only. Annotations are loaded after
# ``Resize`` because the ground truth must not go through the resize
# transform.
test_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=Resize, scale=(512, 512), keep_ratio=True),
    dict(type=LoadAnnotations, reduce_zero_label=True),
    dict(type=PackSegInputs),
]

# Multi-scale + horizontal-flip test-time augmentation.
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
    dict(type=LoadImageFromFile, backend_args=None),
    dict(
        type=TestTimeAug,
        transforms=[
            [
                dict(type=Resize, scale_factor=ratio, keep_ratio=True)
                for ratio in img_ratios
            ],
            [
                dict(type=RandomFlip, prob=0., direction='horizontal'),
                dict(type=RandomFlip, prob=1., direction='horizontal'),
            ],
            [dict(type=LoadAnnotations)],
            [dict(type=PackSegInputs)],
        ]),
]

train_dataloader = dict(
    batch_size=2,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type=InfiniteSampler, shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'),
        pipeline=train_pipeline))

val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type=DefaultSampler, shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'),
        pipeline=test_pipeline))

# Testing reuses the validation loader and evaluator.
test_dataloader = val_dataloader
val_evaluator = dict(
    type=IoUMetric, iou_metrics=['mIoU'])  # 'mDice', 'mFscore'
test_evaluator = val_evaluator
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.visualization import LocalVisBackend

from mmseg.models import SegTTAModel
from mmseg.visualization import SegLocalVisualizer

# Runtime environment: no cudnn autotuning, fork-based multiprocessing with
# OpenCV threading disabled, NCCL backend for distributed runs.
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

# Visualization: write artifacts to the local backend only.
vis_backends = [dict(type=LocalVisBackend)]
visualizer = dict(
    type=SegLocalVisualizer, vis_backends=vis_backends, name='visualizer')

# Logging and checkpoint-resumption defaults.
log_processor = dict(by_epoch=False)
log_level = 'INFO'
load_from = None
resume = False

# Model wrapper used when running test-time augmentation.
tta_model = dict(type=SegTTAModel)

# No default registry scope (classes are referenced directly above).
default_scope = None
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from mmengine.optim.scheduler.lr_scheduler import PolyLR
from mmengine.runner.loops import IterBasedTrainLoop, TestLoop, ValLoop
from torch.optim.sgd import SGD

from mmseg.engine import SegVisualizationHook

# Optimizer: plain SGD. lr/momentum/weight_decay were previously commented
# out, leaving the optimizer to whatever torch defaults apply (older torch
# versions require lr and would fail at build time); restored here to match
# the sibling 20k/240k/320k schedule configs.
optimizer = dict(type=SGD, lr=0.01, momentum=0.9, weight_decay=0.0005)

optim_wrapper = dict(type=OptimWrapper, optimizer=optimizer, clip_grad=None)

# Learning policy: polynomial decay over the full 160k iterations.
param_scheduler = [
    dict(
        type=PolyLR,
        eta_min=1e-4,
        power=0.9,
        begin=0,
        end=160000,
        by_epoch=False)
]

# Training schedule for 160k iterations, validating every 8k.
train_cfg = dict(type=IterBasedTrainLoop, max_iters=160000, val_interval=8000)
val_cfg = dict(type=ValLoop)
test_cfg = dict(type=TestLoop)

# Iteration-based default hooks; checkpointing aligns with the val interval.
default_hooks = dict(
    timer=dict(type=IterTimerHook),
    logger=dict(type=LoggerHook, interval=50, log_metric_by_epoch=False),
    param_scheduler=dict(type=ParamSchedulerHook),
    checkpoint=dict(type=CheckpointHook, by_epoch=False, interval=8000),
    sampler_seed=dict(type=DistSamplerSeedHook),
    visualization=dict(type=SegVisualizationHook))
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from mmengine.optim.scheduler.lr_scheduler import PolyLR
from mmengine.runner.loops import IterBasedTrainLoop, TestLoop, ValLoop
from torch.optim.sgd import SGD

from mmseg.engine import SegVisualizationHook

# Optimizer: plain SGD, wrapped without gradient clipping.
optimizer = dict(type=SGD, lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = dict(type=OptimWrapper, optimizer=optimizer, clip_grad=None)

# Learning policy: polynomial decay over the full 20k iterations.
param_scheduler = [
    dict(
        type=PolyLR,
        eta_min=1e-4,
        power=0.9,
        begin=0,
        end=20000,
        by_epoch=False)
]

# Training schedule for 20k iterations, validating every 2k.
train_cfg = dict(type=IterBasedTrainLoop, max_iters=20000, val_interval=2000)
val_cfg = dict(type=ValLoop)
test_cfg = dict(type=TestLoop)

# Iteration-based default hooks; checkpointing aligns with the val interval.
default_hooks = dict(
    timer=dict(type=IterTimerHook),
    logger=dict(type=LoggerHook, interval=50, log_metric_by_epoch=False),
    param_scheduler=dict(type=ParamSchedulerHook),
    checkpoint=dict(type=CheckpointHook, by_epoch=False, interval=2000),
    sampler_seed=dict(type=DistSamplerSeedHook),
    visualization=dict(type=SegVisualizationHook))
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from mmengine.optim.scheduler.lr_scheduler import PolyLR
from mmengine.runner.loops import IterBasedTrainLoop, TestLoop, ValLoop
from torch.optim.sgd import SGD

from mmseg.engine import SegVisualizationHook

# Optimizer: plain SGD, wrapped without gradient clipping.
optimizer = dict(type=SGD, lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = dict(type=OptimWrapper, optimizer=optimizer, clip_grad=None)

# Learning policy: polynomial decay over the full 240k iterations.
param_scheduler = [
    dict(
        type=PolyLR,
        eta_min=1e-4,
        power=0.9,
        begin=0,
        end=240000,
        by_epoch=False)
]

# Training schedule for 240k iterations, validating every 24k.
train_cfg = dict(type=IterBasedTrainLoop, max_iters=240000, val_interval=24000)
val_cfg = dict(type=ValLoop)
test_cfg = dict(type=TestLoop)

# Iteration-based default hooks; checkpointing aligns with the val interval.
default_hooks = dict(
    timer=dict(type=IterTimerHook),
    logger=dict(type=LoggerHook, interval=50, log_metric_by_epoch=False),
    param_scheduler=dict(type=ParamSchedulerHook),
    checkpoint=dict(type=CheckpointHook, by_epoch=False, interval=24000),
    sampler_seed=dict(type=DistSamplerSeedHook),
    visualization=dict(type=SegVisualizationHook))
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from mmengine.optim.scheduler.lr_scheduler import ConstantLR, LinearLR
from mmengine.runner.loops import IterBasedTrainLoop, TestLoop, ValLoop
from torch.optim.adamw import AdamW

from mmseg.engine import SegVisualizationHook
from mmseg.engine.schedulers import PolyLRRatio

# Optimizer: AdamW, wrapped without gradient clipping.
optimizer = dict(type=AdamW, lr=0.01, weight_decay=0.1)
optim_wrapper = dict(type=OptimWrapper, optimizer=optimizer, clip_grad=None)

# Learning policy: linear ramp (0-12k), polynomial decay (12k-24k), then a
# constant tail (24k-25k).
param_scheduler = [
    dict(type=LinearLR, start_factor=3e-2, begin=0, end=12000, by_epoch=False),
    dict(
        type=PolyLRRatio,
        eta_min_ratio=3e-2,
        power=0.9,
        begin=12000,
        end=24000,
        by_epoch=False),
    dict(type=ConstantLR, by_epoch=False, factor=1, begin=24000, end=25000),
]

# Training schedule for 25k iterations, validating every 1k.
train_cfg = dict(type=IterBasedTrainLoop, max_iters=25000, val_interval=1000)
val_cfg = dict(type=ValLoop)
test_cfg = dict(type=TestLoop)

# Iteration-based default hooks; checkpointing aligns with the val interval.
default_hooks = dict(
    timer=dict(type=IterTimerHook),
    logger=dict(type=LoggerHook, interval=50, log_metric_by_epoch=False),
    param_scheduler=dict(type=ParamSchedulerHook),
    checkpoint=dict(type=CheckpointHook, by_epoch=False, interval=1000),
    sampler_seed=dict(type=DistSamplerSeedHook),
    visualization=dict(type=SegVisualizationHook))
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from mmengine.optim.scheduler.lr_scheduler import PolyLR
from mmengine.runner.loops import IterBasedTrainLoop, TestLoop, ValLoop
from torch.optim.sgd import SGD

from mmseg.engine import SegVisualizationHook

# Optimizer: plain SGD, wrapped without gradient clipping.
optimizer = dict(type=SGD, lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = dict(type=OptimWrapper, optimizer=optimizer, clip_grad=None)

# Learning policy: polynomial decay over the full 320k iterations.
param_scheduler = [
    dict(
        type=PolyLR,
        eta_min=1e-4,
        power=0.9,
        begin=0,
        end=320000,
        by_epoch=False)
]

# Training schedule for 320k iterations, validating every 32k.
train_cfg = dict(type=IterBasedTrainLoop, max_iters=320000, val_interval=32000)
val_cfg = dict(type=ValLoop)
test_cfg = dict(type=TestLoop)

# Iteration-based default hooks; checkpointing aligns with the val interval.
default_hooks = dict(
    timer=dict(type=IterTimerHook),
    logger=dict(type=LoggerHook, interval=50, log_metric_by_epoch=False),
    param_scheduler=dict(type=ParamSchedulerHook),
    checkpoint=dict(type=CheckpointHook, by_epoch=False, interval=32000),
    sampler_seed=dict(type=DistSamplerSeedHook),
    visualization=dict(type=SegVisualizationHook))

0 commit comments

Comments
 (0)