
Commit 755c082 ("small fix")

Parent: 3974d79

9 files changed: 243 additions, 156 deletions

.idea/DeepLabv3_MobileNetv2.iml

Lines changed: 1 addition & 1 deletion

.idea/misc.xml

Lines changed: 1 addition & 1 deletion

.idea/workspace.xml

Lines changed: 127 additions & 92 deletions

config.py

Lines changed: 39 additions & 37 deletions
@@ -3,47 +3,49 @@

 """ Dataset parameters """
 class Params():
-    # network structure parameters
-    model = 'MobileNetv2_DeepLabv3'
-    dataset = 'cityscapes'
-    s = [2, 1, 2, 2, 2, 1, 1]            # stride of each conv stage
-    t = [1, 1, 6, 6, 6, 6, 6]            # expansion factor t
-    n = [1, 1, 2, 3, 4, 3, 3]            # number of repeat time
-    c = [32, 16, 24, 32, 64, 96, 160]    # output channel of each conv stage
-    output_stride = 16
-    multi_grid = (1, 2, 4)
-    aspp = (6, 12, 18)
-    down_sample_rate = 32                # classic down sample rate
+    def __init__(self):
+        # network structure parameters
+        self.model = 'MobileNetv2_DeepLabv3'
+        self.dataset = 'cityscapes'
+        self.s = [2, 1, 2, 2, 2, 1, 1]            # stride of each conv stage
+        self.t = [1, 1, 6, 6, 6, 6, 6]            # expansion factor t
+        self.n = [1, 1, 2, 3, 4, 3, 3]            # number of repeat time
+        self.c = [32, 16, 24, 32, 64, 96, 160]    # output channel of each conv stage
+        self.output_stride = 16
+        self.multi_grid = (1, 2, 4)
+        self.aspp = (6, 12, 18)
+        self.down_sample_rate = 32                # classic down sample rate

-    # dataset parameters
-    image_size = 512
-    num_class = 20                       # 20 classes for training
-    dataset_root = '/path/to/your/dataset'
-    dataloader_workers = 8
-    shuffle = True
-    train_batch = 5
-    val_batch = 2
-    test_batch = 5
+        # dataset parameters
+        self.rescale_size = 600
+        self.image_size = 512
+        self.num_class = 20                       # 20 classes for training
+        self.dataset_root = '/path/to/your/dataset'
+        self.dataloader_workers = 8
+        self.shuffle = True
+        self.train_batch = 10
+        self.val_batch = 2
+        self.test_batch = 5

-    # train parameters
-    num_epoch = 50
-    base_lr = 0.001
-    power = 0.9
-    momentum = 0.9
-    dropout_prob = 0.2
-    weight_decay = 0.0005
-    should_val = True
-    val_every = 1
-    display = 1                          # show train result every display epoch
+        # train parameters
+        self.num_epoch = 50
+        self.base_lr = 0.00025
+        self.power = 0.9
+        self.momentum = 0.9
+        self.weight_decay = 0.0005
+        self.should_val = True
+        self.val_every = 1
+        self.display = 1                          # show train result every display epoch

-    # model restore parameters
-    resume_from = None                   # None for train from scratch
-    pre_trained_from = None              # None for train from scratch
-    should_save = True
-    save_every = 10
+        # model restore parameters
+        self.resume_from = None                   # None for train from scratch
+        self.pre_trained_from = None              # None for train from scratch
+        self.should_save = True
+        self.save_every = 10

-    def __init__(self):
         # create training dir
         self.summary_dir, self.ckpt_dir = create_train_dir(self)

-# if __name__ == '__main__':
+if __name__ == '__main__':
+    aa = Params()
+    print(aa)
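
The switch from class attributes to instance attributes set in __init__ means each Params() object carries its own copy of the configuration, so callers can override values per run instead of editing config.py (the rewritten main.py does exactly this). A minimal sketch of the pattern, assuming config.Params is importable; note that __init__ also calls create_train_dir, so constructing the object may create training directories, and the dataset path below is purely illustrative:

    from config import Params

    params = Params()                         # __init__ runs here, including create_train_dir(self)
    params.base_lr = 0.001                    # override per-run values on the instance
    params.dataset_root = '/data/cityscapes'  # illustrative path, not from the repository
    print(params.base_lr, params.train_batch)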

layers.py

Lines changed: 2 additions & 2 deletions
@@ -74,9 +74,9 @@ def get_inverted_residual_block_arr(in_, out_, t=6, s=1, n=1):
     return block


-class ASPP(nn.Module):
+class ASPP_plus(nn.Module):
     def __init__(self, params):
-        super(ASPP, self).__init__()
+        super(ASPP_plus, self).__init__()
         self.conv11 = nn.Sequential(nn.Conv2d(params.c[-1], 256, 1, bias=False),
                                     nn.BatchNorm2d(256))
         self.conv33_1 = nn.Sequential(nn.Conv2d(params.c[-1], 256, 3,

main.py

Lines changed: 25 additions & 15 deletions
@@ -1,28 +1,38 @@
-from __future__ import print_function
 import os
 import argparse
 from utils import create_dataset
 from network import MobileNetv2_DeepLabv3
 from config import Params
-
+from utils import print_config

 def main():
-    # parser = argparse.ArgumentParser(description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
-    # parser.add_argument('--dataset', default='cityscapes', choices=['cityscapes', 'other'],
-    #                     help='Dataset used in training MobileNet v2+DeepLab v3')
-    # parser.add_argument('--root', default='./data/cityscapes', help='Path to your dataset')
-    #
-    # args = parser.parse_args()
+    # add argumentation
+    parser = argparse.ArgumentParser(description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
+    parser.add_argument('--dataset', default='cityscapes', choices=['cityscapes', 'other'],
+                        help='Dataset used in training MobileNet v2+DeepLab v3')
+    parser.add_argument('--root', default='./data/cityscapes', help='Path to your dataset')
+    parser.add_argument('--epoch', default=50, help='Total number of training epoch')
+    parser.add_argument('--lr', default=0.00025, help='Base learning rate')
+    parser.add_argument('--pretrain', default=None, help='Path to a pre-trained backbone model')
+    parser.add_argument('--resume_from', default=None, help='Path to a checkpoint to resume model')
+
+    args = parser.parse_args()
+    params = Params()

     # parse args
-    # if not os.path.exists(args.root):
-    #     print('ERROR: Root %s not exists!' % args.root)
-    #     exit(1)
+    if not os.path.exists(args.root):
+        if params.dataset_root is None:
+            raise ValueError('ERROR: Root %s not exists!' % args.root)
+    else:
+        params.dataset_root = args.root
+    params.num_epoch = args.epoch
+    params.base_lr = args.lr
+    params.pre_trained_from = args.pretrain
+    params.resume_from = args.resume_from
+
+    print('Network parameters:')
+    print_config(params)

-    params = Params()
-    # params.dataset_root = args.root
-    params.dataset_root = '/media/ubuntu/disk/cityscapes'
-    """ TEST CODE """
     # create dataset and transformation
     print('Creating Dataset and Transformation......')
     datasets = create_dataset(params)
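
With the argument parser no longer commented out, training is configured from the command line and the parsed values are copied onto the Params instance. A possible invocation, assuming the script is launched from the repository root; the checkpoint path is a placeholder, not a file shipped with the repository:

    python main.py --dataset cityscapes --root /data/cityscapes \
                   --epoch 50 --lr 0.00025 \
                   --pretrain ./ckpt/mobilenetv2_pretrain.pth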

network.py

Lines changed: 6 additions & 5 deletions
@@ -6,7 +6,6 @@
 from tensorboardX import SummaryWriter
 import layers
 from progressbar import bar
-from config import Params

 WARNING = lambda x: '\033[1;31;2mWARNING: ' + x + '\033[0m'

@@ -37,7 +36,7 @@ def __init__(self, params, datasets):
         # conv layer 1
         block.append(nn.Sequential(nn.Conv2d(3, self.params.c[0], 3, stride=self.params.s[0], padding=1, bias=False),
                                    nn.BatchNorm2d(self.params.c[0]),
-                                   nn.Dropout2d(self.params.dropout_prob, inplace=True),
+                                   # nn.Dropout2d(self.params.dropout_prob, inplace=True),
                                    nn.ReLU6()))

         # conv layer 2-7
@@ -53,7 +52,7 @@ def __init__(self, params, datasets):
                                                             t=self.params.t[6], s=1, dilation=rate*self.params.multi_grid[i]))

         # ASPP layer
-        block.append(layers.ASPP(self.params))
+        block.append(layers.ASPP_plus(self.params))

         # final conv layer
         block.append(nn.Conv2d(256, self.params.num_class, 1))
@@ -143,7 +142,7 @@ def val_one_epoch(self):
        m is defined in params.val_every
        """
        # TODO: add IoU compute function
-       print('Testing:')
+       print('Validating:')

        # set mode eval
        self.network.eval()
@@ -218,6 +217,8 @@ def Train(self):
        # save the last network state
        self.save_checkpoint()

+       # TODO: add train visualization
+
    def Test(self):
        """
        Test network on test set
@@ -249,7 +250,7 @@ def adjust_lr(self):
        """
        Adjust learning rate at each epoch
        """
-       learning_rate = self.params.base_lr * (1 - self.epoch / self.params.num_epoch) ** self.params.power
+       learning_rate = self.params.base_lr * (1 - float(self.epoch) / self.params.num_epoch) ** self.params.power
        for param_group in self.opt.param_groups:
            param_group['lr'] = learning_rate
        print('Change learning rate into %f' % (learning_rate))
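
The last hunk fixes a Python 2 pitfall: without the float() cast, self.epoch / self.params.num_epoch is integer division and evaluates to 0 for every epoch below num_epoch, so the learning rate would never decay. The schedule itself is the usual poly decay; a standalone sketch using the defaults from config.py:

    def poly_lr(base_lr, epoch, num_epoch, power=0.9):
        # poly decay: lr = base_lr * (1 - epoch / num_epoch) ** power
        return base_lr * (1 - float(epoch) / num_epoch) ** power

    # with base_lr=0.00025 and num_epoch=50:
    #   epoch 0  -> 0.000250
    #   epoch 25 -> ~0.000134  (0.00025 * 0.5 ** 0.9)
    #   epoch 49 -> ~0.0000074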

progressbar.py

Lines changed: 2 additions & 2 deletions
@@ -21,9 +21,9 @@ def click(self, current_idx, max_idx, total_length=40):
         self.iter_per_sec = 1/self.time
         perc = current_idx * total_length / max_idx
         # print progress bar
-        print '\r|'+'='*perc+'>'+' '*(total_length-1-perc)+'| %d/%d (%.2f iter/s)' % (current_idx+1,
+        print('\r|'+'='*perc+'>'+' '*(total_length-1-perc)+'| %d/%d (%.2f iter/s)' % (current_idx+1,
                                                                                       max_idx,
-                                                                                      self.iter_per_sec),
+                                                                                      self.iter_per_sec), end='')
         self.start_time = time.time()

     def close(self):
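
This hunk ports a Python 2 print statement to the Python 3 function form while keeping the in-place update behaviour: the leading '\r' moves the cursor back to the start of the line and end='' suppresses the newline, so each click() overwrites the previous bar. A minimal standalone sketch of the same trick, independent of the progressbar module:

    from __future__ import print_function  # no-op on Python 3, keeps the sketch valid on Python 2
    import time

    for i in range(1, 6):
        print('\r|' + '=' * i + '>' + ' ' * (5 - i) + '| %d/5' % i, end='')
        time.sleep(0.2)
    print()  # final newline after the loop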

utils.py

Lines changed: 40 additions & 1 deletion
@@ -42,7 +42,8 @@ def create_dataset(params):
     # if params.dataset_root is not None and not os.path.exists(params.dataset_root):
     #     raise ValueError('Dataset not exists!')

-    transform = {'train': transforms.Compose([Rescale(params.image_size),
+    transform = {'train': transforms.Compose([Rescale(params.rescale_size),
+                                              RandomCrop(params.image_size),
                                               RandomHorizontalFlip(),
                                               ToTensor()
                                               ]),
@@ -194,6 +195,44 @@ def __call__(self, sample, p=0.5):
         return {'image': image, 'label': label}


+class RandomCrop(object):
+    """
+    Crop randomly the image in a sample.
+
+    :param output_size (tuple or int): Desired output size. If int, square crop
+        is made.
+    """
+
+    def __init__(self, output_size):
+        assert isinstance(output_size, (int, tuple))
+        if isinstance(output_size, int):
+            self.output_size = (output_size, output_size)
+        else:
+            assert len(output_size) == 2
+            self.output_size = output_size
+
+    def __call__(self, sample):
+        image, label = sample['image'], sample['label']
+
+        h, w = image.shape[:2]
+        new_h, new_w = self.output_size
+
+        top = np.random.randint(0, h - new_h)
+        left = np.random.randint(0, w - new_w)
+
+        image = image[top: top + new_h, left: left + new_w, :]
+
+        label = label[top: top + new_h, left: left + new_w]
+
+        return {'image': image, 'label': label}
+
+
+def print_config(params):
+    for name, value in sorted(vars(params).items()):
+        print('\t%-20s:%s' % (name, str(value)))
+    print('')
+
+
 if __name__ == '__main__':
     dir = '/media/ubuntu/disk/cityscapes'
     dataset = Cityscapes(dir)
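
The new RandomCrop operates on the {'image', 'label'} sample dicts used by the other transforms and crops both arrays at the same random offset, so image and segmentation mask stay aligned; in the 'train' pipeline it follows Rescale(params.rescale_size) and produces the final image_size patch. A quick sanity check with a synthetic sample, assuming utils.py and its dependencies are importable (the shapes are illustrative):

    import numpy as np
    from utils import RandomCrop

    sample = {'image': np.zeros((600, 1200, 3), dtype=np.uint8),
              'label': np.zeros((600, 1200), dtype=np.uint8)}

    crop = RandomCrop(512)                         # int argument -> square 512x512 crop
    out = crop(sample)
    print(out['image'].shape, out['label'].shape)  # (512, 512, 3) (512, 512)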
