Skip to content

Commit 6b4c7ff

Browse files
authored
Merge pull request open-mmlab#2194 from jinwonkim93/custom/face_occlusion
[Feature] Support Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets
2 parents 7b09967 + 70b2853 commit 6b4c7ff

File tree

5 files changed

+370
-1
lines changed

5 files changed

+370
-1
lines changed
Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
# Dataset settings for the occluded-face segmentation benchmark
# ("Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets").
dataset_type = 'FaceOccludedDataset'
data_root = 'data/occlusion-aware-face-dataset'
crop_size = (512, 512)
# Shared normalization constants (ImageNet mean/std), reused by both pipelines
# below instead of being duplicated inline.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(512, 512)),
    dict(type='RandomFlip', prob=0.5),
    dict(type='RandomRotate', degree=(-30, 30), prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='ResizeToMultiple', size_divisor=32),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

# Three synthetic training splits (naturalistic hand occluders, naturalistic
# object occluders, random occluders), concatenated at train time, plus a
# real-occlusion validation split.
dataset_train_A = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='NatOcc_hand_sot/img',
    ann_dir='NatOcc_hand_sot/mask',
    split='train.txt',
    pipeline=train_pipeline)

dataset_train_B = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='NatOcc_object/img',
    ann_dir='NatOcc_object/mask',
    split='train.txt',
    pipeline=train_pipeline)

dataset_train_C = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='RandOcc/img',
    ann_dir='RandOcc/mask',
    split='train.txt',
    pipeline=train_pipeline)

dataset_valid = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='RealOcc/image',
    ann_dir='RealOcc/mask',
    split='RealOcc/split/val.txt',
    pipeline=test_pipeline)

data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=[dataset_train_A, dataset_train_B, dataset_train_C],
    val=dataset_valid)
Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
# +
2+
_base_ = '../_base_/datasets/occlude_face.py'
3+
norm_cfg = dict(type='SyncBN', requires_grad=True)
4+
model = dict(
5+
type='EncoderDecoder',
6+
pretrained='open-mmlab://resnet101_v1c',
7+
backbone=dict(
8+
type='ResNetV1c',
9+
depth=101,
10+
num_stages=4,
11+
out_indices=(0, 1, 2, 3),
12+
dilations=(1, 1, 2, 4),
13+
strides=(1, 2, 1, 1),
14+
norm_cfg=dict(type='SyncBN', requires_grad=True),
15+
norm_eval=False,
16+
style='pytorch',
17+
contract_dilation=True),
18+
decode_head=dict(
19+
type='DepthwiseSeparableASPPHead',
20+
in_channels=2048,
21+
in_index=3,
22+
channels=512,
23+
dilations=(1, 12, 24, 36),
24+
c1_in_channels=256,
25+
c1_channels=48,
26+
dropout_ratio=0.1,
27+
num_classes=2,
28+
norm_cfg=dict(type='SyncBN', requires_grad=True),
29+
align_corners=False,
30+
loss_decode=dict(
31+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
32+
sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)),
33+
auxiliary_head=dict(
34+
type='FCNHead',
35+
in_channels=1024,
36+
in_index=2,
37+
channels=256,
38+
num_convs=1,
39+
concat_input=False,
40+
dropout_ratio=0.1,
41+
num_classes=2,
42+
norm_cfg=dict(type='SyncBN', requires_grad=True),
43+
align_corners=False,
44+
loss_decode=dict(
45+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
46+
train_cfg=dict(),
47+
test_cfg=dict(mode='whole'))
48+
log_config = dict(
49+
interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
50+
dist_params = dict(backend='nccl')
51+
log_level = 'INFO'
52+
load_from = None
53+
resume_from = None
54+
workflow = [('train', 1)]
55+
cudnn_benchmark = True
56+
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
57+
optimizer_config = dict()
58+
lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False)
59+
runner = dict(type='IterBasedRunner', max_iters=30000)
60+
checkpoint_config = dict(by_epoch=False, interval=400)
61+
evaluation = dict(
62+
interval=400, metric=['mIoU', 'mDice', 'mFscore'], pre_eval=True)
63+
auto_resume = False

docs/en/dataset_prepare.md

Lines changed: 204 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
<!-- #region -->
2+
13
## Prepare datasets
24

35
It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`.
@@ -138,6 +140,21 @@ mmsegmentation
138140
│ │ ├── ann_dir
139141
│ │ │ ├── train
140142
│ │ │ ├── val
143+
│ ├── occlusion-aware-face-dataset
144+
│ │ ├── train.txt
145+
│ │ ├── NatOcc_hand_sot
146+
│ │ │ ├── img
147+
│ │ │ ├── mask
148+
│ │ ├── NatOcc_object
149+
│ │ │ ├── img
150+
│ │ │ ├── mask
151+
│ │ ├── RandOcc
152+
│ │ │ ├── img
153+
│ │ │ ├── mask
154+
│ │ ├── RealOcc
155+
│ │ │ ├── img
156+
│ │ │ ├── mask
157+
│ │ │ ├── split
141158
```
142159

143160
### Cityscapes
@@ -376,3 +393,190 @@ python tools/convert_datasets/isaid.py /path/to/iSAID
376393
```
377394

378395
In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation.
396+
397+
### Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets
398+
399+
The dataset is generated by two techniques: naturalistic occlusion generation and random occlusion generation. You must first install the face-occlusion-generation tool and download the source datasets; see the full guide at https://github.com/kennyvoo/face-occlusion-generation.
400+
401+
#### Dataset Preparation
402+
403+
Step 1
404+
405+
Create a folder for data generation materials on mmsegmentation folder.
406+
407+
```shell
408+
mkdir data_materials
409+
```
410+
411+
Step 2
412+
413+
Please download the masks (11k-hands_mask.7z,CelebAMask-HQ-masks_corrected.7z) from this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing)
414+
415+
Please download the images from [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ), [11k Hands.zip](https://sites.google.com/view/11khands) and [dtd-r1.0.1.tar.gz](https://www.robots.ox.ac.uk/~vgg/data/dtd/).
416+
417+
Step 3
418+
419+
Download the upsampled COCO object images and masks (coco_object.7z). The files can be found in this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing).
420+
421+
Download CelebAMask-HQ and 11k Hands images split txt files. (11k_hands_sample.txt, CelebAMask-HQ-WO-train.txt) found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing).
422+
423+
Download the files to `./data_materials`:
424+
425+
```none
426+
CelebAMask-HQ.zip
427+
CelebAMask-HQ-masks_corrected.7z
428+
CelebAMask-HQ-WO-train.txt
429+
RealOcc.7z
430+
RealOcc-Wild.7z
431+
11k-hands_mask.7z
432+
11k Hands.zip
433+
11k_hands_sample.txt
434+
coco_object.7z
435+
dtd-r1.0.1.tar.gz
436+
```
437+
438+
______________________________________________________________________
439+
440+
```bash
441+
apt-get install p7zip-full
442+
443+
cd data_materials
444+
445+
#make occlusion-aware-face-dataset folder
446+
mkdir path-to-mmsegmentaion/data/occlusion-aware-face-dataset
447+
448+
#extract celebAMask-HQ and split by train-set
449+
unzip CelebAMask-HQ.zip
450+
7za x CelebAMask-HQ-masks_corrected.7z -o./CelebAMask-HQ
451+
#copy training data to train-image-folder
452+
rsync -a ./CelebAMask-HQ/CelebA-HQ-img/ --files-from=./CelebAMask-HQ-WO-train.txt ./CelebAMask-HQ-WO-Train_img
453+
#create a file-name txt file for copying mask
454+
basename -s .jpg ./CelebAMask-HQ-WO-Train_img/* > train.txt
455+
#add .png to file-name txt file
456+
xargs -n 1 -i echo {}.png < train.txt > mask_train.txt
457+
#copy training data to train-mask-folder
458+
rsync -a ./CelebAMask-HQ/CelebAMask-HQ-masks_corrected/ --files-from=./mask_train.txt ./CelebAMask-HQ-WO-Train_mask
459+
mv train.txt ../data/occlusion-aware-face-dataset
460+
461+
#extract DTD
462+
tar -zxvf dtd-r1.0.1.tar.gz
463+
mv dtd DTD
464+
465+
#extract hands dataset and split by 200 samples
466+
7za x 11k-hands_mask.7z -o.
467+
unzip '11k Hands.zip'
468+
rsync -a ./Hands/ --files-from=./11k_hands_sample.txt ./11k-hands_img
469+
470+
#extract upscaled coco object
471+
7za x coco_object.7z -o.
472+
mv coco_object/* .
473+
474+
#extract validation set
475+
7za x RealOcc.7z -o../data/occlusion-aware-face-dataset
476+
477+
```
478+
479+
**Dataset material Organization:**
480+
481+
```none
482+
483+
├── data_materials
484+
│ ├── CelebAMask-HQ-WO-Train_img
485+
│ │ ├── {image}.jpg
486+
│ ├── CelebAMask-HQ-WO-Train_mask
487+
│ │ ├── {mask}.png
488+
│ ├── DTD
489+
│ │ ├── images
490+
│ │ │ ├── {classA}
491+
│ │ │ │ ├── {image}.jpg
492+
│ │ │ ├── {classB}
493+
│ │ │ │ ├── {image}.jpg
494+
│ ├── 11k-hands_img
495+
│ │ ├── {image}.jpg
496+
│ ├── 11k-hands_mask
497+
│ │ ├── {mask}.png
498+
│ ├── object_image_sr
499+
│ │ ├── {image}.jpg
500+
│ ├── object_mask_x4
501+
│ │ ├── {mask}.png
502+
503+
```
504+
505+
## Data Generation
506+
507+
```bash
508+
git clone https://github.com/kennyvoo/face-occlusion-generation.git
509+
cd face-occlusion-generation
510+
```
511+
512+
Example script to generate NatOcc hand dataset
513+
514+
```bash
515+
CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \
516+
--config ./configs/natocc_hand.yaml \
517+
--opts OUTPUT_PATH "path/to/mmsegmentation/data/occlusion-aware-face-dataset/NatOcc_hand_sot"\
518+
AUGMENTATION.SOT True \
519+
SOURCE_DATASET.IMG_DIR "path/to/data_materials/CelebAMask-HQ-WO-Train_img" \
520+
SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \
521+
OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/11k-hands_img" \
522+
OCCLUDER_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/11k-hands_mask"
523+
```
524+
525+
Example script to generate NatOcc object dataset
526+
527+
```bash
528+
CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \
529+
--config ./configs/natocc_objects.yaml \
530+
--opts OUTPUT_PATH "path/to/mmsegmentation/data/occlusion-aware-face-dataset/NatOcc_object" \
531+
SOURCE_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_img" \
532+
SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \
533+
OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/object_image_sr" \
534+
OCCLUDER_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/object_mask_x4"
535+
```
536+
537+
Example script to generate RandOcc dataset
538+
539+
```bash
540+
CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \
541+
--config ./configs/randocc.yaml \
542+
--opts OUTPUT_PATH "path/to/mmsegmentation/data/occlusion-aware-face-dataset/RandOcc" \
543+
SOURCE_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_img/" \
544+
SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \
545+
OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/DTD/images"
546+
```
547+
548+
**Dataset Organization:**
549+
550+
```none
551+
├── data
552+
│ ├── occlusion-aware-face-dataset
553+
│ │ ├── train.txt
554+
│ │ ├── NatOcc_hand_sot
555+
│ │ │ ├── img
556+
│ │ │ │ ├── {image}.jpg
557+
│ │ │ ├── mask
558+
│ │ │ │ ├── {mask}.png
559+
│ │ ├── NatOcc_object
560+
│ │ │ ├── img
561+
│ │ │ │ ├── {image}.jpg
562+
│ │ │ ├── mask
563+
│ │ │ │ ├── {mask}.png
564+
│ │ ├── RandOcc
565+
│ │ │ ├── img
566+
│ │ │ │ ├── {image}.jpg
567+
│ │ │ ├── mask
568+
│ │ │ │ ├── {mask}.png
569+
│ │ ├── RealOcc
570+
│ │ │ ├── img
571+
│ │ │ │ ├── {image}.jpg
572+
│ │ │ ├── mask
573+
│ │ │ │ ├── {mask}.png
574+
│ │ │ ├── split
575+
│ │ │ │ ├── val.txt
576+
```
577+
578+
<!-- #endregion -->
579+
580+
```python
581+
582+
```

mmseg/datasets/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset,
1010
RepeatDataset)
1111
from .drive import DRIVEDataset
12+
from .face import FaceOccludedDataset
1213
from .hrf import HRFDataset
1314
from .isaid import iSAIDDataset
1415
from .isprs import ISPRSDataset
@@ -26,5 +27,5 @@
2627
'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
2728
'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset',
2829
'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset',
29-
'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset'
30+
'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'FaceOccludedDataset'
3031
]

0 commit comments

Comments
 (0)