Commit a02f4c6

[Project] Add pp_mobileseg onnx inference demo (open-mmlab#3268)
## Motivation

Add a model deployment example.

## Modification

Add an inference script and update the README.

## BC-breaking (Optional)

None

## Use cases (Optional)

In README.
1 parent aa75916 commit a02f4c6

File tree

2 files changed: +260 -0 lines changed


projects/pp_mobileseg/README.md

Lines changed: 57 additions & 0 deletions

@@ -43,6 +43,63 @@ Same as other models in MMSegmentation, you can run the following command to test

```shell
./tools/dist_test.sh projects/pp_mobileseg/configs/pp_mobileseg/pp_mobileseg_mobilenetv3_2x16_80k_ade20k_512x512_base.py checkpoints/pp_mobileseg_mobilenetv3_2xb16_3rdparty-base_512x512-ade20k-f12b44f3.pth 8
```

## Inference with ONNXRuntime

### Prerequisites

**1. Install onnxruntime inference engine.**

Choose one of the following ways to install onnxruntime.

- CPU version

```shell
pip install onnxruntime==1.15.1
wget https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-linux-x64-1.15.1.tgz
tar -zxvf onnxruntime-linux-x64-1.15.1.tgz
export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.15.1
export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH
```
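
You can quickly confirm the runtime is importable before moving on. A minimal sanity check (not part of the original demo):

```python
# Print the installed onnxruntime version and the execution providers
# available in this environment (a CPU-only install lists only
# 'CPUExecutionProvider').
import onnxruntime as ort

print(ort.__version__)
print(ort.get_available_providers())
```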

**2. Convert model to onnx file**

- Install `mim` and `mmdeploy`.

```shell
pip install openmim
mim install mmdeploy
git clone https://github.com/open-mmlab/mmdeploy.git
```

- Download the pp_mobileseg model.

```shell
wget https://download.openmmlab.com/mmsegmentation/v0.5/pp_mobileseg/pp_mobileseg_mobilenetv3_2xb16_3rdparty-tiny_512x512-ade20k-a351ebf5.pth
```

- Convert the model to onnx files.

```shell
python mmdeploy/tools/deploy.py mmdeploy/configs/mmseg/segmentation_onnxruntime_dynamic.py \
    configs/pp_mobileseg/pp_mobileseg_mobilenetv3_2x16_80k_ade20k_512x512_tiny.py \
    pp_mobileseg_mobilenetv3_2xb16_3rdparty-tiny_512x512-ade20k-a351ebf5.pth \
    ../../demo/demo.png \
    --work-dir mmdeploy_model/mmseg/ort \
    --show
```
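
Before running the demo you can verify that the exported graph loads and inspect its I/O signature. A short sketch, assuming the conversion above wrote `mmdeploy_model/mmseg/ort/end2end.onnx`:

```python
# Load the exported ONNX model and print its input/output names,
# shapes, and dtypes as seen by onnxruntime.
import onnxruntime as ort

sess = ort.InferenceSession('mmdeploy_model/mmseg/ort/end2end.onnx',
                            providers=['CPUExecutionProvider'])
for inp in sess.get_inputs():
    print('input :', inp.name, inp.shape, inp.type)
for out in sess.get_outputs():
    print('output:', out.name, out.shape, out.type)
```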

**3. Run demo**

```shell
python inference_onnx.py ${ONNX_FILE_PATH} ${IMAGE_PATH} [--input-size ${MODEL_INPUT_SIZE}] [--device ${DEVICE}] [--save-path ${OUTPUT_IMAGE_PATH}]
```

The optional arguments are flags (see `parse_args` in the script below); the defaults are a 512x512 input, CPU inference, and `output.jpg`.

Example:

```shell
python inference_onnx.py mmdeploy_model/mmseg/ort/end2end.onnx ../../demo/demo.png
```
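
The same pipeline can also be driven programmatically with the helpers defined in `inference_onnx.py`. A sketch, assuming you run it from the directory containing the script:

```python
import cv2

from inference_onnx import (build_session, inference, postprocess,
                            preprocess, visualize)

img = cv2.imread('../../demo/demo.png')
sess = build_session('mmdeploy_model/mmseg/ort/end2end.onnx', device='cpu')
resized_img, (h, w) = preprocess(img, (512, 512))
outputs = inference(sess, resized_img)
seg_map = postprocess(outputs, (w, h))  # resize back to the original size
visualize(img, seg_map, 'output.jpg')
```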

## Citation

If you find our project useful in your research, please consider citing:

projects/pp_mobileseg/inference_onnx.py

Lines changed: 203 additions & 0 deletions

@@ -0,0 +1,203 @@

```python
import argparse
import time
from typing import List, Tuple

import cv2
import loguru
import numpy as np
import onnxruntime as ort

logger = loguru.logger


def parse_args():
    parser = argparse.ArgumentParser(
        description='PP_Mobileseg ONNX inference demo.')
    parser.add_argument('onnx_file', help='ONNX file path')
    parser.add_argument('image_file', help='Input image file path')
    parser.add_argument(
        '--input-size',
        type=int,
        nargs='+',
        default=[512, 512],
        help='input image size (width, height)')
    parser.add_argument(
        '--device', help='device type for inference', default='cpu')
    parser.add_argument(
        '--save-path',
        help='path to save the output image',
        default='output.jpg')
    args = parser.parse_args()
    return args


def preprocess(
    img: np.ndarray, input_size: Tuple[int, int] = (512, 512)
) -> Tuple[np.ndarray, np.ndarray]:
    """Preprocess image for inference.

    Args:
        img (np.ndarray): Input BGR image in HWC format.
        input_size (Tuple[int, int]): Model input size (width, height).

    Returns:
        resized_img (np.ndarray): Resized and normalized image.
        img_shape (Tuple[int, int]): Original image shape (height, width).
    """
    img_shape = img.shape[:2]
    # Resize to the model input size.
    resized_img = cv2.resize(img, input_size)

    # Normalize with the ImageNet mean/std used by MMSegmentation.
    mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
    std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
    resized_img = (resized_img - mean) / std

    return resized_img, img_shape


def build_session(onnx_file: str, device: str = 'cpu') -> ort.InferenceSession:
    """Build onnxruntime session.

    Args:
        onnx_file (str): ONNX file path.
        device (str): Device type for inference ('cpu' or 'cuda').

    Returns:
        sess (ort.InferenceSession): ONNXRuntime session.
    """
    providers = ['CPUExecutionProvider'
                 ] if device == 'cpu' else ['CUDAExecutionProvider']
    sess = ort.InferenceSession(path_or_bytes=onnx_file, providers=providers)

    return sess


def inference(sess: ort.InferenceSession,
              img: np.ndarray) -> List[np.ndarray]:
    """Run the PP_Mobileseg model with ONNXRuntime.

    Args:
        sess (ort.InferenceSession): ONNXRuntime session.
        img (np.ndarray): Preprocessed image in HWC format.

    Returns:
        outputs (List[np.ndarray]): Outputs of the PP_Mobileseg model.
    """
    # build input: HWC -> CHW, wrapped in a batch of size one
    input_img = [img.transpose(2, 0, 1).astype(np.float32)]

    # collect input feed and output names
    sess_input = {sess.get_inputs()[0].name: input_img}
    sess_output = []
    for out in sess.get_outputs():
        sess_output.append(out.name)

    # inference
    outputs = sess.run(output_names=sess_output, input_feed=sess_input)

    return outputs


def postprocess(outputs: List[np.ndarray],
                origin_shape: Tuple[int, int]) -> np.ndarray:
    """Postprocess outputs of the PP_Mobileseg model.

    Args:
        outputs (List[np.ndarray]): Outputs of the PP_Mobileseg model.
        origin_shape (Tuple[int, int]): Original image size (width, height)
            to resize the segmentation map back to.

    Returns:
        seg_map (np.ndarray): Segmentation map of shape (height, width).
    """
    # outputs[0] has shape (1, 1, H, W); drop the batch and channel axes.
    seg_map = outputs[0][0][0]
    # Nearest-neighbour interpolation keeps class indices intact.
    seg_map = cv2.resize(
        seg_map.astype(np.float32),
        origin_shape,
        interpolation=cv2.INTER_NEAREST)
    return seg_map


def visualize(img: np.ndarray,
              seg_map: np.ndarray,
              filename: str = 'output.jpg',
              opacity: float = 0.8) -> np.ndarray:
    """Blend the segmentation map over the image and save it to disk."""
    assert 0.0 <= opacity <= 1.0, 'opacity should be in range [0, 1]'
    palette = np.array(PALETTE)
    color_seg = np.zeros((seg_map.shape[0], seg_map.shape[1], 3),
                         dtype=np.uint8)
    for label, color in enumerate(palette):
        color_seg[seg_map == label, :] = color
    # convert to BGR
    color_seg = color_seg[..., ::-1]

    # blend and cast back to uint8 before writing
    img = (img * (1 - opacity) + color_seg * opacity).astype(np.uint8)
    cv2.imwrite(filename, img)

    return img


def main():
    args = parse_args()
    logger.info('Start running model inference...')

    # read image from file
    logger.info(f'1. Read image from file {args.image_file}...')
    img = cv2.imread(args.image_file)

    # build onnx model
    logger.info(f'2. Build onnx model from {args.onnx_file}...')
    sess = build_session(args.onnx_file, args.device)

    # preprocess
    logger.info('3. Preprocess image...')
    model_input_size = tuple(args.input_size)
    assert len(model_input_size) == 2
    resized_img, origin_shape = preprocess(img, model_input_size)

    # inference
    logger.info('4. Inference...')
    start = time.time()
    outputs = inference(sess, resized_img)
    logger.info(f'Inference time: {time.time() - start:.4f}s')

    # postprocess
    logger.info('5. Postprocess...')
    h, w = origin_shape
    seg_map = postprocess(outputs, (w, h))

    # visualize
    logger.info('6. Visualize...')
    visualize(img, seg_map, args.save_path)

    logger.info('Done...')


# ADE20K palette (150 classes), as used in MMSegmentation.
PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
           [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
           [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
           [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
           [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
           [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
           [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
           [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
           [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
           [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
           [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
           [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
           [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
           [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
           [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
           [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
           [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
           [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
           [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
           [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
           [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
           [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
           [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
           [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
           [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
           [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
           [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
           [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
           [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
           [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
           [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
           [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
           [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
           [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
           [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
           [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
           [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
           [102, 255, 0], [92, 0, 255]]


if __name__ == '__main__':
    main()
```
