Skip to content

Commit 0b11d58

Browse files
authored
[Fix] fix_torchserver1.1 (open-mmlab#844)
* test_torchserver1.1 * test_torchserver1.2 * update * update mmseg_handler.py * update docs * update torchserver * tranfer torchserver to torchserve * update docs
1 parent cff01b3 commit 0b11d58

File tree

5 files changed

+84
-5
lines changed

5 files changed

+84
-5
lines changed

docs/useful_tools.md

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -304,7 +304,7 @@ In order to serve an `MMSegmentation` model with [`TorchServe`](https://pytorch.
304304
### 1. Convert model from MMSegmentation to TorchServe
305305

306306
```shell
307-
python tools/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
307+
python tools/torchserve/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
308308
--output-folder ${MODEL_STORE} \
309309
--model-name ${MODEL_NAME}
310310
```
@@ -359,3 +359,20 @@ plt.show()
359359
You should see something similar to:
360360

361361
![3dogs_mask](../resources/3dogs_mask.png)
362+
363+
You can use `test_torchserve.py` to compare the results of TorchServe and PyTorch, and to visualize them.
364+
365+
```shell
366+
python tools/torchserve/test_torchserve.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME}
367+
[--inference-addr ${INFERENCE_ADDR}] [--result-image ${RESULT_IMAGE}] [--device ${DEVICE}]
368+
```
369+
370+
Example:
371+
372+
```shell
373+
python tools/torchserve/test_torchserve.py \
374+
demo/demo.png \
375+
configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \
376+
checkpoint/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth \
377+
fcn
378+
```

setup.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,6 @@ line_length = 79
88
multi_line_output = 0
99
known_standard_library = setuptools
1010
known_first_party = mmseg
11-
known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,scipy,seaborn,torch,ts
11+
known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts
1212
no_lines_before = STDLIB,LOCALFOLDER
1313
default_section = THIRDPARTY
File renamed without changes.

tools/mmseg_handler.py renamed to tools/torchserve/mmseg_handler.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
# Copyright (c) OpenMMLab. All rights reserved.
22
import base64
3-
import io
43
import os
54

65
import cv2
76
import mmcv
87
import torch
8+
from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm
99
from ts.torch_handler.base_handler import BaseHandler
1010

1111
from mmseg.apis import inference_segmentor, init_segmentor
@@ -27,6 +27,7 @@ def initialize(self, context):
2727
self.config_file = os.path.join(model_dir, 'config.py')
2828

2929
self.model = init_segmentor(self.config_file, checkpoint, self.device)
30+
self.model = revert_sync_batchnorm(self.model)
3031
self.initialized = True
3132

3233
def preprocess(self, data):
@@ -47,8 +48,10 @@ def inference(self, data, *args, **kwargs):
4748

4849
def postprocess(self, data):
    """Encode inference results as base64 PNG strings.

    Args:
        data: Iterable of per-image results; each item's first element is
            assumed to be a segmentation map castable to ``uint8`` —
            TODO confirm against ``inference`` output.

    Returns:
        list[str]: One base64-encoded PNG string per input image, suitable
        for embedding in a JSON/text HTTP response.
    """
    output = []

    for image_result in data:
        # Encode the segmentation map as PNG bytes via OpenCV, then
        # base64-encode so the payload is plain text.
        _, buffer = cv2.imencode('.png', image_result[0].astype('uint8'))
        # Fixed typo: variables were previously misspelled 'bast64_*'.
        base64_data = base64.b64encode(buffer.tobytes())
        base64_str = str(base64_data, 'utf-8')
        output.append(base64_str)
    return output
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
import base64
2+
from argparse import ArgumentParser
3+
from io import BytesIO
4+
5+
import matplotlib.pyplot as plt
6+
import mmcv
7+
import requests
8+
9+
from mmseg.apis import inference_segmentor, init_segmentor
10+
11+
12+
def parse_args():
    """Parse command-line arguments for the TorchServe comparison script.

    Returns:
        argparse.Namespace: Parsed arguments with attributes ``img``,
        ``config``, ``checkpoint``, ``model_name``, ``inference_addr``,
        ``result_image`` and ``device``.
    """
    parser = ArgumentParser(
        # Note the trailing space: the two literals are concatenated, and
        # without it the help text read "pytorch,and visualize".
        description='Compare result of torchserve and pytorch, '
        'and visualize them.')
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--result-image',
        type=str,
        default=None,
        help='save server output in result-image')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')

    args = parser.parse_args()
    return args
34+
35+
36+
def main(args):
    """Send *args.img* to the TorchServe endpoint and display its
    prediction, then run the same image through the local PyTorch model
    and display that result for comparison."""
    url = f'http://{args.inference_addr}/predictions/{args.model_name}'
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
    # The server returns the segmentation map as a base64-encoded PNG.
    decoded = base64.b64decode(response.content)
    if args.result_image:
        # Persist the server output to disk, then display it from the file.
        with open(args.result_image, 'wb') as out_image:
            out_image.write(decoded)
        plt.imshow(mmcv.imread(args.result_image, 'grayscale'))
        plt.show()
    else:
        # No output path given: display the decoded PNG from memory.
        plt.imshow(plt.imread(BytesIO(decoded)))
        plt.show()
    # Local PyTorch inference on the same image for visual comparison.
    model = init_segmentor(args.config, args.checkpoint, args.device)
    image = mmcv.imread(args.img)
    result = inference_segmentor(model, image)
    plt.imshow(result[0])
    plt.show()
55+
56+
57+
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the comparison.
    main(parse_args())

0 commit comments

Comments
 (0)