@@ -118,7 +118,7 @@ from funasr.utils.postprocess_utils import rich_transcription_postprocess
model_dir = "iic/SenseVoiceSmall"


- model = AutoModel (
+ model = AutoModel(
    model=model_dir,
    trust_remote_code=True,
    remote_code="./model.py",
@@ -128,17 +128,17 @@ model = AutoModel (
)

# en
- res = model.generate (
+ res = model.generate(
    input=f"{model.model_path}/example/en.mp3",
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
-     merge_vad=True,  #
+     merge_vad=True,
    merge_length_s=15,
)
- text = rich_transcription_postprocess (res[0]["text"])
- print (text)
+ text = rich_transcription_postprocess(res[0]["text"])
+ print(text)
```

<details><summary>Parameter description (click to expand)</summary>
@@ -159,9 +159,9 @@ print (text)
If all inputs are short audio clips (under 30 s) and you need batched inference, you can remove the VAD model and set `batch_size` instead to speed up inference.

```python
- model = AutoModel (model=model_dir, trust_remote_code=True, device="cuda:0")
+ model = AutoModel(model=model_dir, trust_remote_code=True, device="cuda:0")

- res = model.generate (
+ res = model.generate(
    input=f"{model.model_path}/example/en.mp3",
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
@@ -181,19 +181,19 @@ from model import SenseVoiceSmall
from funasr.utils.postprocess_utils import rich_transcription_postprocess

model_dir = "iic/SenseVoiceSmall"
- m, kwargs = SenseVoiceSmall.from_pretrained (model=model_dir, device="cuda:0")
- m.eval ()
+ m, kwargs = SenseVoiceSmall.from_pretrained(model=model_dir, device="cuda:0")
+ m.eval()

- res = m.inference (
+ res = m.inference(
    data_in=f"{kwargs['model_path']}/example/en.mp3",
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=False,
    ban_emo_unk=False,
    **kwargs,
)

- text = rich_transcription_postprocess (res[0][0]["text"])
- print (text)
+ text = rich_transcription_postprocess(res[0][0]["text"])
+ print(text)
```

## Service Deployment
@@ -215,13 +215,13 @@ from funasr_onnx.utils.postprocess_utils import rich_transcription_postprocess

model_dir = "iic/SenseVoiceSmall"

- model = SenseVoiceSmall (model_dir, batch_size=10, quantize=True)
+ model = SenseVoiceSmall(model_dir, batch_size=10, quantize=True)

# inference
- wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format (Path.home (), model_dir)]
+ wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format(Path.home(), model_dir)]

- res = model (wav_or_scp, language="auto", use_itn=True)
- print ([rich_transcription_postprocess (i) for i in res])
+ res = model(wav_or_scp, language="auto", use_itn=True)
+ print([rich_transcription_postprocess(i) for i in res])
```

Note: the ONNX model is exported into the original model directory.
@@ -236,12 +236,12 @@ from funasr_torch.utils.postprocess_utils import rich_transcription_postprocess

model_dir = "iic/SenseVoiceSmall"

- model = SenseVoiceSmall (model_dir, batch_size=10, device="cuda:0")
+ model = SenseVoiceSmall(model_dir, batch_size=10, device="cuda:0")

- wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format (Path.home (), model_dir)]
+ wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format(Path.home(), model_dir)]

- res = model (wav_or_scp, language="auto", use_itn=True)
- print ([rich_transcription_postprocess (i) for i in res])
+ res = model(wav_or_scp, language="auto", use_itn=True)
+ print([rich_transcription_postprocess(i) for i in res])
```

Note: the Libtorch model is exported into the original model directory.