Skip to content

Commit b9d6092

Browse files
Update README_zh.md
1 parent 0d467b1 commit b9d6092

File tree

1 file changed

+20
-20
lines changed

1 file changed

+20
-20
lines changed

README_zh.md

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ from funasr.utils.postprocess_utils import rich_transcription_postprocess
118118
model_dir = "iic/SenseVoiceSmall"
119119

120120

121-
model = AutoModel (
121+
model = AutoModel(
122122
model=model_dir,
123123
trust_remote_code=True,
124124
remote_code="./model.py",
@@ -128,17 +128,17 @@ model = AutoModel (
128128
)
129129

130130
# en
131-
res = model.generate (
131+
res = model.generate(
132132
input=f"{model.model_path}/example/en.mp3",
133133
cache={},
134134
language="auto", # "zh", "en", "yue", "ja", "ko", "nospeech"
135135
use_itn=True,
136136
batch_size_s=60,
137-
merge_vad=True, #
137+
merge_vad=True,
138138
merge_length_s=15,
139139
)
140-
text = rich_transcription_postprocess (res [0]["text"])
141-
print (text)
140+
text = rich_transcription_postprocess(res[0]["text"])
141+
print(text)
142142
```
143143

144144
<details><summary> 参数说明(点击展开)</summary>
@@ -159,9 +159,9 @@ print (text)
159159
如果输入均为短音频(小于 30s),并且需要批量化推理,为了加快推理效率,可以移除 vad 模型,并设置 `batch_size`
160160

161161
```python
162-
model = AutoModel (model=model_dir, trust_remote_code=True, device="cuda:0")
162+
model = AutoModel(model=model_dir, trust_remote_code=True, device="cuda:0")
163163

164-
res = model.generate (
164+
res = model.generate(
165165
input=f"{model.model_path}/example/en.mp3",
166166
cache={},
167167
language="auto", # "zh", "en", "yue", "ja", "ko", "nospeech"
@@ -181,19 +181,19 @@ from model import SenseVoiceSmall
181181
from funasr.utils.postprocess_utils import rich_transcription_postprocess
182182

183183
model_dir = "iic/SenseVoiceSmall"
184-
m, kwargs = SenseVoiceSmall.from_pretrained (model=model_dir, device="cuda:0")
185-
m.eval ()
184+
m, kwargs = SenseVoiceSmall.from_pretrained(model=model_dir, device="cuda:0")
185+
m.eval()
186186

187-
res = m.inference (
187+
res = m.inference(
188188
data_in=f"{kwargs ['model_path']}/example/en.mp3",
189189
language="auto", # "zh", "en", "yue", "ja", "ko", "nospeech"
190190
use_itn=False,
191191
ban_emo_unk=False,
192192
**kwargs,
193193
)
194194

195-
text = rich_transcription_postprocess (res [0][0]["text"])
196-
print (text)
195+
text = rich_transcription_postprocess(res[0][0]["text"])
196+
print(text)
197197
```
198198

199199
## 服务部署
@@ -215,13 +215,13 @@ from funasr_onnx.utils.postprocess_utils import rich_transcription_postprocess
215215

216216
model_dir = "iic/SenseVoiceSmall"
217217

218-
model = SenseVoiceSmall (model_dir, batch_size=10, quantize=True)
218+
model = SenseVoiceSmall(model_dir, batch_size=10, quantize=True)
219219

220220
# inference
221-
wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format (Path.home (), model_dir)]
221+
wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format(Path.home(), model_dir)]
222222

223-
res = model (wav_or_scp, language="auto", use_itn=True)
224-
print ([rich_transcription_postprocess (i) for i in res])
223+
res = model(wav_or_scp, language="auto", use_itn=True)
224+
print([rich_transcription_postprocess(i) for i in res])
225225
```
226226

227227
备注:ONNX 模型导出到原模型目录中
@@ -236,12 +236,12 @@ from funasr_torch.utils.postprocess_utils import rich_transcription_postprocess
236236

237237
model_dir = "iic/SenseVoiceSmall"
238238

239-
model = SenseVoiceSmall (model_dir, batch_size=10, device="cuda:0")
239+
model = SenseVoiceSmall(model_dir, batch_size=10, device="cuda:0")
240240

241-
wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format (Path.home (), model_dir)]
241+
wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format(Path.home(), model_dir)]
242242

243-
res = model (wav_or_scp, language="auto", use_itn=True)
244-
print ([rich_transcription_postprocess (i) for i in res])
243+
res = model(wav_or_scp, language="auto", use_itn=True)
244+
print([rich_transcription_postprocess(i) for i in res])
245245
```
246246

247247
备注:Libtorch 模型导出到原模型目录中

0 commit comments

Comments
 (0)