Delete tools/damo_asr
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/.gitattributes +0 -33
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/README.md +0 -272
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/config.yaml +0 -46
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/configuration.json +0 -13
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/example/punc_example.txt +0 -3
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/fig/struct.png +0 -3
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/model.pt +0 -3
- tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/tokens.json +0 -0
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/.gitattributes +0 -32
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/README.md +0 -296
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/am.mvn +0 -8
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/config.yaml +0 -56
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/configuration.json +0 -13
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav +0 -3
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/fig/struct.png +0 -0
- tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/model.pt +0 -3
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/.gitattributes +0 -35
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md +0 -408
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/am.mvn +0 -8
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/config.yaml +0 -123
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/configuration.json +0 -14
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav +0 -3
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/struct.png +0 -0
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pt +0 -3
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/seg_dict +0 -0
- tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/tokens.json +0 -0
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/.gitattributes
DELETED
@@ -1,33 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
punc.pb filter=lfs diff=lfs merge=lfs -text
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/README.md
DELETED
@@ -1,272 +0,0 @@
---
tasks:
- punctuation
domain:
- audio
model-type:
- Classification
frameworks:
- pytorch
metrics:
- f1_score
license: Apache License 2.0
language:
- cn
tags:
- FunASR
- CT-Transformer
- Alibaba
- ICASSP 2020
datasets:
  train:
  - 33M-samples online data
  test:
  - wikipedia data test
  - 10000 industrial Mandarin sentences test
widgets:
  - task: punctuation
    model_revision: v2.0.4
    inputs:
      - type: text
        name: input
        title: Text
    examples:
      - name: 1
        title: Example 1
        inputs:
          - name: input
            data: 我们都是木头人不会讲话不会动
    inferencespec:
      cpu: 1 # number of CPUs
      memory: 4096
---

# Controllable Time-delay Transformer: Model Introduction

[//]: # (The Controllable Time-delay Transformer is an end-to-end punctuation classification model.)

[//]: # (A conventional Transformer depends on context far in the future, so its punctuation output keeps changing for a long time. The Controllable Time-delay Transformer bounds the punctuation latency with no loss in accuracy.)

# Highlights
- General-purpose Chinese punctuation model: predicts punctuation for text output by speech recognition models.
- Used with the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary).
- Built on the [FunASR framework](https://github.com/alibaba-damo-academy/FunASR); ASR, VAD, and punctuation can be freely combined.
- Punctuation prediction for plain-text input.

## <strong>[About the FunASR open-source project](https://github.com/alibaba-damo-academy/FunASR)</strong>
<strong>[FunASR](https://github.com/alibaba-damo-academy/FunASR)</strong> aims to build a bridge between academic research on speech recognition and its industrial application. By releasing the training and fine-tuning of industrial-grade speech recognition models, it lets researchers and developers study and productionize ASR models more conveniently and promotes the growth of the speech recognition ecosystem. Make speech recognition fun!

[**GitHub repository**](https://github.com/alibaba-damo-academy/FunASR)
| [**What's new**](https://github.com/alibaba-damo-academy/FunASR#whats-new)
| [**Installation**](https://github.com/alibaba-damo-academy/FunASR#installation)
| [**Service deployment**](https://www.funasr.com)
| [**Model zoo**](https://github.com/alibaba-damo-academy/FunASR/tree/main/model_zoo)
| [**Contact us**](https://github.com/alibaba-damo-academy/FunASR#contact)


## How the model works

The Controllable Time-delay Transformer is the punctuation module of the efficient post-processing framework proposed by the DAMO Academy speech team. This project is a general-purpose Chinese punctuation model: it predicts punctuation for plain-text input, and it can also post-process speech recognition output, helping the ASR module produce readable text.

<p align="center">
<img src="fig/struct.png" alt="Controllable Time-delay Transformer architecture" width="500" />

As shown above, the Controllable Time-delay Transformer consists of three parts: Embedding, Encoder, and Predictor. The Embedding is a word embedding plus a positional embedding. The Encoder can use different network structures, e.g. self-attention, Conformer, or SAN-M. The Predictor predicts the punctuation type that follows each token.

The Transformer was chosen for its strong performance, but because the input is consumed as a sequence, a standard Transformer adds considerable latency: it can attend to the entire future, so a punctuation decision may depend on context far ahead. For users this appears as punctuation that keeps flickering and refreshing, not settling for a long time. To solve this we proposed the Controllable Time-Delay Transformer (CT-Transformer), which effectively bounds punctuation latency with no loss in model performance.

For more details see:
- Paper: [CONTROLLABLE TIME-DELAY TRANSFORMER FOR REAL-TIME PUNCTUATION PREDICTION AND DISFLUENCY DETECTION](https://arxiv.org/pdf/2003.01309.pdf)

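The layout above maps directly onto a tiny model. Below is a minimal PyTorch sketch of the Embedding → Encoder → Predictor pattern, not the released implementation: it substitutes a vanilla `nn.TransformerEncoder` for SAN-M, omits positional encoding and the time-delay masking, and shrinks the vocabulary (the real model uses 272,727 tokens and the dimensions from the `config.yaml` shown later in this diff: 256-dim embeddings, 4 blocks, 8 heads, 6 punctuation classes).

```python
import torch
import torch.nn as nn

class ToyPuncTagger(nn.Module):
    """Embedding -> Encoder -> Predictor: one punctuation label per token."""
    def __init__(self, vocab_size=1000, d_model=256, n_heads=8,
                 n_layers=4, n_punc=6):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)  # positional enc. omitted
        layer = nn.TransformerEncoderLayer(d_model, n_heads,
                                           dim_feedforward=1024,
                                           dropout=0.1, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, n_layers)
        self.predictor = nn.Linear(d_model, n_punc)  # punc class after each token

    def forward(self, token_ids):                # (batch, seq)
        h = self.encoder(self.embed(token_ids))  # (batch, seq, d_model)
        return self.predictor(h)                 # (batch, seq, n_punc) logits

logits = ToyPuncTagger()(torch.randint(0, 1000, (1, 12)))
print(logits.shape)  # torch.Size([1, 12, 6])
```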
## Inference with ModelScope

Three input formats are supported; example API calls for each:
- Path to a text .scp file such as example/punc_example.txt, one `key + "\t" + value` per line:
```sh
cat example/punc_example.txt
1 跨境河流是养育沿岸人民的生命之源
2 从存储上来说仅仅是全景图片它就会是图片的四倍的容量
3 那今天的会就到这里吧happy new year明年见
```
```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipline = pipeline(
    task=Tasks.punctuation,
    model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
    model_revision="v2.0.4")

rec_result = inference_pipline(input='example/punc_example.txt')
print(rec_result)
```
- Text data, e.g. read directly from a file:
```python
rec_result = inference_pipline(input='我们都是木头人不会讲话不会动')
```
- URL of a text file, e.g.: https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt
```python
rec_result = inference_pipline(input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt')
```

## Inference with FunASR

Quick-start guide below. Test audio: [Chinese](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav)

### Command line
Run in a terminal:

```shell
funasr ++model=paraformer-zh ++vad_model="fsmn-vad" ++punc_model="ct-punc" ++input=vad_example.wav
```

Note: single audio files and file lists are both supported; a list is a kaldi-style wav.scp: `wav_id wav_path`

### Python examples
#### Non-streaming speech recognition
```python
from funasr import AutoModel
# paraformer-zh is a multi-functional asr model
# use vad, punc, spk or not as you need
model = AutoModel(model="paraformer-zh", model_revision="v2.0.4",
                  vad_model="fsmn-vad", vad_model_revision="v2.0.4",
                  punc_model="ct-punc-c", punc_model_revision="v2.0.4",
                  # spk_model="cam++", spk_model_revision="v2.0.2",
                  )
res = model.generate(input=f"{model.model_path}/example/asr_example.wav",
                     batch_size_s=300,
                     hotword='魔搭')
print(res)
```
Note: `model_hub` selects the model repository: `ms` downloads from ModelScope, `hf` from Hugging Face.

#### Streaming speech recognition

```python
from funasr import AutoModel

chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
encoder_chunk_look_back = 4  # number of chunks to look back for encoder self-attention
decoder_chunk_look_back = 1  # number of encoder chunks to look back for decoder cross-attention

model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.4")

import soundfile
import os

wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = chunk_size[1] * 960  # 600ms

cache = {}
total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)
```

Note: `chunk_size` configures the streaming latency. `[0,10,5]` means the real-time display granularity is `10*60=600ms` and the look-ahead is `5*60=300ms`. Each inference call consumes `600ms` of input (`16000*0.6=9600` samples) and outputs the corresponding text; set `is_final=True` on the last audio chunk to force the final words out.

#### Voice activity detection (non-streaming)
```python
from funasr import AutoModel

model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
res = model.generate(input=wav_file)
print(res)
```

#### Voice activity detection (streaming)
```python
from funasr import AutoModel

chunk_size = 200  # ms
model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

import soundfile

wav_file = f"{model.model_path}/example/vad_example.wav"
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = int(chunk_size * sample_rate / 1000)

cache = {}
total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size)
    if len(res[0]["value"]):
        print(res)
```

#### Punctuation restoration
```python
from funasr import AutoModel

model = AutoModel(model="ct-punc", model_revision="v2.0.4")

res = model.generate(input="那今天的会就到这里吧 happy new year 明年见")
print(res)
```

#### Timestamp prediction
```python
from funasr import AutoModel

model = AutoModel(model="fa-zh", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"
res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```

More detailed usage: [examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining)


## Fine-tuning

Detailed usage: [examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining)


## Benchmark
The general-purpose Chinese punctuation model performs well on self-collected data from general business scenarios. The training data contains roughly 33M samples; each sample may hold one or more sentences.

### Self-collected data (20000+ samples)

| precision | recall | f1_score |
|:---------:|:------:|:--------:|
| 53.8 | 60.0 | 56.5 |

## Usage and scope

Runtime environment
- Runs on Linux-x86_64, Mac, and Windows.

Usage
- Direct inference: run the model on input text and output the text with punctuation.

Scope and target scenarios
- Suitable for punctuation prediction on text of any length.

## Related papers and citation

```BibTeX
@inproceedings{chen2020controllable,
  title={Controllable Time-Delay Transformer for Real-Time Punctuation Prediction and Disfluency Detection},
  author={Chen, Qian and Chen, Mengzhe and Li, Bo and Wang, Wen},
  booktitle={ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages={8069--8073},
  year={2020},
  organization={IEEE}
}
```
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/config.yaml
DELETED
@@ -1,46 +0,0 @@
model: CTTransformer
model_conf:
    ignore_id: 0
    embed_unit: 256
    att_unit: 256
    dropout_rate: 0.1
    punc_list:
    - <unk>
    - _
    - ,
    - 。
    - ?
    - 、
    punc_weight:
    - 1.0
    - 1.0
    - 1.0
    - 1.0
    - 1.0
    - 1.0
    sentence_end_id: 3

encoder: SANMEncoder
encoder_conf:
    input_size: 256
    output_size: 256
    attention_heads: 8
    linear_units: 1024
    num_blocks: 4
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: pe
    pos_enc_class: SinusoidalPositionEncoder
    normalize_before: true
    kernel_size: 11
    sanm_shfit: 0
    selfattention_layer_type: sanm
    padding_idx: 0

tokenizer: CharTokenizer
tokenizer_conf:
    unk_symbol: <unk>
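The config above fixes the punctuation inventory (`punc_list`, six classes) and marks `。` (index 3, `sentence_end_id`) as the sentence terminator. A small sketch of how a consumer might read these fields with PyYAML and map predictor class indices back to symbols; the hard-coded indices are illustrative only:

```python
import yaml

# Excerpt of the config.yaml shown above, inlined for a self-contained example
cfg = yaml.safe_load("""
model_conf:
    punc_list: ['<unk>', '_', ',', '。', '?', '、']
    sentence_end_id: 3
""")

punc_list = cfg["model_conf"]["punc_list"]
sentence_end = punc_list[cfg["model_conf"]["sentence_end_id"]]  # '。'

pred_ids = [1, 1, 2, 1, 3]  # per-token argmax of predictor logits (illustrative)
print([punc_list[i] for i in pred_ids], "| sentence terminator:", sentence_end)
```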
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/configuration.json
DELETED
@@ -1,13 +0,0 @@
{
    "framework": "pytorch",
    "task" : "punctuation",
    "model": {"type" : "funasr"},
    "pipeline": {"type":"funasr-pipeline"},
    "model_name_in_hub": {
        "ms":"iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
        "hf":""},
    "file_path_metas": {
        "init_param":"model.pt",
        "config":"config.yaml",
        "tokenizer_conf": {"token_list": "tokens.json"}}
}
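configuration.json is the hub manifest: it names the framework, task, and pipeline type, and `file_path_metas` points at the assets relative to the model directory. Below is a sketch of resolving those entries into paths; the resolution logic is an assumption about how such a manifest can be consumed, not code taken from FunASR:

```python
import json
from pathlib import Path

model_dir = Path("punc_ct-transformer_zh-cn-common-vocab272727-pytorch")
meta = json.loads((model_dir / "configuration.json").read_text())

paths = meta["file_path_metas"]
ckpt = model_dir / paths["init_param"]                      # model.pt
config = model_dir / paths["config"]                        # config.yaml
tokens = model_dir / paths["tokenizer_conf"]["token_list"]  # tokens.json
print(ckpt, config, tokens, sep="\n")
```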
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/example/punc_example.txt
DELETED
@@ -1,3 +0,0 @@
1 跨境河流是养育沿岸人民的生命之源长期以来为帮助下游地区防灾减灾中方技术人员在上游地区极为恶劣的自然条件下克服巨大困难甚至冒着生命危险向印方提供汛期水文资料处理紧急事件中方重视印方在跨境河流问题上的关切愿意进一步完善双方联合工作机制凡是中方能做的我们都会去做而且会做得更好我请印度朋友们放心中国在上游的任何开发利用都会经过科学规划和论证兼顾上下游的利益
2 从存储上来说仅仅是全景图片它就会是图片的四倍的容量然后全景的视频会是普通视频八倍的这个存储的容要求而三d的模型会是图片的十倍这都对我们今天运行在的云计算的平台存储的平台提出了更高的要求
3 那今天的会就到这里吧 happy new year 明年见
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/fig/struct.png
DELETED
Git LFS Details
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/model.pt
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5818bb9d933805a916eebe41eb41648f7f9caad30b4bd59d56f3ca135421916
size 291979892
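What git removes here is not the ~292 MB checkpoint itself but its git-lfs pointer, the three-line stub shown above. A small sketch of reading such a pointer (the format is the published git-lfs spec: `key value` lines):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs pointer file into its oid and size fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"),
            "size_bytes": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:a5818bb9d933805a916eebe41eb41648f7f9caad30b4bd59d56f3ca135421916
size 291979892"""
print(parse_lfs_pointer(pointer))  # the ~292 MB model.pt tracked by LFS
```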
tools/damo_asr/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/tokens.json
DELETED
The diff for this file is too large to render.
See raw diff
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/.gitattributes
DELETED
@@ -1,32 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/README.md
DELETED
@@ -1,296 +0,0 @@
---
tasks:
- voice-activity-detection
domain:
- audio
model-type:
- VAD model
frameworks:
- pytorch
backbone:
- fsmn
metrics:
- f1_score
license: Apache License 2.0
language:
- cn
tags:
- FunASR
- FSMN
- Alibaba
- Online
datasets:
  train:
  - 20,000 hour industrial Mandarin task
  test:
  - 20,000 hour industrial Mandarin task
widgets:
  - task: voice-activity-detection
    model_revision: v2.0.4
    inputs:
      - type: audio
        name: input
        title: Audio
    examples:
      - name: 1
        title: Example 1
        inputs:
          - name: input
            data: git://example/vad_example.wav
    inferencespec:
      cpu: 1 # number of CPUs
      memory: 4096
---
# FSMN-Monophone VAD: Model Introduction

[//]: # (FSMN-Monophone VAD model)

## Highlights
- 16 kHz general-purpose Chinese VAD model: detects the start and end times of valid speech within long audio.
- Used with the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary).
- Built on the [FunASR framework](https://github.com/alibaba-damo-academy/FunASR); ASR, VAD, and [Chinese punctuation](https://www.modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/summary) can be freely combined.
- Detects the start and end points of valid speech segments in audio data.

## <strong>[About the FunASR open-source project](https://github.com/alibaba-damo-academy/FunASR)</strong>
<strong>[FunASR](https://github.com/alibaba-damo-academy/FunASR)</strong> aims to build a bridge between academic research on speech recognition and its industrial application. By releasing the training and fine-tuning of industrial-grade speech recognition models, it lets researchers and developers study and productionize ASR models more conveniently and promotes the growth of the speech recognition ecosystem. Make speech recognition fun!

[**GitHub repository**](https://github.com/alibaba-damo-academy/FunASR)
| [**What's new**](https://github.com/alibaba-damo-academy/FunASR#whats-new)
| [**Installation**](https://github.com/alibaba-damo-academy/FunASR#installation)
| [**Service deployment**](https://www.funasr.com)
| [**Model zoo**](https://github.com/alibaba-damo-academy/FunASR/tree/main/model_zoo)
| [**Contact us**](https://github.com/alibaba-damo-academy/FunASR#contact)


## How the model works

FSMN-Monophone VAD is an efficient voice-activity-detection model proposed by the DAMO Academy speech team. It detects the start and end times of valid speech in the input audio so that only the detected speech segments are fed to the recognition engine, reducing recognition errors caused by non-speech audio.

<p align="center">
<img src="fig/struct.png" alt="VAD model architecture" width="500" />

The FSMN-Monophone VAD architecture is shown above. At the network level, the FSMN structure models contextual information, trains and decodes quickly, and keeps latency controllable; the network structure and the number of right-context frames were adapted to the model-size and low-latency requirements of VAD. At the modeling-unit level, speech is acoustically rich, and a single speech class has limited capacity to represent it, so the single speech class was upgraded to monophones. Finer-grained units avoid parameter averaging, strengthen abstraction, and improve discrimination.

## Inference with ModelScope

- Supported input audio formats:
  - wav file path, e.g.: data/test/audios/vad_example.wav
  - wav file URL, e.g.: https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav
  - wav binary data (bytes), e.g. bytes read directly from a file or recorded from a microphone.
  - decoded audio, e.g. audio, rate = soundfile.read("vad_example_zh.wav"); a numpy.ndarray or torch.Tensor.
  - wav.scp file in the following format:

```sh
cat wav.scp
vad_example1 data/test/audios/vad_example1.wav
vad_example2 data/test/audios/vad_example2.wav
...
```

- For a wav file URL, the API is called as follows:

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.voice_activity_detection,
    model='iic/speech_fsmn_vad_zh-cn-16k-common-pytorch',
    model_revision="v2.0.4",
)

segments_result = inference_pipeline(input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav')
print(segments_result)
```

- For pcm input, pass the sampling rate via the fs parameter, e.g.:

```python
segments_result = inference_pipeline(input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.pcm', fs=16000)
```

- For a wav.scp input file (note: the file name must end in .scp), add the output_dir parameter to write the results to files:

```python
inference_pipeline(input="wav.scp", output_dir='./output_dir')
```
The results are written with the following layout:

```sh
tree output_dir/
output_dir/
└── 1best_recog
    └── text

1 directory, 1 files
```
text: file with the detected speech start/end times (unit: ms)

- For decoded audio input, the API is called as follows:

```python
import soundfile

waveform, sample_rate = soundfile.read("vad_example_zh.wav")
segments_result = inference_pipeline(input=waveform)
print(segments_result)
```

- Common VAD tuning parameters (see the vad.yaml file):
  - max_end_silence_time: how long a trailing silence must last before the end point is declared. Range 500ms-6000ms, default 800ms (too low a value tends to cut speech off early).
  - speech_noise_thres: a frame is judged as speech when the speech score minus the noise score exceeds this value. Range: (-1, 1).
    - The closer to -1, the more likely noise is misclassified as speech (higher FA).
    - The closer to +1, the more likely speech is misclassified as noise (higher Pmiss).
    - In practice this value is balanced against the current model's results on a long-audio test set.

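As a usage note beyond the model card: the detected segments are millisecond offsets, so they can be mapped back to sample indices to cut the audio. A sketch under the assumption (consistent with the streaming example below, which reads `res[0]["value"]`) that the result carries `[[start_ms, end_ms], ...]` pairs; the segment values here are made up:

```python
import soundfile

waveform, fs = soundfile.read("vad_example_zh.wav")  # 16 kHz audio for this model

segments = [[70, 2340], [2620, 6200]]  # illustrative [start_ms, end_ms] pairs
clips = [waveform[int(s * fs / 1000):int(e * fs / 1000)] for s, e in segments]
print([round(len(c) / fs, 2) for c in clips])  # clip durations in seconds
```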
## Inference with FunASR

Quick-start guide below. Test audio: [Chinese](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav)

### Command line
Run in a terminal:

```shell
funasr ++model=paraformer-zh ++vad_model="fsmn-vad" ++punc_model="ct-punc" ++input=vad_example.wav
```

Note: single audio files and file lists are both supported; a list is a kaldi-style wav.scp: `wav_id wav_path`

### Python examples
#### Non-streaming speech recognition
```python
from funasr import AutoModel
# paraformer-zh is a multi-functional asr model
# use vad, punc, spk or not as you need
model = AutoModel(model="paraformer-zh", model_revision="v2.0.4",
                  vad_model="fsmn-vad", vad_model_revision="v2.0.4",
                  punc_model="ct-punc-c", punc_model_revision="v2.0.4",
                  # spk_model="cam++", spk_model_revision="v2.0.2",
                  )
res = model.generate(input=f"{model.model_path}/example/asr_example.wav",
                     batch_size_s=300,
                     hotword='魔搭')
print(res)
```
Note: `model_hub` selects the model repository: `ms` downloads from ModelScope, `hf` from Hugging Face.

#### Streaming speech recognition

```python
from funasr import AutoModel

chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
encoder_chunk_look_back = 4  # number of chunks to look back for encoder self-attention
decoder_chunk_look_back = 1  # number of encoder chunks to look back for decoder cross-attention

model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.4")

import soundfile
import os

wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = chunk_size[1] * 960  # 600ms

cache = {}
total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)
```

Note: `chunk_size` configures the streaming latency. `[0,10,5]` means the real-time display granularity is `10*60=600ms` and the look-ahead is `5*60=300ms`. Each inference call consumes `600ms` of input (`16000*0.6=9600` samples) and outputs the corresponding text; set `is_final=True` on the last audio chunk to force the final words out.

#### Voice activity detection (non-streaming)
```python
from funasr import AutoModel

model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
res = model.generate(input=wav_file)
print(res)
```

#### Voice activity detection (streaming)
```python
from funasr import AutoModel

chunk_size = 200  # ms
model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

import soundfile

wav_file = f"{model.model_path}/example/vad_example.wav"
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = int(chunk_size * sample_rate / 1000)

cache = {}
total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size)
    if len(res[0]["value"]):
        print(res)
```

#### Punctuation restoration
```python
from funasr import AutoModel

model = AutoModel(model="ct-punc", model_revision="v2.0.4")

res = model.generate(input="那今天的会就到这里吧 happy new year 明年见")
print(res)
```

#### Timestamp prediction
```python
from funasr import AutoModel

model = AutoModel(model="fa-zh", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"
res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```

More detailed usage: [examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining)


## Fine-tuning

Detailed usage: [examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining)


## Usage and scope

Runtime environment
- Runs on Linux-x86_64, Mac, and Windows.

Usage
- Direct inference: run the model on long audio and output the start/end times of the valid speech segments (unit: ms).

## Related papers and citation

```BibTeX
@inproceedings{zhang2018deep,
  title={Deep-FSMN for large vocabulary continuous speech recognition},
  author={Zhang, Shiliang and Lei, Ming and Yan, Zhijie and Dai, Lirong},
  booktitle={2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages={5869--5873},
  year={2018},
  organization={IEEE}
}
```
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/am.mvn
DELETED
@@ -1,8 +0,0 @@
<Nnet>
<Splice> 400 400
[ 0 ]
<AddShift> 400 400
<LearnRateCoef> 0 [ -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 
-13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 ]
<Rescale> 400 400
<LearnRateCoef> 0 [ 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 
0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 ]
</Nnet>
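am.mvn is a Kaldi-nnet-style global feature normalization: `<Splice> 400 400` declares the 400-dim spliced input (80 log-mel bins x lfr_m=5 stacked frames, matching the config.yaml below), `<AddShift>` carries the negated feature means, and `<Rescale>` the inverse standard deviations. A sketch of applying it, assuming the two `<LearnRateCoef>` vectors have already been parsed into arrays:

```python
import numpy as np

def apply_cmvn(feats: np.ndarray, shift: np.ndarray, scale: np.ndarray) -> np.ndarray:
    """feats: (T, 400) spliced features; shift/scale: the two 400-dim vectors."""
    return (feats + shift) * scale  # AddShift, then Rescale, as in the nnet above

feats = np.random.randn(8, 400) + 13.5  # stand-in log-mel-like features
shift = np.full(400, -13.5)             # illustrative; real values are in am.mvn
scale = np.full(400, 0.15)
print(apply_cmvn(feats, shift, scale).shape)  # (8, 400), roughly normalized
```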
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/config.yaml
DELETED
@@ -1,56 +0,0 @@
frontend: WavFrontendOnline
frontend_conf:
    fs: 16000
    window: hamming
    n_mels: 80
    frame_length: 25
    frame_shift: 10
    dither: 0.0
    lfr_m: 5
    lfr_n: 1

model: FsmnVADStreaming
model_conf:
    sample_rate: 16000
    detect_mode: 1
    snr_mode: 0
    max_end_silence_time: 800
    max_start_silence_time: 3000
    do_start_point_detection: True
    do_end_point_detection: True
    window_size_ms: 200
    sil_to_speech_time_thres: 150
    speech_to_sil_time_thres: 150
    speech_2_noise_ratio: 1.0
    do_extend: 1
    lookback_time_start_point: 200
    lookahead_time_end_point: 100
    max_single_segment_time: 60000
    snr_thres: -100.0
    noise_frame_num_used_for_snr: 100
    decibel_thres: -100.0
    speech_noise_thres: 0.6
    fe_prior_thres: 0.0001
    silence_pdf_num: 1
    sil_pdf_ids: [0]
    speech_noise_thresh_low: -0.1
    speech_noise_thresh_high: 0.3
    output_frame_probs: False
    frame_in_ms: 10
    frame_length_ms: 25

encoder: FSMN
encoder_conf:
    input_dim: 400
    input_affine_dim: 140
    fsmn_layers: 4
    linear_dim: 250
    proj_dim: 128
    lorder: 20
    rorder: 0
    lstride: 1
    rstride: 0
    output_affine_dim: 140
    output_dim: 248
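In the encoder block above, `lorder: 20` and `rorder: 0` mean each frame's memory taps look 20 frames into the past and none into the future, which is what keeps the VAD latency controllable. A toy numpy sketch of a vectorized-coefficient FSMN memory block under that reading (stride 1, no future taps); it illustrates the mechanism, not the trained layer:

```python
import numpy as np

def fsmn_memory(h: np.ndarray, taps: np.ndarray) -> np.ndarray:
    """h: (T, D) hidden frames; taps: (lorder + 1, D) per-dimension filter taps.

    out[t] = sum_i taps[i] * h[t - i], zero-padded on the left; strictly causal
    because rorder = 0 adds no future terms.
    """
    lorder = taps.shape[0] - 1
    padded = np.pad(h, ((lorder, 0), (0, 0)))
    T = len(h)
    return sum(taps[i] * padded[lorder - i:lorder - i + T]
               for i in range(lorder + 1))

h = np.random.randn(100, 128)                    # proj_dim: 128 in the config above
out = fsmn_memory(h, np.random.randn(21, 128))   # lorder: 20 -> 21 taps incl. current
print(out.shape)  # (100, 128)
```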
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/configuration.json
DELETED
@@ -1,13 +0,0 @@
{
    "framework": "pytorch",
    "task" : "voice-activity-detection",
    "pipeline": {"type":"funasr-pipeline"},
    "model": {"type" : "funasr"},
    "file_path_metas": {
        "init_param":"model.pt",
        "config":"config.yaml",
        "frontend_conf":{"cmvn_file": "am.mvn"}},
    "model_name_in_hub": {
        "ms":"iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
        "hf":""}
}
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a7431f0169ef76ef630c945a1d2c3675d8c8c2df2ae4a6b16f8a88ba1bccfbbb
size 2261722
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/fig/struct.png
DELETED
Binary file (27.9 kB)
tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/model.pt
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3be75be477f0780277f3bae0fe489f48718f585f3a6e45d7dd1fbb1a4255fc5
size 1721366
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/.gitattributes
DELETED
@@ -1,35 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
model.pb filter=lfs diff=lfs merge=lfs -text
lm.pb filter=lfs diff=lfs merge=lfs -text
lm/lm.pb filter=lfs diff=lfs merge=lfs -text
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
DELETED
@@ -1,408 +0,0 @@
---
tasks:
- auto-speech-recognition
domain:
- audio
model-type:
- Non-autoregressive
frameworks:
- pytorch
backbone:
- transformer/conformer
metrics:
- CER
license: Apache License 2.0
language:
- cn
tags:
- FunASR
- Paraformer
- Alibaba
- INTERSPEECH 2022
datasets:
  train:
  - 60,000 hour industrial Mandarin task
  test:
  - AISHELL-1 dev/test
  - AISHELL-2 dev_android/dev_ios/dev_mic/test_android/test_ios/test_mic
  - WenetSpeech dev/test_meeting/test_net
  - SpeechIO TIOBE
  - 60,000 hour industrial Mandarin task
indexing:
  results:
  - task:
      name: Automatic Speech Recognition
    dataset:
      name: 60,000 hour industrial Mandarin task
      type: audio # optional
      args: 16k sampling rate, 8404 characters # optional
    metrics:
      - type: CER
        value: 8.53% # float
        description: greedy search, without LM, avg.
        args: default
      - type: RTF
        value: 0.0251 # float
        description: GPU inference on V100
        args: batch_size=1
widgets:
  - task: auto-speech-recognition
    inputs:
      - type: audio
        name: input
        title: Audio
    examples:
      - name: 1
        title: Example 1
        inputs:
          - name: input
            data: https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav
    inferencespec:
      cpu: 8 # number of CPUs
      memory: 4096
    model_revision: v2.0.4
finetune-support: True
---
# Paraformer-large: Model Introduction

## Highlights
- Hotword version: the [Paraformer-large hotword model](https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary) supports hotword customization: it boosts the supplied hotword list to improve hotword recall and precision.
- Long-audio version: the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) integrates VAD, ASR, punctuation, and timestamps; it can transcribe hours-long audio directly and outputs punctuated text with timestamps.

## <strong>[About the FunASR open-source project](https://github.com/alibaba-damo-academy/FunASR)</strong>
<strong>[FunASR](https://github.com/alibaba-damo-academy/FunASR)</strong> aims to build a bridge between academic research on speech recognition and its industrial application. By releasing the training and fine-tuning of industrial-grade speech recognition models, it lets researchers and developers study and productionize ASR models more conveniently and promotes the growth of the speech recognition ecosystem. Make speech recognition fun!

[**GitHub repository**](https://github.com/alibaba-damo-academy/FunASR)
| [**What's new**](https://github.com/alibaba-damo-academy/FunASR#whats-new)
| [**Installation**](https://github.com/alibaba-damo-academy/FunASR#installation)
| [**Service deployment**](https://www.funasr.com)
| [**Model zoo**](https://github.com/alibaba-damo-academy/FunASR/tree/main/model_zoo)
| [**Contact us**](https://github.com/alibaba-damo-academy/FunASR#contact)


## How the model works

Paraformer is an efficient non-autoregressive end-to-end speech recognition framework proposed by the DAMO Academy speech team. This project is the general-purpose Chinese Paraformer model, trained on tens of thousands of hours of industrially labeled audio, which ensures good general recognition quality. It can be applied in scenarios such as voice input methods, voice navigation, and intelligent meeting minutes.

<p align="center">
<img src="fig/struct.png" alt="Paraformer architecture" width="500" />

As shown above, Paraformer consists of five parts: Encoder, Predictor, Sampler, Decoder, and the loss function. The Encoder can use different network structures, e.g. self-attention, Conformer, or SAN-M. The Predictor is a two-layer FFN that predicts the number of target characters and extracts the acoustic vector corresponding to each target character. The Sampler has no learnable parameters; from the acoustic vectors and target-character vectors it produces feature vectors that carry semantic information. The Decoder is similar to its autoregressive counterpart but models context bidirectionally (autoregressive decoders are unidirectional). Besides cross-entropy (CE) and the discriminative MWER objective, the loss also includes the Predictor's MAE objective.

Its key ideas are:
- Predictor module: a Continuous Integrate-and-Fire (CIF) based predictor extracts the acoustic feature vector for each target character and predicts the number of characters in the speech more accurately.
- Sampler: by sampling, it turns acoustic feature vectors and target-character vectors into semantically informed feature vectors, which together with the bidirectional Decoder strengthens the model's context modeling.
- An MWER training criterion based on negative-sample sampling.

For more details see:
- Paper: [Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition](https://arxiv.org/abs/2206.08317)
- Paper walkthrough: [Paraformer: 高识别率、高计算效率的单轮非自回归端到端语音识别模型](https://mp.weixin.qq.com/s/xQ87isj5_wxWiQs4qUXtVw)

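To make the Predictor idea concrete, below is a toy numpy sketch of continuous integrate-and-fire (CIF): per-frame weights are accumulated, and each time the accumulator crosses a threshold of 1.0, one acoustic embedding is emitted for one target character. It is a simplified illustration (it assumes each frame's weight stays below the threshold), not Paraformer's implementation:

```python
import numpy as np

def cif(hidden: np.ndarray, alpha: np.ndarray, threshold: float = 1.0) -> np.ndarray:
    """hidden: (T, D) encoder states; alpha: (T,) non-negative firing weights."""
    fired, acc = [], 0.0
    frame = np.zeros(hidden.shape[1])
    for h, a in zip(hidden, alpha):
        if acc + a < threshold:
            acc += a
            frame += a * h                # keep integrating this token
        else:
            r = threshold - acc           # weight needed to complete the token
            fired.append(frame + r * h)   # fire: one embedding per character
            acc = a - r                   # leftover weight opens the next token
            frame = acc * h
    return np.stack(fired) if fired else np.empty((0, hidden.shape[1]))

emb = cif(np.random.randn(50, 16), np.full(50, 0.2))
print(emb.shape)  # (10, 16): sum(alpha) = 10 threshold crossings
```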
## Inference with ModelScope

- Supported input audio formats:
  - wav file path, e.g.: data/test/audios/asr_example.wav
  - pcm file path, e.g.: data/test/audios/asr_example.pcm
  - wav file URL, e.g.: https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav
  - wav binary data (bytes), e.g. bytes read directly from a file or recorded from a microphone.
  - decoded audio, e.g. audio, rate = soundfile.read("asr_example_zh.wav"); a numpy.ndarray or torch.Tensor.
  - wav.scp file in the following format:

```sh
cat wav.scp
asr_example1 data/test/audios/asr_example1.wav
asr_example2 data/test/audios/asr_example2.wav
...
```

- For a wav file URL, the API is called as follows:

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch', model_revision="v2.0.4")

rec_result = inference_pipeline(input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
print(rec_result)
```

- For pcm input, pass the sampling rate via the fs parameter, e.g.:

```python
rec_result = inference_pipeline(input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.pcm', fs=16000)
```

- For wav input, the API is called as follows:

```python
rec_result = inference_pipeline(input='asr_example_zh.wav')
```

- For a wav.scp input file (note: the file name must end in .scp), add the output_dir parameter to write the results to files:

```python
inference_pipeline(input="wav.scp", output_dir='./output_dir')
```
The results are written with the following layout:

```sh
tree output_dir/
output_dir/
└── 1best_recog
    ├── score
    └── text

1 directory, 3 files
```
score: score of the recognized path

text: the speech recognition result file

- For decoded audio input, the API is called as follows:

```python
import soundfile

waveform, sample_rate = soundfile.read("asr_example_zh.wav")
rec_result = inference_pipeline(input=waveform)
```

- Free combination of ASR, VAD, and PUNC models

The VAD and PUNC punctuation models can be combined with the ASR model as needed:
```python
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch', model_revision="v2.0.4",
    vad_model='iic/speech_fsmn_vad_zh-cn-16k-common-pytorch', vad_model_revision="v2.0.4",
    punc_model='iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch', punc_model_revision="v2.0.4",
    # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
    # spk_model_revision="v2.0.2",
)
```
To skip the PUNC model, set punc_model="" or leave the punc_model argument out. To add an LM, set lm_model='damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch' and configure the lm_weight and beam_size parameters, as sketched below.
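A sketch of the LM-enabled configuration just described; everything but the lm_weight and beam_size values is assembled from the paragraph above, and those two values are illustrative assumptions, not recommended settings:

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch', model_revision="v2.0.4",
    vad_model='iic/speech_fsmn_vad_zh-cn-16k-common-pytorch', vad_model_revision="v2.0.4",
    punc_model='iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch', punc_model_revision="v2.0.4",
    lm_model='damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch',
    lm_weight=0.15,   # illustrative value
    beam_size=10,     # illustrative value
)
rec_result = inference_pipeline(input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
print(rec_result)
```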
## 基于FunASR进行推理
|
195 |
-
|
196 |
-
下面为快速上手教程,测试音频([中文](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav),[英文](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav))
|
197 |
-
|
198 |
-
### 可执行命令行
|
199 |
-
在命令行终端执行:
|
200 |
-
|
201 |
-
```shell
|
202 |
-
funasr ++model=paraformer-zh ++vad_model="fsmn-vad" ++punc_model="ct-punc" ++input=vad_example.wav
|
203 |
-
```
|
204 |
-
|
205 |
-
注:支持单条音频文件识别,也支持文件列表,列表为kaldi风格wav.scp:`wav_id wav_path`
|
206 |
-
|
207 |
-
### python示例
|
208 |
-
#### 非实时语音识别
|
209 |
-
```python
|
210 |
-
from funasr import AutoModel
|
211 |
-
# paraformer-zh is a multi-functional asr model
|
212 |
-
# use vad, punc, spk or not as you need
|
213 |
-
model = AutoModel(model="paraformer-zh", model_revision="v2.0.4",
|
214 |
-
vad_model="fsmn-vad", vad_model_revision="v2.0.4",
|
215 |
-
punc_model="ct-punc-c", punc_model_revision="v2.0.4",
|
216 |
-
# spk_model="cam++", spk_model_revision="v2.0.2",
|
217 |
-
)
|
218 |
-
res = model.generate(input=f"{model.model_path}/example/asr_example.wav",
|
219 |
-
batch_size_s=300,
|
220 |
-
hotword='魔搭')
|
221 |
-
print(res)
|
222 |
-
```
|
223 |
-
注:`model_hub`:表示模型仓库,`ms`为选择modelscope下载,`hf`为选择huggingface下载。
|
224 |
-
|
225 |
-
#### 实时语音识别
|
226 |
-
|
227 |
-
```python
|
228 |
-
from funasr import AutoModel
|
229 |
-
|
230 |
-
chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
|
231 |
-
encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
|
232 |
-
decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
|
233 |
-
|
234 |
-
model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.4")
|
235 |
-
|
236 |
-
import soundfile
|
237 |
-
import os
|
238 |
-
|
239 |
-
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
|
240 |
-
speech, sample_rate = soundfile.read(wav_file)
|
241 |
-
chunk_stride = chunk_size[1] * 960 # 600ms
|
242 |
-
|
243 |
-
cache = {}
|
244 |
-
total_chunk_num = int(len((speech)-1)/chunk_stride+1)
|
245 |
-
for i in range(total_chunk_num):
|
246 |
-
speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
|
247 |
-
is_final = i == total_chunk_num - 1
|
248 |
-
res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
|
249 |
-
print(res)
|
250 |
-
```
|
251 |
-
|
252 |
-
注:`chunk_size`为流式延时配置,`[0,10,5]`表示上屏实时出字粒度为`10*60=600ms`,未来信息为`5*60=300ms`。每次推理输入为`600ms`(采样点数为`16000*0.6=960`),输出为对应文字,最后一个语音片段输入需要设置`is_final=True`来强制输出最后一个字。

#### Voice activity detection (non-streaming)

```python
from funasr import AutoModel

model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
res = model.generate(input=wav_file)
print(res)
```

#### Voice activity detection (streaming)

```python
from funasr import AutoModel

chunk_size = 200  # ms
model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

import soundfile

wav_file = f"{model.model_path}/example/vad_example.wav"
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = int(chunk_size * sample_rate / 1000)

cache = {}
total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size)
    if len(res[0]["value"]):
        print(res)
```

#### Punctuation restoration

```python
from funasr import AutoModel

model = AutoModel(model="ct-punc", model_revision="v2.0.4")

res = model.generate(input="那今天的会就到这里吧 happy new year 明年见")
print(res)
```

#### Timestamp prediction

```python
from funasr import AutoModel

model = AutoModel(model="fa-zh", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"
res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```

More detailed usage: ([examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining))


## Fine-tuning

Detailed usage: ([examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining))


## Benchmark

Paraformer, optimized with large-scale data and a large model, achieves current SOTA results on a series of speech recognition benchmarks. Below are its results on the academic datasets AISHELL-1, AISHELL-2, and WenetSpeech, and on the SpeechIO TIOBE public white-box evaluation. On the Chinese ASR evaluation tasks common in academia, it substantially outperforms results published in the open literature to date, and far exceeds models trained on the individual closed datasets alone.

### AISHELL-1

| AISHELL-1 test (CER%) | w/o LM | w/ LM |
|:---------------------:|:------:|:-----:|
| Espnet                | 4.90   | 4.70  |
| Wenet                 | 4.61   | 4.36  |
| K2                    | -      | 4.26  |
| Blockformer           | 4.29   | 4.05  |
| Paraformer-large      | 1.95   | 1.68  |

### AISHELL-2

| Model (CER%)     | dev_ios | test_android | test_ios | test_mic |
|:----------------:|:-------:|:------------:|:--------:|:--------:|
| Espnet           | 5.40    | 6.10         | 5.70     | 6.10     |
| WeNet            | -       | -            | 5.39     | -        |
| Paraformer-large | 2.80    | 3.13         | 2.85     | 3.06     |

### WenetSpeech

| Model (CER%)     | dev  | test_meeting | test_net |
|:----------------:|:----:|:------------:|:--------:|
| Espnet           | 9.70 | 15.90        | 8.80     |
| WeNet            | 8.60 | 17.34        | 9.26     |
| K2               | 7.76 | 13.41        | 8.71     |
| Paraformer-large | 3.57 | 6.97         | 6.74     |

### SpeechIO TIOBE

Combining the Paraformer-large model with a Transformer-LM via shallow fusion achieves current SOTA results on the SpeechIO TIOBE public white-box evaluation; the [Transformer-LM model](https://modelscope.cn/models/damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/summary) is open-sourced on ModelScope. Below are SpeechIO TIOBE white-box results without LM and with the Transformer-LM, under the decode configs listed next (see the scoring sketch after the list):

- Decode config w/o LM:
  - Decode without LM
  - Beam size: 1
- Decode config w/ LM:
  - Decode with [Transformer-LM](https://modelscope.cn/models/damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/summary)
  - Beam size: 10
  - LM weight: 0.15
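Shallow fusion here means interpolating the ASR and LM log-probabilities during beam search; a standard formulation (stated for orientation, not quoted from the original card) with the settings above is:

```math
\mathrm{score}(y) = \log p_{\mathrm{ASR}}(y \mid x) + \lambda \, \log p_{\mathrm{LM}}(y), \qquad \lambda = 0.15, \ \text{beam size} = 10
```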

| testset (CER%)       | w/o LM | w/ LM |
|:--------------------:|:------:|:-----:|
| SPEECHIO_ASR_ZH00001 | 0.49   | 0.35  |
| SPEECHIO_ASR_ZH00002 | 3.23   | 2.86  |
| SPEECHIO_ASR_ZH00003 | 1.13   | 0.80  |
| SPEECHIO_ASR_ZH00004 | 1.33   | 1.10  |
| SPEECHIO_ASR_ZH00005 | 1.41   | 1.18  |
| SPEECHIO_ASR_ZH00006 | 5.25   | 4.85  |
| SPEECHIO_ASR_ZH00007 | 5.51   | 4.97  |
| SPEECHIO_ASR_ZH00008 | 3.69   | 3.18  |
| SPEECHIO_ASR_ZH00009 | 3.02   | 2.78  |
| SPEECHIO_ASR_ZH00010 | 3.35   | 2.99  |
| SPEECHIO_ASR_ZH00011 | 1.54   | 1.25  |
| SPEECHIO_ASR_ZH00012 | 2.06   | 1.68  |
| SPEECHIO_ASR_ZH00013 | 2.57   | 2.25  |
| SPEECHIO_ASR_ZH00014 | 3.86   | 3.08  |
| SPEECHIO_ASR_ZH00015 | 3.34   | 2.67  |


## Usage and scope

Runtime environments:
- Runs on Linux-x86_64, macOS, and Windows.

Usage modes:
- Direct inference: decode input audio directly and output the target text.
- Fine-tuning: load the pretrained model and continue training on private or open-source data.

Scope and target scenarios:
- Suited to offline speech recognition, e.g. transcribing recorded audio; works even better with GPU inference. The recommended input length is under 20 s. To decode long audio, use the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary), which integrates VAD, ASR, punctuation, and timestamp prediction, can directly transcribe recordings several hours long, and outputs punctuated text with timestamps.


## Model limitations and possible bias

Differences in the feature extraction pipeline and tooling, as well as in the training toolchain, can shift the reported CER slightly (<0.1%); differences in the GPU inference environment likewise cause variation in the measured RTF.


## Related papers and citation

```BibTeX
@inproceedings{gao2022paraformer,
  title={Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition},
  author={Gao, Zhifu and Zhang, Shiliang and McLoughlin, Ian and Yan, Zhijie},
  booktitle={INTERSPEECH},
  year={2022}
}
```
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/am.mvn
DELETED
@@ -1,8 +0,0 @@
<Nnet>
<Splice> 560 560
[ 0 ]
<AddShift> 560 560
<LearnRateCoef> 0 [ -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 
-13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 ]
<Rescale> 560 560
<LearnRateCoef> 0 [ 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 
0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 ]
</Nnet>
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/config.yaml
DELETED
@@ -1,123 +0,0 @@

# network architecture
model: Paraformer
model_conf:
    ctc_weight: 0.0
    lsm_weight: 0.1
    length_normalized_loss: true
    predictor_weight: 1.0
    predictor_bias: 1
    sampling_ratio: 0.75

# encoder
encoder: SANMEncoder
encoder_conf:
    output_size: 512
    attention_heads: 4
    linear_units: 2048
    num_blocks: 50
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.1
    input_layer: pe
    pos_enc_class: SinusoidalPositionEncoder
    normalize_before: true
    kernel_size: 11
    sanm_shfit: 0
    selfattention_layer_type: sanm

# decoder
decoder: ParaformerSANMDecoder
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 16
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.1
    src_attention_dropout_rate: 0.1
    att_layer_num: 16
    kernel_size: 11
    sanm_shfit: 0

predictor: CifPredictorV2
predictor_conf:
    idim: 512
    threshold: 1.0
    l_order: 1
    r_order: 1
    tail_threshold: 0.45

# frontend related
frontend: WavFrontend
frontend_conf:
    fs: 16000
    window: hamming
    n_mels: 80
    frame_length: 25
    frame_shift: 10
    lfr_m: 7
    lfr_n: 6

specaug: SpecAugLFR
specaug_conf:
    apply_time_warp: false
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    lfr_rate: 6
    num_freq_mask: 1
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 12
    num_time_mask: 1

train_conf:
    accum_grad: 1
    grad_clip: 5
    max_epoch: 150
    val_scheduler_criterion:
    - valid
    - acc
    best_model_criterion:
    - - valid
      - acc
      - max
    keep_nbest_models: 10
    log_interval: 50

optim: adam
optim_conf:
    lr: 0.0005
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 30000

dataset: AudioDataset
dataset_conf:
    index_ds: IndexDSJsonl
    batch_sampler: DynamicBatchLocalShuffleSampler
    batch_type: example # example or length
    batch_size: 1 # if batch_type is example, batch_size is the number of samples; if length, batch_size is source_token_len+target_token_len
    max_token_length: 2048 # filter samples if source_token_len+target_token_len > max_token_length
    buffer_size: 500
    shuffle: True
    num_workers: 0

tokenizer: CharTokenizer
tokenizer_conf:
    unk_symbol: <unk>
    split_with_space: true


input_size: 560
ctc_conf:
    dropout_rate: 0.0
    ctc_type: builtin
    reduce: true
    ignore_nan_grad: true
normalize: null
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/configuration.json
DELETED
@@ -1,14 +0,0 @@
{
    "framework": "pytorch",
    "task" : "auto-speech-recognition",
    "model": {"type" : "funasr"},
    "pipeline": {"type":"funasr-pipeline"},
    "model_name_in_hub": {
        "ms":"iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
        "hf":""},
    "file_path_metas": {
        "init_param":"model.pt",
        "config":"config.yaml",
        "tokenizer_conf": {"token_list": "tokens.json", "seg_dict_file": "seg_dict"},
        "frontend_conf":{"cmvn_file": "am.mvn"}}
}
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1bd32dc78493c123f9625a66deee562aed2895f53fbc39f2cca3be7e6f4f20f
size 177572
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/struct.png
DELETED
Binary file (49.9 kB)
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pt
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5bba782a5e9196166233b9ab12ba04cadff9ef9212b4ff6153ed9290ff679025
size 880502012
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/seg_dict
DELETED
The diff for this file is too large to render.
See raw diff
tools/damo_asr/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/tokens.json
DELETED
The diff for this file is too large to render.
See raw diff