csukuangfj committed

Commit: 149d674
Parent(s): ab2e12a

add korean and thai
app.py CHANGED

@@ -138,7 +138,12 @@ def process(language: str, repo_id: str, add_punctuation: str, in_filename: str)
     recognizer = get_pretrained_model(repo_id)
     vad = get_vad()
 
-    if "whisper" in repo_id or "vosk-model" in repo_id:
+    if (
+        "whisper" in repo_id
+        or "korean" in repo_id
+        or "vosk-model" in repo_id
+        or "asr-gigaspeech2-th-zipformer" in repo_id
+    ):
         add_punctuation = "No"
 
     if add_punctuation == "Yes":
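
The widened condition forces add_punctuation = "No" for Whisper, Korean, vosk, and the new Thai zipformer models, so the later branch guarded by add_punctuation == "Yes" never runs the punctuation model on their output. Below is a minimal sketch of the same check pulled out into a standalone helper; the helper name is illustrative and not part of app.py, which inlines this test.

# Sketch only: the gating logic from the diff above as a hypothetical helper.
def should_disable_punctuation(repo_id: str) -> bool:
    return (
        "whisper" in repo_id
        or "korean" in repo_id
        or "vosk-model" in repo_id
        or "asr-gigaspeech2-th-zipformer" in repo_id
    )


# The newly supported Thai model is one of the cases that skips punctuation.
assert should_disable_punctuation(
    "yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20"
)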
model.py CHANGED

@@ -242,6 +242,8 @@ def get_pretrained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
         return russian_models[repo_id](repo_id)
     elif repo_id in korean_models:
         return korean_models[repo_id](repo_id)
+    elif repo_id in thai_models:
+        return thai_models[repo_id](repo_id)
     else:
         raise ValueError(f"Unsupported repo_id: {repo_id}")
 
@@ -399,6 +401,45 @@ def _get_korean_pre_trained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer
     return recognizer
 
 
+@lru_cache(maxsize=10)
+def _get_yifan_thai_pretrained_model(repo_id: str) -> sherpa_onnx.OfflineRecognizer:
+    assert repo_id in (
+        "yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20",
+    ), repo_id
+
+    encoder_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="encoder-epoch-12-avg-5.int8.onnx",
+        subfolder="exp",
+    )
+
+    decoder_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="decoder-epoch-12-avg-5.onnx",
+        subfolder="exp",
+    )
+
+    joiner_model = _get_nn_model_filename(
+        repo_id=repo_id,
+        filename="joiner-epoch-12-avg-5.int8.onnx",
+        subfolder="exp",
+    )
+
+    tokens = _get_token_filename(repo_id=repo_id, subfolder="data/lang_bpe_2000")
+
+    recognizer = sherpa_onnx.OfflineRecognizer.from_transducer(
+        tokens=tokens,
+        encoder=encoder_model,
+        decoder=decoder_model,
+        joiner=joiner_model,
+        num_threads=2,
+        sample_rate=16000,
+        feature_dim=80,
+    )
+
+    return recognizer
+
+
 chinese_dialect_models = {
     "csukuangfj/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04": _get_chinese_dialect_models,
 }
@@ -432,6 +473,10 @@ russian_models = {
     "alphacep/vosk-model-small-ru": _get_russian_pre_trained_model,
 }
 
+thai_models = {
+    "yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20": _get_yifan_thai_pretrained_model,
+}
+
 language_to_models = {
     "超多种中文方言": list(chinese_dialect_models.keys()),
     "Chinese+English": list(chinese_english_mixed_models.keys()),
@@ -439,4 +484,5 @@ language_to_models = {
     "English": list(english_models.keys()),
     "Russian": list(russian_models.keys()),
     "Korean": list(korean_models.keys()),
+    "Thai": list(thai_models.keys()),
 }
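
With _get_yifan_thai_pretrained_model registered in thai_models and exposed through language_to_models["Thai"], the Thai zipformer is fetched and decoded like the other offline transducer models. A hedged usage sketch follows, assuming the standard sherpa-onnx offline API (create_stream / accept_waveform / decode_stream) and a 16 kHz mono WAV file read with soundfile; the file path is a placeholder, not from this commit.

# Sketch only, not part of this commit. Assumes model.py is importable and
# that test.wav is a placeholder path to a 16 kHz mono recording of Thai speech.
import soundfile as sf

from model import get_pretrained_model

recognizer = get_pretrained_model(
    "yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20"
)

# Read the waveform as float32 samples in [-1, 1], as sherpa-onnx expects.
samples, sample_rate = sf.read("test.wav", dtype="float32")

stream = recognizer.create_stream()
stream.accept_waveform(sample_rate, samples)
recognizer.decode_stream(stream)

print(stream.result.text)

Note that, per the filenames in the diff, the int8 encoder and joiner are paired with the float32 decoder.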