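"""Generate apk-vad-asr.html and apk-vad-asr-cn.html.

The two pages list the VAD + non-streaming speech recognition APKs of
sherpa-onnx hosted at https://huggingface.co/csukuangfj/sherpa-onnx-apk
(the -cn page uses hf-mirror.com for users in China).
"""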
import os
import re
from dataclasses import dataclass
from pathlib import Path
from typing import List

BASE_URL = "https://huggingface.co/csukuangfj/sherpa-onnx-apk/resolve/main/"


@dataclass
class APK:
    major: int
    minor: int
    patch: int
    arch: str
    lang: str
    short_name: str

    def __init__(self, s):
        # Parse a file name such as
        #   sherpa-onnx-1.9.23-arm64-v8a-vad_asr-en-whisper_tiny.apk
        s = str(s)
        s = s.split("/")[-1]
        split = s.split("-")
        self.major, self.minor, self.patch = list(map(int, split[2].split(".")))
        self.arch = split[3]
        self.lang = split[5]
        self.short_name = split[6]
        if "arm" in s:
            # arm archs contain an extra "-", e.g., arm64-v8a and armeabi-v7a,
            # so the remaining fields are shifted by one
            self.arch += "-" + split[4]
            self.lang = split[6]
            self.short_name = split[7]

        # Prefix the arch so that, after the reverse sort in get_all_files(),
        # arm64-v8a is listed first, then armeabi-v7a, then x86_64 and x86
        if "armeabi" in self.arch:
            self.arch = "y" + self.arch

        if "arm64" in self.arch:
            self.arch = "z" + self.arch

        # List APKs with "small" models first within the same version/arch/lang
        if "small" in self.short_name:
            self.short_name = "zzz" + self.short_name


def sort_by_apk(x):
    x = APK(x)
    return (x.major, x.minor, x.patch, x.arch, x.lang, x.short_name)
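
# For example (an illustrative file name that follows the pattern documented in
# the generated page below):
#   sort_by_apk("vad-asr/sherpa-onnx-1.9.23-arm64-v8a-vad_asr-en-whisper_tiny.apk")
# returns (1, 9, 23, "zarm64-v8a", "en", "whisper_tiny.apk"), so newer versions
# and arm64-v8a builds are listed first after the reverse sort in get_all_files().
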
def get_all_files(d_list: List[str], suffix: str) -> List[str]:
    if isinstance(d_list, str):
        d_list = [d_list]

    # Skip APKs older than version 1.9.10
    min_major = 1
    min_minor = 9
    min_patch = 10

    ss = []
    for d in d_list:
        for root, _, files in os.walk(d):
            for f in files:
                if f.endswith(suffix):
                    major, minor, patch = list(map(int, f.split("-")[2].split(".")))
                    # Compare the full version as a tuple so that, e.g.,
                    # 1.10.x and 2.x.x are not filtered out by accident
                    if (major, minor, patch) >= (min_major, min_minor, min_patch):
                        ss.append(os.path.join(root, f))

    ans = sorted(ss, key=sort_by_apk, reverse=True)

    return list(map(lambda x: BASE_URL + str(x), ans))
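
# For example, get_all_files("vad-asr", suffix=".apk") returns download links of
# the form (illustrative file name; the path mirrors the layout under vad-asr/):
#   https://huggingface.co/csukuangfj/sherpa-onnx-apk/resolve/main/vad-asr/sherpa-onnx-1.9.23-arm64-v8a-vad_asr-en-whisper_tiny.apk
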
def to_file(filename: str, files: List[str]):
    content = r"""
<h1> APKs for VAD + non-streaming speech recognition </h1>
This page lists the <strong>VAD + non-streaming speech recognition</strong> APKs for <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a>,
one of the deployment frameworks of <a href="https://github.com/k2-fsa">the Next-gen Kaldi project</a>.
<br/>
The name of an APK follows this pattern:
<ul>
<li> sherpa-onnx-{version}-{arch}-vad_asr-{lang}-{model}.apk
</ul>
where
<ul>
<li> version: The version of sherpa-onnx, e.g., 1.9.23
<li> arch: The architecture targeted by this APK, e.g., arm64-v8a, armeabi-v7a, x86_64, x86
<li> lang: The language of the model used in the APK, e.g., en for English, zh for Chinese
<li> model: The name of the model used in the APK
</ul>

<br/>

You can download all supported models from
<a href="https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models">https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models</a>

<br/>
<br/>

<strong>Note about the license:</strong> The code of Next-gen Kaldi is released under the
<a href="https://www.apache.org/licenses/LICENSE-2.0">Apache-2.0 license</a>. However,
we support models from different frameworks. Please check the license of the model you select.

<br/>
<br/>

<!--
see https://www.tablesgenerator.com/html_tables#
-->

<style type="text/css">
.tg {border-collapse:collapse;border-spacing:0;}
.tg td{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
overflow:hidden;padding:10px 5px;word-break:normal;}
.tg th{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
font-weight:normal;overflow:hidden;padding:10px 5px;word-break:normal;}
.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:top}
.tg .tg-0lax{text-align:left;vertical-align:top}
</style>
<table class="tg">
<thead>
<tr>
<th class="tg-0pky">APK</th>
<th class="tg-0lax">Comment</th>
<th class="tg-0pky">VAD model</th>
<th class="tg-0pky">Non-streaming ASR model</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-zh-telespeech.apk</td>
<td class="tg-0lax">It supports a large number of Chinese dialects. It is converted from <a href="https://github.com/Tele-AI/TeleSpeech-ASR">https://github.com/Tele-AI/TeleSpeech-ASR</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2">sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-th-zipformer.apk</td>
<td class="tg-0lax">It supports only Thai. It is converted from <a href="https://huggingface.co/yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20/tree/main">https://huggingface.co/yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20/tree/main</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2">sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-ko-zipformer.apk</td>
<td class="tg-0lax">It supports only Korean. It is converted from <a href="https://huggingface.co/johnBamma/icefall-asr-ksponspeech-zipformer-2024-06-24">https://huggingface.co/johnBamma/icefall-asr-ksponspeech-zipformer-2024-06-24</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-korean-2024-06-24.tar.bz2">sherpa-onnx-zipformer-korean-2024-06-24.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-be_de_en_es_fr_hr_it_pl_ru_uk-fast_conformer_ctc_20k.apk</td>
<td class="tg-0lax">It supports <span style="color:red;">10 languages</span>: Belarusian, German, English, Spanish, French, Croatian, Italian, Polish, Russian, and Ukrainian. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_multilingual_fastconformer_hybrid_large_pc">STT Multilingual FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on ~20000 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-en_de_es_fr-fast_conformer_ctc_14288.apk</td>
<td class="tg-0lax">It supports <span style="color:red;">4 languages</span>: German, English, Spanish, and French. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_multilingual_fastconformer_hybrid_large_pc_blend_eu">STT European FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 14288 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-en-de-es-fr-14288.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-en-de-es-fr-14288.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-es-fast_conformer_ctc_1424.apk</td>
<td class="tg-0lax">It supports only Spanish. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_es_fastconformer_hybrid_large_pc">STT Es FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 1424 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-es-1424.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-es-1424.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-en-fast_conformer_ctc_24500.apk</td>
<td class="tg-0lax">It supports only English. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_fastconformer_hybrid_large_pc">STT En FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 24500 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-en-24500.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-en-24500.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-zh-zipformer.apk</td>
<td class="tg-0lax">It supports only Chinese.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/icefall-asr-zipformer-wenetspeech-20230615.tar.bz2">icefall-asr-zipformer-wenetspeech-20230615</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-zh-paraformer.apk</td>
<td class="tg-0lax">It supports both Chinese and English.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2">sherpa-onnx-paraformer-zh-2023-03-28</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-en-whisper_tiny.apk</td>
<td class="tg-0lax">It supports only English.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2">sherpa-onnx-whisper-tiny.en</a></td>
</tr>
</tbody>
</table>

<br/>
<br/>

<div/>
"""
    if "-cn" not in filename:
        content += """
For Chinese users, please <a href="./apk-vad-asr-cn.html">visit this address</a>,
which replaces <a href="https://huggingface.co">huggingface.co</a> with <a href="https://hf-mirror.com">hf-mirror.com</a>
<br/>
<br/>
中国用户, 请访问<a href="./apk-vad-asr-cn.html">这个地址</a>
<br/>
<br/>
"""

    with open(filename, "w") as f:
        print(content, file=f)
        # Append one download link per APK after the page header and table
        for x in files:
            name = x.rsplit("/", maxsplit=1)[-1]
            print(f'<a href="{x}">{name}</a><br/>', file=f)


def main():
    apk = get_all_files("vad-asr", suffix=".apk")
    to_file("./apk-vad-asr.html", apk)

    # For the China mirror page: use hf-mirror.com and point at the file page
    # (blob) rather than the direct download (resolve)
    apk2 = []
    for a in apk:
        a = a.replace("huggingface.co", "hf-mirror.com")
        a = a.replace("resolve", "blob")
        apk2.append(a)

    to_file("./apk-vad-asr-cn.html", apk2)


if __name__ == "__main__":
    main()