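"""Streamlit app behind the 🤗 Speech Bench: a leaderboard of automatic
speech recognition models on the Hugging Face Hub, ranked per language and
dataset by WER (or CER for character-based languages)."""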
import requests
import json
import pandas as pd
from tqdm.auto import tqdm
import streamlit as st
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load

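# some cards use a bare language code where the leaderboard expects a regional variant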
aliases_lang = {"sv": "sv-SE"}
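# languages ranked by character error rate instead of word error rate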
cer_langs = ["ja", "zh-CN", "zh-HK", "zh-TW"]
with open("languages.json") as f:
    lang2name = json.load(f)
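# datasets pinned to the top of the dataset dropdown and flagged as recommended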
suggested_datasets = [
    "librispeech_asr",
    "mozilla-foundation/common_voice_8_0",
    "mozilla-foundation/common_voice_11_0",
    "speech-recognition-community-v2/eval_data",
    "facebook/multilingual_librispeech"
]


def make_clickable(model_name):
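    """Render a model id as an HTML link to its Hugging Face Hub page."""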
    link = "https://huggingface.co/" + model_name
    return f'<a target="_blank" href="{link}">{model_name}</a>'


def get_model_ids():
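    """Return the ids of all Hub models tagged with `hf-asr-leaderboard`."""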
    api = HfApi()
    models = api.list_models(filter="hf-asr-leaderboard")
    model_ids = [x.modelId for x in models]
    return model_ids


def get_metadata(model_id):
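    """Load a model card's YAML metadata, or None if the card cannot be read."""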
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except Exception:
        # e.g. 404: the model has no README.md on the Hub
        print(f"Could not load metadata for model: {model_id}")
        return None


def parse_metric_value(value):
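    """Normalize a model-index metric value to a float percentage (or None).

    Handles strings with a "%" suffix, fractional floats (scaled to percent),
    and values wrapped in a one-element list.
    """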
    if isinstance(value, str):
        # strip a potential "%" suffix before parsing
        value = value.replace("%", "")
        try:
            value = float(value)
        except ValueError:
            value = None
    elif isinstance(value, float) and value < 1.1:
        # assume a WER given as a 0.xx fraction and convert it to percent
        value = 100 * value
    elif isinstance(value, list):
        # some cards wrap the metric value in a one-element list
        value = parse_metric_value(value[0]) if len(value) > 0 else None
    value = round(value, 2) if value is not None else None
    return value


def parse_metrics_rows(meta):
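    """Yield one evaluation row per result in the card's `model-index`.

    A typical `hf-asr-leaderboard` card carries metadata shaped roughly like
    this (the exact fields vary from model to model):

        model-index:
        - results:
          - dataset: {type: ..., config: ..., split: ..., args: {language: ...}}
            metrics:
            - {type: wer, value: ...}

    Only WER/CER metrics are kept; when a metric appears several times for
    the same dataset, the lowest value wins.
    """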
    if "model-index" not in meta or "language" not in meta:
        return None
    for result in meta["model-index"][0]["results"]:
        if "dataset" not in result or "metrics" not in result:
            continue
        dataset = result["dataset"]["type"]
        if "args" in result["dataset"] and isinstance(result["dataset"]["args"], dict) and "language" in result["dataset"]["args"]:
            lang = result["dataset"]["args"]["language"]
        else:
            lang = meta["language"]
            lang = lang[0] if isinstance(lang, list) else lang
        lang = aliases_lang[lang] if lang in aliases_lang else lang
        config = result["dataset"]["config"] if "config" in result["dataset"] else lang
        split = result["dataset"]["split"] if "split" in result["dataset"] else None
        row = {
            "dataset": dataset,
            "lang": lang,
            "config": config,
            "split": split
        }
        for metric in result["metrics"]:
            metric_type = metric["type"].lower().strip()
            if metric_type not in ["wer", "cer"]:
                continue
            value = parse_metric_value(metric["value"])
            if value is None:
                continue
            if metric_type not in row or value < row[metric_type]:
                # keep the lowest reported value for this metric
                # (e.g. a decoding run with a language model)
                row[metric_type] = value
        if "wer" in row or "cer" in row:
            yield row


@st.cache(ttl=600)
def get_data():
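    """Collect one DataFrame row per evaluation result across all tagged models."""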
    data = []
    model_ids = get_model_ids()
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        for row in parse_metrics_rows(meta):
            if row is None:
                continue
            row["model_id"] = model_id
            data.append(row)
    return pd.DataFrame.from_records(data)


def sort_datasets(datasets):
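    """Sort dataset ids alphabetically, then pin the suggested datasets to the top."""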
    # 1. sort by name
    datasets = sorted(datasets)
    # 2. bring the suggested datasets to the top and append the rest
    datasets = sorted(
        datasets,
        key=lambda dataset_id: suggested_datasets.index(dataset_id)
        if dataset_id in suggested_datasets
        else len(suggested_datasets),
    )
    return datasets


@st.cache(ttl=600)
def generate_dataset_info(datasets):
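    """Build a markdown list of dataset links, flagging the suggested ones as recommended."""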
    msg = """
    The models have been trained and/or evaluated on the following datasets:
    """
    for dataset_id in datasets:
        if dataset_id in suggested_datasets:
            msg += f"* [{dataset_id}](https://hf.co/datasets/{dataset_id}) *(recommended)*\n"
        else:
            msg += f"* [{dataset_id}](https://hf.co/datasets/{dataset_id})\n"

    msg = "\n".join([line.strip() for line in msg.split("\n")])
    return msg


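# build the leaderboard table once per session (cached for 10 minutes)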
dataframe = get_data()
dataframe = dataframe.fillna("")

st.sidebar.image("logo.png", width=200)

st.markdown("# The πŸ€— Speech Bench")

st.markdown(
    f"This is a leaderboard of **{dataframe['model_id'].nunique()}** speech recognition models "
    f"and **{dataframe['dataset'].nunique()}** datasets.\n\n"
    "β¬… Please select the language you want to find a model for from the dropdown on the left."
)

lang = st.sidebar.selectbox(
    "Language",
    sorted(dataframe["lang"].unique(), key=lambda key: lang2name.get(key, key)),
    format_func=lambda key: lang2name.get(key, key),
    index=0,
)
lang_df = dataframe[dataframe.lang == lang]

sorted_datasets = sort_datasets(lang_df["dataset"].unique())

lang_name = lang2name.get(lang, "")
num_models = len(lang_df["model_id"].unique())
num_datasets = len(lang_df["dataset"].unique())
text = f"""
For the `{lang}` ({lang_name}) language, there are currently `{num_models}` model(s) 
trained on `{num_datasets}` dataset(s) available for `automatic-speech-recognition`.
"""
st.markdown(text)

st.sidebar.markdown("""
Choose the dataset that is most relevant to your task and select it from the dropdown below:
""")

dataset = st.sidebar.selectbox(
    "Dataset",
    sorted_datasets,
    index=0,
)
dataset_df = lang_df[lang_df.dataset == dataset]

text = generate_dataset_info(sorted_datasets)
st.sidebar.markdown(text)

# sort by WER or CER depending on the language
metric_col = "cer" if lang in cer_langs else "wer"
if dataset_df["config"].nunique() > 1:
    # if there are more than one dataset config
    dataset_df = dataset_df[["model_id", "config", metric_col]]
    dataset_df = dataset_df.pivot_table(index=['model_id'], columns=["config"], values=[metric_col])
    dataset_df = dataset_df.reset_index(level=0)
else:
    dataset_df = dataset_df[["model_id", metric_col]]
dataset_df.sort_values(dataset_df.columns[-1], inplace=True)
dataset_df = dataset_df.fillna("")

dataset_df.rename(
    columns={
        "model_id": "Model",
        "wer": "WER (lower is better)",
        "cer": "CER (lower is better)",
    },
    inplace=True,
)

st.markdown(
    "Please click on the model's name to be redirected to its model card which includes documentation and examples on how to use it."
)

# display the model ranks
dataset_df = dataset_df.reset_index(drop=True)
dataset_df.index += 1

# turn the model ids into clickable links
dataset_df["Model"] = dataset_df["Model"].apply(make_clickable)

table_html = dataset_df.to_html(escape=False)
table_html = table_html.replace("<th>", '<th align="left">')  # left-align the headers
st.write(table_html, unsafe_allow_html=True)

if lang in cer_langs:
    st.markdown(
        "---\n\* **CER** is [Char Error Rate](https://huggingface.co/metrics/cer)"
    )
else:
    st.markdown(
        "---\n\* **WER** is [Word Error Rate](https://huggingface.co/metrics/wer)"
    )

st.markdown(
    "Want to beat the Leaderboard? Don't see your speech recognition model show up here? "
    "Simply add the `hf-asr-leaderboard` tag to your model card alongside your evaluation metrics. "
    "Try our [Metrics Editor](https://huggingface.co/spaces/huggingface/speech-bench-metrics-editor) to get started!"
)