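"""Streamlit app rendering the final leaderboard for the 🤗 Whisper Event.

It pulls self-reported (model-index) evaluation results from the Hugging Face
Hub, keeps the best-scoring model per Common Voice 11 language, and displays
the result in an AgGrid table.
"""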
import pandas as pd
import streamlit as st
from huggingface_hub import HfApi
from utils import ascending_metrics, metric_ranges, LANGUAGES
import numpy as np
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode, ColumnsAutoSizeMode
from os.path import exists
import threading

st.set_page_config(layout="wide")


def get_model_infos():
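    """Fetch metadata for every Hub model that declares a model-index (self-reported eval results)."""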
    api = HfApi()
    model_infos = api.list_models(filter="model-index", cardData=True)
    return model_infos


def parse_metric_value(value):
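    """Normalize a raw metric value into a float rounded to 4 places, or None.

    Illustrative examples (assumed typical model-card values, not exhaustive):
        parse_metric_value("34.02%")    -> 34.02   # percent sign stripped
        parse_metric_value([0.1234567]) -> 0.1235
        parse_metric_value("n/a")       -> None
    """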
    if isinstance(value, str):
        # Strip a percent sign before parsing, e.g. "34.02%" -> 34.02.
        value = value.replace("%", "")
        try:
            value = float(value)
        except ValueError:
            value = None
    elif isinstance(value, list):
        value = value[0] if len(value) > 0 else None
    value = round(value, 4) if isinstance(value, float) else None
    return value


def parse_metrics_rows(meta, only_verified=False):
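    """Yield one leaderboard row per result in the card's model-index metadata.

    Each yielded row is a flat dict, for example (illustrative values):
        {"dataset": "mozilla-foundation/common_voice_11_0", "split": "test",
         "config": "hi", "wer": 12.34}
    Results with no usable metrics, or with a metric outside its known valid
    range, are skipped.
    """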
    if not isinstance(meta["model-index"], list) or len(meta["model-index"]) == 0 or "results" not in meta["model-index"][0]:
        return None
    for result in meta["model-index"][0]["results"]:
        if not isinstance(result, dict) or "dataset" not in result or "metrics" not in result or "type" not in result["dataset"]:
            continue
        dataset = result["dataset"]["type"]
        if dataset == "":
            continue
        row = {"dataset": dataset, "split": "-unspecified-", "config": "-unspecified-"}
        if "split" in result["dataset"]:
            row["split"] = result["dataset"]["split"]
        if "config" in result["dataset"]:
            row["config"] = result["dataset"]["config"]
        no_results = True
        incorrect_results = False
        for metric in result["metrics"]:
            name = metric["type"].lower().strip()

            if name in ("model_id", "dataset", "split", "config", "pipeline_tag", "only_verified"):
                # Metrics are not allowed to be named "dataset", "split", "config", "pipeline_tag"
                continue
            value = parse_metric_value(metric.get("value", None))
            if value is None:
                continue
            new_metric_better = name not in row or (
                value < row[name] if name in ascending_metrics else value > row[name]
            )
            if new_metric_better:
                # Overwrite the metric only if the new value is better.

                if only_verified and not metric.get("verified", False):
                    continue
                no_results = False
                row[name] = value
                if name in metric_ranges:
                    low, high = metric_ranges[name]
                    if value < low or value > high:
                        incorrect_results = True
        if no_results or incorrect_results:
            continue
        yield row


@st.cache(ttl=0)
def get_data_wrapper():
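    """Return the metrics dataframe, serving the pickled copy (if any) while a background refresh runs."""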
    def get_data():
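        """Rebuild the full metrics dataframe from the Hub and pickle it to cache.pkl."""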
        data = []
        print("getting model infos")
        model_infos = get_model_infos()
        print("got model infos")
        for model_info in model_infos:
            meta = model_info.cardData
            if meta is None:
                continue
            for row in parse_metrics_rows(meta):
                if row is None:
                    continue
                row["model_id"] = model_info.id
                row["pipeline_tag"] = model_info.pipeline_tag
                row["only_verified"] = False
                data.append(row)
            for row in parse_metrics_rows(meta, only_verified=True):
                if row is None:
                    continue
                row["model_id"] = model_info.id
                row["pipeline_tag"] = model_info.pipeline_tag
                row["only_verified"] = True
                data.append(row)
        dataframe = pd.DataFrame.from_records(data)
        dataframe.to_pickle("cache.pkl")

    if exists("cache.pkl"):
        # If we have saved the results previously, call an asynchronous process
        # to fetch the results and update the saved file. Don't make users wait
        # while we fetch the new results. Instead, display the old results for
        # now. The new results should be loaded when this method
        # is called again.
        dataframe = pd.read_pickle("cache.pkl")
        t = threading.Thread(name="get_data procs", target=get_data)
        t.start()
    else:
        # We have to make the users wait during the first startup of this app.
        get_data()
        dataframe = pd.read_pickle("cache.pkl")

    return dataframe


dataframe = get_data_wrapper()

st.markdown("# 🤗 Whisper Event: Final Leaderboard")

# Streamlit reruns this script on every interaction, so remember the query
# params from the first page load.
query_params = st.experimental_get_query_params()
if "first_query_params" not in st.session_state:
    st.session_state.first_query_params = query_params
first_query_params = st.session_state.first_query_params

default_config = first_query_params.get("config", [None])[0]
default_metric = first_query_params.get("metric", [None])[0]

only_verified_results = False
task = "automatic-speech-recognition"
dataset = "mozilla-foundation/common_voice_11_0"
split = "test"

dataframe = dataframe[dataframe.only_verified == only_verified_results]

current_query_params = {"dataset": [dataset], "only_verified": [int(only_verified_results)], "task": [task],
                        "split": [split]}

st.experimental_set_query_params(**current_query_params)

dataset_df = dataframe[dataframe.dataset == dataset]
dataset_df = dataset_df[dataset_df.split == split]

dataset_df = dataset_df.dropna(axis="columns", how="all")

selectable_metrics = ["wer", "cer"]
sorting_metric = "wer"

selectable_configs = list(set(dataset_df["config"]))
selectable_configs.sort(key=lambda name: name.lower())
if "-unspecified-" in selectable_configs:
    selectable_configs.remove("-unspecified-")
selectable_configs = [config for config in selectable_configs if config in LANGUAGES]

dataset_df = dataset_df[["config", "model_id"] + selectable_metrics]
all_ds = []

for config in selectable_configs:
    dataset_df_ds = dataset_df[dataset_df.config == config]

    dataset_df_ds = dataset_df_ds.dropna(thresh=2)  # Want at least two non-na values (one for model_id and one for a metric)

    # Make the default metric appear right after model names and dataset names
    cols = dataset_df_ds.columns.tolist()
    cols.remove(sorting_metric)
    sorting_metric_index = 1 if dataset != "-any-" else 2
    cols = cols[:sorting_metric_index] + [sorting_metric] + cols[sorting_metric_index:]
    dataset_df_ds = dataset_df_ds[cols]

    # Sort the leaderboard, giving the sorting metric highest priority and then ordering by other metrics in the case of equal values.
    dataset_df_ds = dataset_df_ds.sort_values(by=cols[sorting_metric_index:], ascending=[metric in ascending_metrics for metric in cols[sorting_metric_index:]])
    dataset_df_ds = dataset_df_ds.replace(np.nan, '-')

    # Keep only the best-ranked model for this language.
    if len(dataset_df_ds) > 0:
        all_ds.append(dataset_df_ds.iloc[0])

all_ds = pd.DataFrame(all_ds, columns=["config", "model_id", "wer", "cer"])
language_names = [LANGUAGES[config] for config in all_ds["config"]]
all_ds.insert(1, "language", language_names)

# Make the leaderboard
gb = GridOptionsBuilder.from_dataframe(all_ds)
gb.configure_default_column(sortable=False)
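# Render each model_id cell as a link to the model's page on the Hub.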
gb.configure_column(
    "model_id",
    cellRenderer=JsCode('''function(params) {return '<a target="_blank" href="https://huggingface.co/'+params.value+'">'+params.value+'</a>'}'''),
)

# Display the metric columns as filterable numeric columns with 2-decimal formatting.
for name in selectable_metrics:
    gb.configure_column(name, type=["numericColumn", "numberColumnFilter", "customNumericFormat"], precision=2, aggFunc='sum')

gb.configure_column(
    sorting_metric,
    sortable=True,
    #cellStyle=JsCode('''function(params) { return {'backgroundColor': '#FFD21E'}}''')
)

go = gb.build()
fit_columns = len(all_ds.columns) < 10
# Size the grid so every row is visible, with extra headroom for the header
# when there are many columns.
AgGrid(all_ds, gridOptions=go, height=28*len(all_ds) + (35 if fit_columns else 41), allow_unsafe_jscode=True, enable_enterprise_modules=False, columns_auto_size_mode=ColumnsAutoSizeMode.FIT_CONTENTS)