sanchit-gandhi committed
Commit c4f6a3f
0 Parent(s):

Duplicate from sanchit-gandhi/leaderboards


Co-authored-by: Sanchit Gandhi <sanchit-gandhi@users.noreply.huggingface.co>
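
This commit duplicates the Space sanchit-gandhi/leaderboards. For reference, the same duplication can also be done programmatically; a minimal sketch, assuming a recent huggingface_hub release that provides duplicate_space (newer than the 0.11.1 pinned in requirements.txt):

from huggingface_hub import duplicate_space

# Duplicate the source Space into your own namespace (requires a token with write access).
duplicate_space("sanchit-gandhi/leaderboards")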

Files changed (4):
  1. README.md +15 -0
  2. app.py +250 -0
  3. requirements.txt +4 -0
  4. utils.py +37 -0
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: Leaderboards
+ emoji: 📈
+ colorFrom: red
+ colorTo: yellow
+ sdk: streamlit
+ sdk_version: 1.10.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: sanchit-gandhi/leaderboards
+ ---
+
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,250 @@
+ import pandas as pd
+ import streamlit as st
+ from huggingface_hub import HfApi
+ from utils import ascending_metrics, metric_ranges, LANGUAGES
+ import numpy as np
+ from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
+ from os.path import exists
+ import threading
+
+ st.set_page_config(layout="wide")
+
+
+ def get_model_infos():
+     api = HfApi()
+     model_infos = api.list_models(filter="model-index", cardData=True)
+     return model_infos
+
+
+ def parse_metric_value(value):
+     if isinstance(value, str):
+         value = "".join(value.split("%"))
+         try:
+             value = float(value)
+         except: # noqa: E722
+             value = None
+     elif isinstance(value, list):
+         if len(value) > 0:
+             value = value[0]
+         else:
+             value = None
+     value = round(value, 4) if isinstance(value, float) else None
+     return value
+
+
+ def parse_metrics_rows(meta, only_verified=False):
+     if not isinstance(meta["model-index"], list) or len(meta["model-index"]) == 0 or "results" not in meta["model-index"][0]:
+         return None
+     for result in meta["model-index"][0]["results"]:
+         if not isinstance(result, dict) or "dataset" not in result or "metrics" not in result or "type" not in result["dataset"]:
+             continue
+         dataset = result["dataset"]["type"]
+         if dataset == "":
+             continue
+         row = {"dataset": dataset, "split": "-unspecified-", "config": "-unspecified-"}
+         if "split" in result["dataset"]:
+             row["split"] = result["dataset"]["split"]
+         if "config" in result["dataset"]:
+             row["config"] = result["dataset"]["config"]
+         no_results = True
+         incorrect_results = False
+         for metric in result["metrics"]:
+             name = metric["type"].lower().strip()
+
+             if name in ("model_id", "dataset", "split", "config", "pipeline_tag", "only_verified"):
+                 # Metrics are not allowed to be named "dataset", "split", "config", "pipeline_tag"
+                 continue
+             value = parse_metric_value(metric.get("value", None))
+             if value is None:
+                 continue
+             if name in row:
+                 new_metric_better = value < row[name] if name in ascending_metrics else value > row[name]
+             if name not in row or new_metric_better:
+                 # overwrite the metric if the new value is better.
+
+                 if only_verified:
+                     if "verified" in metric and metric["verified"]:
+                         no_results = False
+                         row[name] = value
+                         if name in metric_ranges:
+                             if value < metric_ranges[name][0] or value > metric_ranges[name][1]:
+                                 incorrect_results = True
+                 else:
+                     no_results = False
+                     row[name] = value
+                     if name in metric_ranges:
+                         if value < metric_ranges[name][0] or value > metric_ranges[name][1]:
+                             incorrect_results = True
+         if no_results or incorrect_results:
+             continue
+         yield row
+
+
+ @st.cache(ttl=0)
+ def get_data_wrapper():
+     def get_data(dataframe=None, verified_dataframe=None):
+         data = []
+         verified_data = []
+         print("getting model infos")
+         model_infos = get_model_infos()
+         print("got model infos")
+         for model_info in model_infos:
+             meta = model_info.cardData
+             if meta is None:
+                 continue
+             for row in parse_metrics_rows(meta):
+                 if row is None:
+                     continue
+                 row["model_id"] = model_info.id
+                 row["pipeline_tag"] = model_info.pipeline_tag
+                 row["only_verified"] = False
+                 data.append(row)
+             for row in parse_metrics_rows(meta, only_verified=True):
+                 if row is None:
+                     continue
+                 row["model_id"] = model_info.id
+                 row["pipeline_tag"] = model_info.pipeline_tag
+                 row["only_verified"] = True
+                 data.append(row)
+         dataframe = pd.DataFrame.from_records(data)
+         dataframe.to_pickle("cache.pkl")
+
+     if exists("cache.pkl"):
+         # If we have saved the results previously, call an asynchronous process
+         # to fetch the results and update the saved file. Don't make users wait
+         # while we fetch the new results. Instead, display the old results for
+         # now. The new results should be loaded when this method
+         # is called again.
+         dataframe = pd.read_pickle("cache.pkl")
+         t = threading.Thread(name="get_data procs", target=get_data)
+         t.start()
+     else:
+         # We have to make the users wait during the first startup of this app.
+         get_data()
+         dataframe = pd.read_pickle("cache.pkl")
+
+     return dataframe
+
+
+ dataframe = get_data_wrapper()
+
+ st.markdown("# 🤗 Whisper Event: Final Leaderboard")
+
+ query_params = st.experimental_get_query_params()
+ if "first_query_params" not in st.session_state:
+     st.session_state.first_query_params = query_params
+ first_query_params = st.session_state.first_query_params
+
+ default_config = first_query_params.get("config", [None])[0]
+ default_metric = first_query_params.get("metric", [None])[0]
+
+ only_verified_results = False
+ task = "automatic-speech-recognition"
+ dataset = "mozilla-foundation/common_voice_11_0"
+ split = "test"
+
+ dataframe = dataframe[dataframe.only_verified == only_verified_results]
+
+ current_query_params = {"dataset": [dataset], "only_verified": [int(only_verified_results)], "task": [task],
+                         "split": [split]}
+
+ st.experimental_set_query_params(**current_query_params)
+
+ dataset_df = dataframe[dataframe.dataset == dataset]
+ dataset_df = dataset_df[dataset_df.split == split]
+
+ dataset_df = dataset_df.dropna(axis="columns", how="all")
+
+ dataset = st.sidebar.selectbox(
+     "Dataset",
+     [dataset],
+     index=0,
+ )
+
+ selectable_configs = list(set(dataset_df["config"]))
+ selectable_configs.sort(key=lambda name: name.lower())
+ selectable_configs.remove("-unspecified-")
+ selectable_configs = [config for config in selectable_configs if config in LANGUAGES]
+
+ visual_configs = [f"{config}: {LANGUAGES[config]}" for config in selectable_configs]
+
+ config = st.sidebar.selectbox(
+     "Language",
+     visual_configs,
+     index=0,
+     help="Filter the results on the current leaderboard by language."
+ )
+
+ config = config.split(":")[0]
+
+ dataset_df = dataset_df[dataset_df.config == config]
+
+ split = st.sidebar.selectbox(
+     "Split",
+     [split],
+     index=0,
+ )
+
+ not_selectable_metrics = ["model_id", "dataset", "split", "config", "pipeline_tag", "only_verified"]
+ # also ignore irrelevant ASR metrics
+ not_selectable_metrics.extend(["wer_without_norm", "mer"])
+
+ selectable_metrics = list(filter(lambda column: column not in not_selectable_metrics, dataset_df.columns))
+
+ dataset_df = dataset_df.filter(["model_id"] + (["dataset"] if dataset == "-any-" else []) + selectable_metrics)
+ dataset_df = dataset_df.dropna(thresh=2)  # Want at least two non-na values (one for model_id and one for a metric).
+
+ sorting_metric = st.sidebar.radio(
+     "Sorting Metric",
+     selectable_metrics,
+     index=selectable_metrics.index(default_metric) if default_metric in selectable_metrics else 0,
+     help="Select the metric to sort the leaderboard by. Click on the metric name in the leaderboard to reverse the sorting order."
+ )
+
+ current_query_params.update({"metric": [sorting_metric]})
+
+ st.experimental_set_query_params(**current_query_params)
+
+ st.markdown(
+     f"This is the leaderboard for {LANGUAGES[config]} ({config})."
+ )
+
+ st.markdown(
+     "Please click on the model's name to be redirected to its model card."
+ )
+
+ st.markdown(
+     "Want to beat the leaderboard? Don't see your model here? Simply ..."
+ )
+
+ # Make the default metric appear right after model names and dataset names
+ cols = dataset_df.columns.tolist()
+ cols.remove(sorting_metric)
+ sorting_metric_index = 1 if dataset != "-any-" else 2
+ cols = cols[:sorting_metric_index] + [sorting_metric] + cols[sorting_metric_index:]
+ dataset_df = dataset_df[cols]
+
+ # Sort the leaderboard, giving the sorting metric highest priority and then ordering by other metrics in the case of equal values.
+ dataset_df = dataset_df.sort_values(by=cols[sorting_metric_index:], ascending=[metric in ascending_metrics for metric in cols[sorting_metric_index:]])
+ dataset_df = dataset_df.replace(np.nan, '-')
+
+ # Make the leaderboard
+ gb = GridOptionsBuilder.from_dataframe(dataset_df)
+ gb.configure_default_column(sortable=False)
+ gb.configure_column(
+     "model_id",
+     cellRenderer=JsCode('''function(params) {return '<a target="_blank" href="https://huggingface.co/'+params.value+'">'+params.value+'</a>'}'''),
+ )
+
+ for name in selectable_metrics:
+     gb.configure_column(name, type=["numericColumn","numberColumnFilter","customNumericFormat"], precision=4, aggFunc='sum')
+
+ gb.configure_column(
+     sorting_metric,
+     sortable=True,
+     cellStyle=JsCode('''function(params) { return {'backgroundColor': '#FFD21E'}}''')
+ )
+
+ go = gb.build()
+ fit_columns = len(dataset_df.columns) < 10
+ AgGrid(dataset_df, gridOptions=go, height=28*len(dataset_df) + (35 if fit_columns else 41), allow_unsafe_jscode=True, fit_columns_on_grid_load=fit_columns, enable_enterprise_modules=False)
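
The leaderboard rows parsed above come from model-index metadata in Hub model cards. A minimal sketch of the parsed cardData structure that parse_metrics_rows expects, with illustrative values (only the fields actually read by the code are required):

meta = {
    "model-index": [
        {
            "name": "whisper-small-hi",  # illustrative model name
            "results": [
                {
                    "task": {"type": "automatic-speech-recognition"},
                    "dataset": {
                        "type": "mozilla-foundation/common_voice_11_0",
                        "config": "hi",
                        "split": "test",
                    },
                    "metrics": [
                        {"type": "wer", "value": 32.0, "verified": False},  # illustrative value
                    ],
                },
            ],
        },
    ],
}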
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ pandas==1.5.1
+ huggingface_hub==0.11.1
+ numpy==1.23.4
+ streamlit-aggrid==0.3.3
utils.py ADDED
@@ -0,0 +1,37 @@
+ ascending_metrics = {
+     "wer",
+     "cer",
+     "loss",
+     "mae",
+     "mahalanobis",
+     "mse",
+     "perplexity",
+     "ter",
+ }
+
+ metric_ranges = {
+     "accuracy": (0, 1),
+     "precision": (0, 1),
+     "recall": (0, 1),
+     "macro f1": (0, 1),
+     "micro f1": (0, 1),
+     "pearson": (-1, 1),
+     "matthews_correlation": (-1, 1),
+     "spearmanr": (-1, 1),
+     "google_bleu": (0, 1),
+     "precision@10": (0, 1),
+     "mae": (0, 1),
+     "mauve": (0, 1),
+     "frontier_integral": (0, 1),
+     "mean_iou": (0, 1),
+     "mean_accuracy": (0, 1),
+     "overall_accuracy": (0, 1),
+     "meteor": (0, 1),
+     "mse": (0, 1),
+     "perplexity": (0, float("inf")),
+     "rogue1": (0, 1),
+     "rogue2": (0, 1),
+     "sari": (0, 100),
+ }
+
+ LANGUAGES = {'ab': 'Abkhaz', 'ace': 'Acehnese', 'ady': 'Adyghe', 'af': 'Afrikaans', 'am': 'Amharic', 'an': 'Aragonese', 'ar': 'Arabic', 'arn': 'Mapudungun', 'as': 'Assamese', 'ast': 'Asturian', 'az': 'Azerbaijani', 'ba': 'Bashkir', 'bas': 'Basaa', 'be': 'Belarusian', 'bg': 'Bulgarian', 'bn': 'Bengali', 'br': 'Breton', 'bs': 'Bosnian', 'bxr': 'Buryat', 'ca': 'Catalan', 'cak': 'Kaqchikel', 'ckb': 'Central Kurdish', 'cnh': 'Hakha Chin', 'co': 'Corsican', 'cs': 'Czech', 'cv': 'Chuvash', 'cy': 'Welsh', 'da': 'Danish', 'de': 'German', 'dsb': 'Sorbian, Lower', 'dv': 'Dhivehi', 'dyu': 'Dioula', 'el': 'Greek', 'en': 'English', 'eo': 'Esperanto', 'es': 'Spanish', 'et': 'Estonian', 'eu': 'Basque', 'fa': 'Persian', 'ff': 'Fulah', 'fi': 'Finnish', 'fo': 'Faroese', 'fr': 'French', 'fy-NL': 'Frisian', 'ga-IE': 'Irish', 'gl': 'Galician', 'gn': 'Guarani', 'gom': 'Goan Konkani', 'ha': 'Hausa', 'he': 'Hebrew', 'hi': 'Hindi', 'hil': 'Hiligaynon', 'hr': 'Croatian', 'hsb': 'Sorbian, Upper', 'ht': 'Haitian', 'hu': 'Hungarian', 'hy-AM': 'Armenian', 'hyw': 'Armenian Western', 'ia': 'Interlingua', 'id': 'Indonesian', 'ie': 'Interlingue', 'ig': 'Igbo', 'is': 'Icelandic', 'it': 'Italian', 'izh': 'Izhorian', 'ja': 'Japanese', 'jbo': 'Lojban', 'ka': 'Georgian', 'kaa': 'Karakalpak', 'kab': 'Kabyle', 'kbd': 'Kabardian', 'ki': 'Kikuyu', 'kk': 'Kazakh', 'km': 'Khmer', 'kmr': 'Kurmanji Kurdish', 'kn': 'Kannada', 'knn': 'Konkani (Devanagari)', 'ko': 'Korean', 'kpv': 'Komi-Zyrian', 'kw': 'Cornish', 'ky': 'Kyrgyz', 'lb': 'Luxembourgish', 'lg': 'Luganda', 'lij': 'Ligurian', 'ln': 'Lingala', 'lo': 'Lao', 'lt': 'Lithuanian', 'lv': 'Latvian', 'mai': 'Maithili', 'mdf': 'Moksha', 'mg': 'Malagasy', 'mhr': 'Meadow Mari', 'mk': 'Macedonian', 'ml': 'Malayalam', 'mn': 'Mongolian', 'mni': 'Meetei Lon', 'mos': 'Mossi', 'mr': 'Marathi', 'mrj': 'Hill Mari', 'ms': 'Malay', 'mt': 'Maltese', 'my': 'Burmese', 'myv': 'Erzya', 'nan-tw': 'Taiwanese (Minnan)', 'nb-NO': 'Norwegian Bokmål', 'nd': 'IsiNdebele (North)', 'ne-NP': 'Nepali', 'nia': 'Nias', 'nl': 'Dutch', 'nn-NO': 'Norwegian Nynorsk', 'nr': 'IsiNdebele (South)', 'nso': 'Northern Sotho', 'nyn': 'Runyankole', 'oc': 'Occitan', 'om': 'Afaan Ormoo', 'or': 'Odia', 'pa-IN': 'Punjabi', 'pap-AW': 'Papiamento (Aruba)', 'pl': 'Polish', 'ps': 'Pashto', 'pt': 'Portuguese', 'quc': "K'iche'", 'quy': 'Quechua Chanka', 'rm-sursilv': 'Romansh Sursilvan', 'rm-vallader': 'Romansh Vallader', 'ro': 'Romanian', 'ru': 'Russian', 'rw': 'Kinyarwanda', 'sah': 'Sakha', 'sat': 'Santali (Ol Chiki)', 'sc': 'Sardinian', 'scn': 'Sicilian', 'sdh': 'Southern Kurdish', 'shi': 'Shilha', 'si': 'Sinhala', 'sk': 'Slovak', 'skr': 'Saraiki', 'sl': 'Slovenian', 'snk': 'Soninke', 'so': 'Somali', 'sq': 'Albanian', 'sr': 'Serbian', 'ss': 'Siswati', 'st': 'Southern Sotho', 'sv-SE': 'Swedish', 'sw': 'Swahili', 'syr': 'Syriac', 'ta': 'Tamil', 'te': 'Telugu', 'tg': 'Tajik', 'th': 'Thai', 'ti': 'Tigrinya', 'tig': 'Tigre', 'tk': 'Turkmen', 'tl': 'Tagalog', 'tn': 'Setswana', 'tok': 'Toki Pona', 'tr': 'Turkish', 'ts': 'Xitsonga', 'tt': 'Tatar', 'tw': 'Twi', 'ty': 'Tahitian', 'uby': 'Ubykh', 'udm': 'Udmurt', 'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Tshivenda', 'vec': 'Venetian', 'vi': 'Vietnamese', 'vot': 'Votic', 'xh': 'Xhosa', 'yi': 'Yiddish', 'yo': 'Yoruba', 'yue': 'Cantonese', 'zgh': 'Tamazight', 'zh-CN': 'Chinese (China)', 'zh-HK': 'Chinese (Hong Kong)', 'zh-TW': 'Chinese (Taiwan)', 'zu': 'Zulu'}
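
For reference, app.py consumes the tables above roughly as sketched below; the helper names value_in_range and is_better are illustrative and not part of the repository:

from utils import ascending_metrics, metric_ranges

def value_in_range(name, value):
    # Metric values outside the known range for that metric are treated as incorrect results and dropped.
    if name in metric_ranges:
        low, high = metric_ranges[name]
        return low <= value <= high
    return True

def is_better(name, new_value, old_value):
    # For ascending metrics such as "wer" or "cer", lower is better; otherwise higher is better.
    return new_value < old_value if name in ascending_metrics else new_value > old_value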