sanchit-gandhi and Tristan committed
Commit dbf0e3c (0 parents)

Duplicate from autoevaluate/leaderboards


Co-authored-by: Tristan Thrush <Tristan@users.noreply.huggingface.co>

Files changed (4)
  1. README.md +15 -0
  2. app.py +316 -0
  3. requirements.txt +4 -0
  4. utils.py +35 -0
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: Leaderboards
+ emoji: 📈
+ colorFrom: red
+ colorTo: yellow
+ sdk: streamlit
+ sdk_version: 1.10.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: autoevaluate/leaderboards
+ ---
+
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,316 @@
+ import pandas as pd
+ import streamlit as st
+ from huggingface_hub import HfApi, hf_hub_download
+ from huggingface_hub.repocard import metadata_load
+ from utils import ascending_metrics, metric_ranges
+ import numpy as np
+ from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
+ from os.path import exists
+ import threading
+
+ st.set_page_config(layout="wide")
+
+
+ def get_model_infos():
+     api = HfApi()
+     model_infos = api.list_models(filter="model-index", cardData=True)
+     return model_infos
+
+
+ def parse_metric_value(value):
+     if isinstance(value, str):
+         value = "".join(value.split("%"))  # strip any "%" sign so the string can be cast to float
+         try:
+             value = float(value)
+         except:  # noqa: E722
+             value = None
+     elif isinstance(value, list):
+         if len(value) > 0:
+             value = value[0]
+         else:
+             value = None
+     value = round(value, 4) if isinstance(value, float) else None
+     return value
+
+
+ def parse_metrics_rows(meta, only_verified=False):
+     if not isinstance(meta["model-index"], list) or len(meta["model-index"]) == 0 or "results" not in meta["model-index"][0]:
+         return None
+     for result in meta["model-index"][0]["results"]:
+         if not isinstance(result, dict) or "dataset" not in result or "metrics" not in result or "type" not in result["dataset"]:
+             continue
+         dataset = result["dataset"]["type"]
+         if dataset == "":
+             continue
+         row = {"dataset": dataset, "split": "-unspecified-", "config": "-unspecified-"}
+         if "split" in result["dataset"]:
+             row["split"] = result["dataset"]["split"]
+         if "config" in result["dataset"]:
+             row["config"] = result["dataset"]["config"]
+         no_results = True
+         incorrect_results = False
+         for metric in result["metrics"]:
+             name = metric["type"].lower().strip()
+
+             if name in ("model_id", "dataset", "split", "config", "pipeline_tag", "only_verified"):
+                 # Metrics are not allowed to be named "dataset", "split", "config", "pipeline_tag"
+                 continue
+             value = parse_metric_value(metric.get("value", None))
+             if value is None:
+                 continue
+             if name in row:
+                 new_metric_better = value < row[name] if name in ascending_metrics else value > row[name]
+             if name not in row or new_metric_better:
+                 # overwrite the metric if the new value is better.
+
+                 if only_verified:
+                     if "verified" in metric and metric["verified"]:
+                         no_results = False
+                         row[name] = value
+                         if name in metric_ranges:
+                             if value < metric_ranges[name][0] or value > metric_ranges[name][1]:
+                                 incorrect_results = True
+                 else:
+                     no_results = False
+                     row[name] = value
+                     if name in metric_ranges:
+                         if value < metric_ranges[name][0] or value > metric_ranges[name][1]:
+                             incorrect_results = True
+         if no_results or incorrect_results:
+             continue
+         yield row
+
+ @st.cache(ttl=0)
+ def get_data_wrapper():
+
+     def get_data(dataframe=None, verified_dataframe=None):
+         data = []
+         verified_data = []
+         print("getting model infos")
+         model_infos = get_model_infos()
+         print("got model infos")
+         for model_info in model_infos:
+             meta = model_info.cardData
+             if meta is None:
+                 continue
+             for row in parse_metrics_rows(meta):
+                 if row is None:
+                     continue
+                 row["model_id"] = model_info.id
+                 row["pipeline_tag"] = model_info.pipeline_tag
+                 row["only_verified"] = False
+                 data.append(row)
+             for row in parse_metrics_rows(meta, only_verified=True):
+                 if row is None:
+                     continue
+                 row["model_id"] = model_info.id
+                 row["pipeline_tag"] = model_info.pipeline_tag
+                 row["only_verified"] = True
+                 data.append(row)
+         dataframe = pd.DataFrame.from_records(data)
+         dataframe.to_pickle("cache.pkl")
+
+     if exists("cache.pkl"):
+         # If we have saved the results previously, call an asynchronous process
+         # to fetch the results and update the saved file. Don't make users wait
+         # while we fetch the new results. Instead, display the old results for
+         # now. The new results should be loaded when this method
+         # is called again.
+         dataframe = pd.read_pickle("cache.pkl")
+         t = threading.Thread(name="get_data procs", target=get_data)
+         t.start()
+     else:
+         # We have to make the users wait during the first startup of this app.
+         get_data()
+         dataframe = pd.read_pickle("cache.pkl")
+
+     return dataframe
+
+ dataframe = get_data_wrapper()
+
+ st.markdown("# 🤗 Leaderboards")
+
+ query_params = st.experimental_get_query_params()
+ if "first_query_params" not in st.session_state:
+     st.session_state.first_query_params = query_params
+ first_query_params = st.session_state.first_query_params
+
+ default_task = first_query_params.get("task", [None])[0]
+ default_only_verified = bool(int(first_query_params.get("only_verified", [0])[0]))
+ print(default_only_verified)
+ default_dataset = first_query_params.get("dataset", [None])[0]
+ default_split = first_query_params.get("split", [None])[0]
+ default_config = first_query_params.get("config", [None])[0]
+ default_metric = first_query_params.get("metric", [None])[0]
+
+ only_verified_results = st.sidebar.checkbox(
+     "Filter for Verified Results",
+     value=default_only_verified,
+     help="Select this checkbox if you want to see only results produced by the Hugging Face model evaluator, and no self-reported results."
+ )
+
+ selectable_tasks = list(set(dataframe.pipeline_tag))
+ if None in selectable_tasks:
+     selectable_tasks.remove(None)
+ selectable_tasks.sort(key=lambda name: name.lower())
+ selectable_tasks = ["-any-"] + selectable_tasks
+
+ task = st.sidebar.selectbox(
+     "Task",
+     selectable_tasks,
+     index=(selectable_tasks).index(default_task) if default_task in selectable_tasks else 0,
+     help="Filter the selectable datasets by task. Leave as \"-any-\" to see all selectable datasets."
+ )
+
+ if task != "-any-":
+     dataframe = dataframe[dataframe.pipeline_tag == task]
+
+ selectable_datasets = ["-any-"] + sorted(list(set(dataframe.dataset.tolist())), key=lambda name: name.lower())
+ if "" in selectable_datasets:
+     selectable_datasets.remove("")
+
+ dataset = st.sidebar.selectbox(
+     "Dataset",
+     selectable_datasets,
+     index=selectable_datasets.index(default_dataset) if default_dataset in selectable_datasets else 0,
+     help="Select a dataset to see the leaderboard!"
+ )
+
+ dataframe = dataframe[dataframe.only_verified == only_verified_results]
+
+ current_query_params = {"dataset": [dataset], "only_verified": [int(only_verified_results)], "task": [task]}
+
+ st.experimental_set_query_params(**current_query_params)
+
+ if dataset != "-any-":
+     dataset_df = dataframe[dataframe.dataset == dataset]
+ else:
+     dataset_df = dataframe
+
+ dataset_df = dataset_df.dropna(axis="columns", how="all")
+
+ if len(dataset_df) > 0:
+     selectable_configs = list(set(dataset_df["config"]))
+     selectable_configs.sort(key=lambda name: name.lower())
+
+     if "-unspecified-" in selectable_configs:
+         selectable_configs.remove("-unspecified-")
+         selectable_configs = ["-unspecified-"] + selectable_configs
+
+     if dataset != "-any-":
+         config = st.sidebar.selectbox(
+             "Config",
+             selectable_configs,
+             index=selectable_configs.index(default_config) if default_config in selectable_configs else 0,
+             help="Filter the results on the current leaderboard by the dataset config. Self-reported results might not report the config, which is why \"-unspecified-\" is an option."
+         )
+         dataset_df = dataset_df[dataset_df.config == config]
+
+         selectable_splits = list(set(dataset_df["split"]))
+         selectable_splits.sort(key=lambda name: name.lower())
+
+         if "-unspecified-" in selectable_splits:
+             selectable_splits.remove("-unspecified-")
+             selectable_splits = ["-unspecified-"] + selectable_splits
+
+         split = st.sidebar.selectbox(
+             "Split",
+             selectable_splits,
+             index=selectable_splits.index(default_split) if default_split in selectable_splits else 0,
+             help="Filter the results on the current leaderboard by the dataset split. Self-reported results might not report the split, which is why \"-unspecified-\" is an option."
+         )
+
+         current_query_params.update({"config": [config], "split": [split]})
+
+         st.experimental_set_query_params(**current_query_params)
+
+         dataset_df = dataset_df[dataset_df.split == split]
+
+     not_selectable_metrics = ["model_id", "dataset", "split", "config", "pipeline_tag", "only_verified"]
+     selectable_metrics = list(filter(lambda column: column not in not_selectable_metrics, dataset_df.columns))
+
+     dataset_df = dataset_df.filter(["model_id"] + (["dataset"] if dataset == "-any-" else []) + selectable_metrics)
+     dataset_df = dataset_df.dropna(thresh=2)  # Want at least two non-na values (one for model_id and one for a metric).
+
+     sorting_metric = st.sidebar.radio(
+         "Sorting Metric",
+         selectable_metrics,
+         index=selectable_metrics.index(default_metric) if default_metric in selectable_metrics else 0,
+         help="Select the metric to sort the leaderboard by. Click on the metric name in the leaderboard to reverse the sorting order."
+     )
+
+     current_query_params.update({"metric": [sorting_metric]})
+
+     st.experimental_set_query_params(**current_query_params)
+
+     st.markdown(
+         "Please click on the model's name to be redirected to its model card."
+     )
+
+     st.markdown(
+         "Want to beat the leaderboard? Don't see your model here? Simply request an automatic evaluation [here](https://huggingface.co/spaces/autoevaluate/model-evaluator)."
+     )
+
+     st.markdown(
+         "If you do not see your self-reported results here, ensure that your results are in the expected range for all metrics. E.g., accuracy is 0-1, not 0-100."
+     )
+
+     if dataset == "-any-":
+         st.info(
+             "Note: you haven't chosen a dataset, so the leaderboard is showing the best scoring model for a random sample of the datasets available."
+         )
+
+     # Make the default metric appear right after model names and dataset names
+     cols = dataset_df.columns.tolist()
+     cols.remove(sorting_metric)
+     sorting_metric_index = 1 if dataset != "-any-" else 2
+     cols = cols[:sorting_metric_index] + [sorting_metric] + cols[sorting_metric_index:]
+     dataset_df = dataset_df[cols]
+
+     # Sort the leaderboard, giving the sorting metric highest priority and then ordering by other metrics in the case of equal values.
+     dataset_df = dataset_df.sort_values(by=cols[sorting_metric_index:], ascending=[metric in ascending_metrics for metric in cols[sorting_metric_index:]])
+     dataset_df = dataset_df.replace(np.nan, '-')
+
+     # If dataset is "-any-", only show the best model for a random sample of 100 datasets.
+     # Otherwise the leaderboard is way too long and doesn't give the users a feel for all of
+     # the datasets available for a task.
+     if dataset == "-any-":
+         filtered_dataset_df_dict = {column: [] for column in dataset_df.columns}
+         seen_datasets = set()
+         for _, row in dataset_df.iterrows():
+             if row["dataset"] not in seen_datasets:
+                 for column in dataset_df.columns:
+                     filtered_dataset_df_dict[column].append(row[column])
+                 seen_datasets.add(row["dataset"])
+         dataset_df = pd.DataFrame(filtered_dataset_df_dict)
+         dataset_df = dataset_df.sample(min(100, len(dataset_df)))
+
+     # Make the leaderboard
+     gb = GridOptionsBuilder.from_dataframe(dataset_df)
+     gb.configure_default_column(sortable=False)
+     gb.configure_column(
+         "model_id",
+         cellRenderer=JsCode('''function(params) {return '<a target="_blank" href="https://huggingface.co/'+params.value+'">'+params.value+'</a>'}'''),
+     )
+     if dataset == "-any-":
+         gb.configure_column(
+             "dataset",
+             cellRenderer=JsCode('''function(params) {return '<a target="_blank" href="https://huggingface.co/spaces/autoevaluate/leaderboards?dataset='+params.value+'">'+params.value+'</a>'}'''),
+         )
+     for name in selectable_metrics:
+         gb.configure_column(name, type=["numericColumn","numberColumnFilter","customNumericFormat"], precision=4, aggFunc='sum')
+
+     gb.configure_column(
+         sorting_metric,
+         sortable=True,
+         cellStyle=JsCode('''function(params) { return {'backgroundColor': '#FFD21E'}}''')
+     )
+
+     go = gb.build()
+     fit_columns = len(dataset_df.columns) < 10
+     AgGrid(dataset_df, gridOptions=go, height=28*len(dataset_df) + (35 if fit_columns else 41), allow_unsafe_jscode=True, fit_columns_on_grid_load=fit_columns, enable_enterprise_modules=False)
+
+ else:
+     st.markdown(
+         "No " + ("verified" if only_verified_results else "unverified") + " results to display. Try toggling the verified results filter."
+     )
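
For reference, here is a minimal sketch (with a hypothetical model and metric values) of the card metadata shape that `parse_metrics_rows()` consumes from `model_info.cardData`: a `model-index` list whose first entry carries `results`, each result holding a `dataset` (`type`, optional `config`/`split`) and a list of `metrics` (`type`, `value`, optionally `verified`).

```python
# Hypothetical card metadata, shaped like what parse_metrics_rows() reads.
meta = {
    "model-index": [{
        "results": [{
            "dataset": {"type": "librispeech_asr", "config": "clean", "split": "test"},
            "metrics": [{"type": "wer", "value": 3.4, "verified": True}],
        }]
    }]
}

# parse_metrics_rows(meta) would yield one row per result, roughly:
# {"dataset": "librispeech_asr", "split": "test", "config": "clean", "wer": 3.4}
for result in meta["model-index"][0]["results"]:
    print(result["dataset"]["type"], {m["type"]: m["value"] for m in result["metrics"]})
```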
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ pandas==1.5.1
+ huggingface_hub==0.11.1
+ numpy==1.23.4
+ streamlit-aggrid==0.3.3
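
Note that `streamlit` itself is not pinned here: on the Hub, the `sdk: streamlit` / `sdk_version: 1.10.0` settings in README.md supply the runtime, and `streamlit-aggrid` should pull it in as a dependency. For a local run you would presumably install these requirements (plus Streamlit if needed) and start the app with `streamlit run app.py`.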
utils.py ADDED
@@ -0,0 +1,35 @@
+ ascending_metrics = {
+     "wer",
+     "cer",
+     "loss",
+     "mae",
+     "mahalanobis",
+     "mse",
+     "perplexity",
+     "ter",
+ }
+
+ metric_ranges = {
+     "accuracy": (0, 1),
+     "precision": (0, 1),
+     "recall": (0, 1),
+     "macro f1": (0, 1),
+     "micro f1": (0, 1),
+     "pearson": (-1, 1),
+     "matthews_correlation": (-1, 1),
+     "spearmanr": (-1, 1),
+     "google_bleu": (0, 1),
+     "precision@10": (0, 1),
+     "mae": (0, 1),
+     "mauve": (0, 1),
+     "frontier_integral": (0, 1),
+     "mean_iou": (0, 1),
+     "mean_accuracy": (0, 1),
+     "overall_accuracy": (0, 1),
+     "meteor": (0, 1),
+     "mse": (0, 1),
+     "perplexity": (0, float("inf")),
+     "rogue1": (0, 1),
+     "rogue2": (0, 1),
+     "sari": (0, 100),
+ }
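
As a quick illustration of how these tables are used, here is a minimal sketch (assuming it runs next to `utils.py`; `normalize()` below is a simplified, hypothetical stand-in for `parse_metric_value()` in app.py): a reported value is normalized, checked against `metric_ranges`, and `ascending_metrics` decides whether lower or higher is better when sorting.

```python
from utils import ascending_metrics, metric_ranges

def normalize(value):
    # Simplified stand-in for parse_metric_value(): cast strings to float,
    # take the first element of a list, round floats to 4 decimal places.
    if isinstance(value, str):
        try:
            value = float(value)
        except ValueError:
            value = None
    elif isinstance(value, list):
        value = value[0] if value else None
    return round(value, 4) if isinstance(value, float) else None

for name, raw in [("accuracy", "0.9132"), ("accuracy", 91.32), ("wer", [7.5])]:
    value = normalize(raw)
    low, high = metric_ranges.get(name, (float("-inf"), float("inf")))
    status = "kept" if value is not None and low <= value <= high else "dropped (out of range)"
    order = "lower is better" if name in ascending_metrics else "higher is better"
    print(f"{name}={value}: {status}; {order}")
```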