Tristan Thrush committed on
Commit
2d4b120
β€’
0 Parent(s):

first commit

Browse files
Files changed (3) hide show
  1. README.md +13 -0
  2. app.py +124 -0
  3. requirements.txt +4 -0
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: 🤗 Leaderboards
3
+ emoji: 📈
4
+ colorFrom: red
5
+ colorTo: yellow
6
+ sdk: streamlit
7
+ app_file: app.py
8
+ pinned: false
9
+ license: apache-2.0
10
+ ---
11
+
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import pandas as pd
3
+ from tqdm.auto import tqdm
4
+ import streamlit as st
5
+ from huggingface_hub import HfApi, hf_hub_download
6
+ from huggingface_hub.repocard import metadata_load
7
+
8
+
9
+ def make_clickable(model_name):
10
+ link = "https://huggingface.co/" + model_name
11
+ return f'<a target="_blank" href="{link}">{model_name}</a>'
12
+
13
+
14
def get_model_ids():
    """Return the ids of all Hub models tagged ``hf-asr-leaderboard``."""
    hub = HfApi()
    # TODO: switch to hf-leaderboards for the final version.
    return [model.modelId for model in hub.list_models(filter="hf-asr-leaderboard")]
20
+
21
+
22
def get_metadata(model_id):
    """Download *model_id*'s README.md and parse its YAML metadata.

    Returns ``None`` when the repo has no README (the Hub answers 404).
    """
    try:
        card_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(card_path)
    except requests.exceptions.HTTPError:
        # 404 README.md not found
        return None
29
+
30
+
31
def parse_metric_value(value):
    """Normalize a model-card metric value to a float rounded to 2 decimals.

    Accepts a number, a numeric string (optionally suffixed with '%'), or a
    list (only the first element is used, parsed recursively). Returns
    ``None`` when the value cannot be interpreted as a number.
    """
    if isinstance(value, str):
        # Strip percent signs before conversion: "12.5%" -> "12.5".
        # (Bug fix: the stripped string was previously discarded, so every
        # percent-suffixed metric value parsed to None and was dropped.)
        value = "".join(value.split("%"))
        try:
            value = float(value)
        except ValueError:
            # Non-numeric string such as "n/a".
            value = None
    elif isinstance(value, list):
        # Recurse so list-wrapped strings/numbers are handled uniformly
        # (previously a string element crashed the round() below).
        return parse_metric_value(value[0]) if value else None
    return round(value, 2) if value is not None else None
45
+
46
+
47
def parse_metrics_rows(meta):
    """Yield one ``{"dataset": ..., metric_name: value, ...}`` row per result.

    Results without a dataset/metrics section, or whose dataset has no
    ``args``, are skipped. Because this is a generator, the early ``return``
    for a missing ``model-index`` simply yields nothing.
    """
    if "model-index" not in meta:
        return None
    for result in meta["model-index"][0]["results"]:
        if "dataset" not in result or "metrics" not in result:
            continue
        dataset_name = result["dataset"]["type"]
        if "args" not in result["dataset"]:
            continue
        row = {"dataset": dataset_name}
        for metric in result["metrics"]:
            metric_name = metric["type"].lower().strip()
            parsed = parse_metric_value(metric["value"])
            if parsed is None:
                continue
            # Keep the lowest value per metric (e.g. WER improved by an LM).
            if metric_name not in row or parsed < row[metric_name]:
                row[metric_name] = parsed
        yield row
66
+
67
+
68
@st.cache(ttl=600)
def get_data():
    """Collect the metric rows of every leaderboard model into a DataFrame.

    Models without a readable README are skipped. Cached by streamlit for
    ten minutes to avoid re-crawling the Hub on every rerun.
    """
    records = []
    for model_id in tqdm(get_model_ids()):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        for row in parse_metrics_rows(meta):
            if row is None:
                continue
            records.append({**row, "model_id": model_id})
    return pd.DataFrame.from_records(records)
82
+
83
+
84
dataframe = get_data()

# Sort for a deterministic selectbox order (list(set(...)) was nondeterministic
# across reruns).
selectable_datasets = sorted(set(dataframe.dataset.tolist()))

st.markdown("# 🤗 Leaderboards")

# Default to common_voice when present; otherwise fall back to the first
# dataset instead of crashing on list.index() with a ValueError.
default_dataset_index = (
    selectable_datasets.index("common_voice")
    if "common_voice" in selectable_datasets
    else 0
)

dataset = st.sidebar.selectbox(
    "Dataset",
    selectable_datasets,
    index=default_dataset_index,
)

# Keep only rows for the chosen dataset and drop metric columns that are
# entirely empty for it.
dataset_df = dataframe[dataframe.dataset == dataset]
dataset_df = dataset_df.dropna(axis="columns", how="all")

metric = st.sidebar.selectbox(
    "Metric",
    list(filter(lambda column: column not in ("model_id", "dataset"), dataset_df.columns)),
)

# One column of models with the chosen metric, best values first.
dataset_df = dataset_df.filter(["model_id", metric])
dataset_df = dataset_df.dropna()
dataset_df = dataset_df.sort_values(by=metric, ascending=False)

st.markdown(
    "Please click on the model's name to be redirected to its model card which includes documentation and examples on how to use it."
)

# Display 1-based model ranks.
dataset_df = dataset_df.reset_index(drop=True)
dataset_df.index += 1

# Turn the model ids into clickable links.
dataset_df["model_id"] = dataset_df["model_id"].apply(make_clickable)

table_html = dataset_df.to_html(escape=False)
table_html = table_html.replace("<th>", '<th align="left">')  # left-align the headers
st.write(table_html, unsafe_allow_html=True)

st.markdown(
    "Want to beat the Leaderboard? Don't see your model here? Simply add the `hf-leaderboards` tag to your model card alongside your evaluation metrics. See [this commit](https://huggingface.co/facebook/wav2vec2-base-960h/commit/88338305603a4d8db25aca96e669beb5f7dc65cb) as an example."
)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
1
+ pandas
2
+ tqdm
3
+ streamlit
4
+ huggingface_hub