Tristan Thrush committed
Commit • 09881d4
1 parent: c84ed95
fixed rounding bug, improved messages above the leaderboard
app.py CHANGED
@@ -15,6 +15,9 @@ def make_clickable(model_name):
 def make_bold(value):
     return f'<b>{value}</b>'
 
+def make_string(value):
+    return str(value)
+
 
 def get_model_ids():
     api = HfApi()
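The new `make_string` helper lines up with the "rounding bug" in the commit message: when pandas renders a float column to HTML it formats values at its display precision (6 decimal places by default), so long metric values come out rounded. A minimal sketch of that pandas behavior, with a hypothetical column name not taken from this repo:

```python
import pandas as pd

# pandas formats floats at display precision (6 by default) when rendering,
# so a high-precision metric gets rounded in the generated HTML table.
df = pd.DataFrame({"accuracy": [0.123456789]})
print(df.to_html())              # cell renders as 0.123457

# Pre-converting the value to str, as make_string() does, keeps it exact.
df["accuracy"] = df["accuracy"].apply(str)
print(df.to_html())              # cell renders as 0.123456789
```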
@@ -72,7 +75,7 @@ def parse_metrics_rows(meta):
 @st.cache(ttl=86400)
 def get_data():
     data = []
-    model_ids = get_model_ids()
+    model_ids = get_model_ids()[:10]
     for model_id in tqdm(model_ids):
         meta = get_metadata(model_id)
         if meta is None:
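For context, `get_data()` is wrapped in `@st.cache(ttl=86400)`, so the Hub is queried at most once per day rather than on every page load, and the new `[:10]` slice caps how many models get processed. `get_model_ids()` itself is not part of this diff; a hypothetical sketch of the pattern (the `hf-leaderboards` tag is borrowed from the message removed further down and may not match the real helper):

```python
import streamlit as st
from huggingface_hub import HfApi

def get_model_ids():
    # Hypothetical body: list Hub models carrying a given tag and keep their repo ids.
    api = HfApi()
    return [model.modelId for model in api.list_models(filter="hf-leaderboards")]

@st.cache(ttl=86400)  # cache the Hub lookups for 24 hours (86400 seconds)
def get_data():
    data = []
    model_ids = get_model_ids()[:10]  # the diff caps processing at the first 10 models
    # ... metadata parsing elided, as in the hunk above ...
    return data
```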
@@ -117,7 +120,11 @@ dataset_df = dataset_df.sort_values(by=metric, ascending=metric in ascending_met
 dataset_df = dataset_df.replace(np.nan, '-')
 
 st.markdown(
-    "Please click on the model's name to be redirected to its model card
+    "Please click on the model's name to be redirected to its model card."
+)
+
+st.markdown(
+    "Want to beat the leaderboard? Don't see your model here? Simply request an automatic evaluation [here](https://huggingface.co/spaces/autoevaluate/autoevaluate)."
 )
 
 # display the model ranks
@@ -127,6 +134,8 @@ dataset_df.index += 1
 # turn the model ids into clickable links
 dataset_df["model_id"] = dataset_df["model_id"].apply(make_clickable)
 dataset_df[metric] = dataset_df[metric].apply(make_bold)
+for other_metric in selectable_metrics:
+    dataset_df[other_metric] = dataset_df[other_metric].apply(make_string)
 
 # Make the selected metric appear right after model names
 cols = dataset_df.columns.tolist()
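In this hunk the model ids become links, the selected metric is bolded, and every metric column is stringified so pandas does not reformat it later. A self-contained illustration with made-up data and a hypothetical `make_clickable` stand-in (assuming `selectable_metrics` holds the metric column names):

```python
import pandas as pd

def make_clickable(model_id):
    # illustrative stand-in for the repo's helper
    return f'<a href="https://huggingface.co/{model_id}">{model_id}</a>'

df = pd.DataFrame({"model_id": ["org/model-a"], "accuracy": [0.91234567], "f1": [0.88765432]})
metric, selectable_metrics = "accuracy", ["accuracy", "f1"]

df["model_id"] = df["model_id"].apply(make_clickable)
df[metric] = df[metric].apply(lambda v: f"<b>{v}</b>")      # bold the selected metric
for other_metric in selectable_metrics:
    df[other_metric] = df[other_metric].apply(str)          # keep full precision as text

print(df.to_html(escape=False))  # escape=False lets the <a>/<b> tags through
```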
@@ -139,13 +148,9 @@ def highlight_cols(s):
     huggingface_yellow = "#FFD21E"
     return "background-color: %s" % huggingface_yellow
 
-dataset_df = dataset_df.style.applymap(highlight_cols, subset=pd.IndexSlice[
+dataset_df = dataset_df.style.applymap(highlight_cols, subset=pd.IndexSlice[metric])
 
 # Turn table into html
 table_html = dataset_df.to_html(escape=False)
 table_html = table_html.replace("<th>", '<th align="left">') # left-align the headers
 st.write(table_html, unsafe_allow_html=True)
-
-st.markdown(
-    "Want to beat the Leaderboard? Don't see your model here? Simply add the `hf-leaderboards` tag to your model card alongside your evaluation metrics. See [this commit](https://huggingface.co/facebook/wav2vec2-base-960h/commit/88338305603a4d8db25aca96e669beb5f7dc65cb) as an example."
-)