Tristan Thrush committed on
Commit
82c959c
1 Parent(s): 5fce9cb

removed requirement to be from autoeval org

Browse files
Files changed (1) hide show
  1. app.py +23 -23
app.py CHANGED
@@ -158,7 +158,8 @@ st.experimental_set_query_params(**{"dataset": [dataset]})
158
  dataset_df = dataframe[dataframe.dataset == dataset]
159
  dataset_df = dataset_df.dropna(axis="columns", how="all")
160
 
161
- if "config" in dataset_df.columns:
 
162
  selectable_configs = list(set(dataset_df["config"]))
163
  config = st.sidebar.selectbox(
164
  "Config",
@@ -166,7 +167,6 @@ if "config" in dataset_df.columns:
166
  )
167
  dataset_df = dataset_df[dataset_df.config == config]
168
 
169
- if "split" in dataset_df.columns:
170
  selectable_splits = list(set(dataset_df["split"]))
171
  split = st.sidebar.selectbox(
172
  "Split",
@@ -174,33 +174,33 @@ if "split" in dataset_df.columns:
174
  )
175
  dataset_df = dataset_df[dataset_df.split == split]
176
 
177
- selectable_metrics = list(filter(lambda column: column not in ("model_id", "dataset", "split", "config"), dataset_df.columns))
178
 
179
- dataset_df = dataset_df.filter(["model_id"] + selectable_metrics)
180
- dataset_df = dataset_df.dropna(thresh=2) # Want at least two non-na values (one for model_id and one for a metric).
181
 
182
- sorting_metric = st.sidebar.radio(
183
- "Sorting Metric",
184
- selectable_metrics,
185
- )
186
 
187
- st.markdown(
188
- "Please click on the model's name to be redirected to its model card."
189
- )
190
 
191
- st.markdown(
192
- "Want to beat the leaderboard? Don't see your model here? Simply request an automatic evaluation [here](https://huggingface.co/spaces/autoevaluate/autoevaluate)."
193
- )
194
 
195
- # Make the default metric appear right after model names
196
- cols = dataset_df.columns.tolist()
197
- cols.remove(sorting_metric)
198
- cols = cols[:1] + [sorting_metric] + cols[1:]
199
- dataset_df = dataset_df[cols]
200
 
201
- # Sort the leaderboard, giving the sorting metric highest priority and then ordering by other metrics in the case of equal values.
202
- dataset_df = dataset_df.sort_values(by=cols[1:], ascending=[metric in ascending_metrics for metric in cols[1:]])
203
- dataset_df = dataset_df.replace(np.nan, '-')
204
 
205
  # Make the leaderboard
206
  gb = GridOptionsBuilder.from_dataframe(dataset_df)
 
158
  dataset_df = dataframe[dataframe.dataset == dataset]
159
  dataset_df = dataset_df.dropna(axis="columns", how="all")
160
 
161
+ if len(dataset_df) > 0:
162
+
163
  selectable_configs = list(set(dataset_df["config"]))
164
  config = st.sidebar.selectbox(
165
  "Config",
 
167
  )
168
  dataset_df = dataset_df[dataset_df.config == config]
169
 
 
170
  selectable_splits = list(set(dataset_df["split"]))
171
  split = st.sidebar.selectbox(
172
  "Split",
 
174
  )
175
  dataset_df = dataset_df[dataset_df.split == split]
176
 
177
+ selectable_metrics = list(filter(lambda column: column not in ("model_id", "dataset", "split", "config"), dataset_df.columns))
178
 
179
+ dataset_df = dataset_df.filter(["model_id"] + selectable_metrics)
180
+ dataset_df = dataset_df.dropna(thresh=2) # Want at least two non-na values (one for model_id and one for a metric).
181
 
182
+ sorting_metric = st.sidebar.radio(
183
+ "Sorting Metric",
184
+ selectable_metrics,
185
+ )
186
 
187
+ st.markdown(
188
+ "Please click on the model's name to be redirected to its model card."
189
+ )
190
 
191
+ st.markdown(
192
+ "Want to beat the leaderboard? Don't see your model here? Simply request an automatic evaluation [here](https://huggingface.co/spaces/autoevaluate/autoevaluate)."
193
+ )
194
 
195
+ # Make the default metric appear right after model names
196
+ cols = dataset_df.columns.tolist()
197
+ cols.remove(sorting_metric)
198
+ cols = cols[:1] + [sorting_metric] + cols[1:]
199
+ dataset_df = dataset_df[cols]
200
 
201
+ # Sort the leaderboard, giving the sorting metric highest priority and then ordering by other metrics in the case of equal values.
202
+ dataset_df = dataset_df.sort_values(by=cols[1:], ascending=[metric in ascending_metrics for metric in cols[1:]])
203
+ dataset_df = dataset_df.replace(np.nan, '-')
204
 
205
  # Make the leaderboard
206
  gb = GridOptionsBuilder.from_dataframe(dataset_df)