pminervini committed
Commit 942ae30 • 1 Parent(s): b97296b
app.py CHANGED
@@ -205,7 +205,7 @@ with demo:
         value=original_df[COLS] if original_df.empty is False else original_df,
         headers=COLS,
         datatype=TYPES,
-        visible=False,
+        visible=False
     )
     search_bar.submit(
         update_table,
@@ -271,7 +271,7 @@ with demo:
         value=finished_eval_queue_df,
         headers=EVAL_COLS,
         datatype=EVAL_TYPES,
-        row_count=5,
+        row_count=5
     )
     with gr.Accordion(
         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
@@ -282,7 +282,7 @@ with demo:
         value=running_eval_queue_df,
         headers=EVAL_COLS,
         datatype=EVAL_TYPES,
-        row_count=5,
+        row_count=5
     )

     with gr.Accordion(
@@ -294,7 +294,7 @@ with demo:
         value=pending_eval_queue_df,
         headers=EVAL_COLS,
         datatype=EVAL_TYPES,
-        row_count=5,
+        row_count=5
     )
     with gr.Row():
         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
src/display/css_html_js.py CHANGED
@@ -1,5 +1,9 @@
 custom_css = """

+.gradio-container {
+    max-width: 100%!important;
+}
+
 .markdown-text {
     font-size: 16px !important;
 }
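The new rule stretches the Gradio container to the full viewport width. This commit only edits the CSS string itself; as context, a custom_css string like this is typically attached to the app via gr.Blocks(css=...). The wiring below is an assumption based on standard Gradio usage, not shown in the diff.

import gradio as gr

custom_css = """
.gradio-container {
    max-width: 100%!important;
}

.markdown-text {
    font-size: 16px !important;
}
"""

with gr.Blocks(css=custom_css) as demo:
    # elem_classes attaches the .markdown-text rule to this component.
    gr.Markdown("Full-width container demo", elem_classes="markdown-text")

if __name__ == "__main__":
    demo.launch()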
src/leaderboard/read_evals.py CHANGED
@@ -56,8 +56,7 @@ class EvalResult:
         result_key = f"{org}_{model}_{precision.value.name}"
         full_model = "/".join(org_and_model)

-        still_on_hub, error, model_config = \
-            is_model_on_hub(full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False)
+        still_on_hub, error, model_config = is_model_on_hub(full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False)
         architecture = "?"
         if model_config is not None:
             architectures = getattr(model_config, "architectures", None)
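This hunk only reflows the is_model_on_hub call onto one line; the helper itself is defined elsewhere in the repository and is not shown here. Inferred purely from this call site (a model id, a revision, trust_remote_code and test_tokenizer flags, returning a (still_on_hub, error, model_config) triple), a plausible sketch might look like the following; the actual implementation may differ.

from transformers import AutoConfig, AutoTokenizer

def is_model_on_hub(model_name: str, revision: str,
                    trust_remote_code: bool = False,
                    test_tokenizer: bool = False):
    # Sketch inferred from the call site above, not the repo's own helper.
    try:
        config = AutoConfig.from_pretrained(
            model_name, revision=revision, trust_remote_code=trust_remote_code
        )
        if test_tokenizer:
            # Optionally verify the tokenizer also loads from the same revision.
            AutoTokenizer.from_pretrained(
                model_name, revision=revision, trust_remote_code=trust_remote_code
            )
        return True, None, config
    except Exception as e:
        # Covers missing, gated, and otherwise unloadable repositories.
        return False, str(e), None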