Clémentine committed
Commit fc1e99b
1 Parent(s): df66f6e

fix col width

app.py CHANGED
@@ -73,14 +73,13 @@ leaderboard_df = original_df.copy()
 
 
 # Basics
-def change_tab(query_param: str):
-    query_param = query_param.replace("'", '"')
-    query_param = json.loads(query_param)
-
-    if isinstance(query_param, dict) and "tab" in query_param and query_param["tab"] == "evaluation":
-        return gr.Tabs.update(selected=1)
-    else:
-        return gr.Tabs.update(selected=0)
+#def change_tab(query_param: str):
+#    query_param = query_param.replace("'", '"')
+#    query_param = json.loads(query_param)
+#    if isinstance(query_param, dict) and "tab" in query_param and query_param["tab"] == "evaluation":
+#        return gr.Tabs.update(selected=1)
+#    else:
+#        return gr.Tabs.update(selected=0)
 
 
 # Searching and filtering
@@ -192,28 +191,28 @@ with demo:
                 value=False, label="Show gated/private/deleted models", interactive=True
             )
         with gr.Column(min_width=320):
-            with gr.Box(elem_id="box-filter"):
-                filter_columns_type = gr.CheckboxGroup(
-                    label="Model types",
-                    choices=[t.to_str() for t in ModelType],
-                    value=[t.to_str() for t in ModelType],
-                    interactive=True,
-                    elem_id="filter-columns-type",
-                )
-                filter_columns_precision = gr.CheckboxGroup(
-                    label="Precision",
-                    choices=["torch.float16", "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
-                    value=["torch.float16", "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
-                    interactive=True,
-                    elem_id="filter-columns-precision",
-                )
-                filter_columns_size = gr.CheckboxGroup(
-                    label="Model sizes (in billions of parameters)",
-                    choices=list(NUMERIC_INTERVALS.keys()),
-                    value=list(NUMERIC_INTERVALS.keys()),
-                    interactive=True,
-                    elem_id="filter-columns-size",
-                )
+            #with gr.Box(elem_id="box-filter"):
+            filter_columns_type = gr.CheckboxGroup(
+                label="Model types",
+                choices=[t.to_str() for t in ModelType],
+                value=[t.to_str() for t in ModelType],
+                interactive=True,
+                elem_id="filter-columns-type",
+            )
+            filter_columns_precision = gr.CheckboxGroup(
+                label="Precision",
+                choices=["torch.float16", "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
+                value=["torch.float16", "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
+                interactive=True,
+                elem_id="filter-columns-precision",
+            )
+            filter_columns_size = gr.CheckboxGroup(
+                label="Model sizes (in billions of parameters)",
+                choices=list(NUMERIC_INTERVALS.keys()),
+                value=list(NUMERIC_INTERVALS.keys()),
+                interactive=True,
+                elem_id="filter-columns-size",
+            )
 
         leaderboard_table = gr.components.Dataframe(
             value=leaderboard_df[
@@ -223,10 +222,10 @@ with demo:
             ],
             headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
             datatype=TYPES,
-            max_rows=None,
             elem_id="leaderboard-table",
             interactive=False,
             visible=True,
+            column_widths=["2%", "33%"]
         )
 
         # Dummy leaderboard for handling the case when the user uses backspace key
@@ -234,7 +233,6 @@ with demo:
             value=original_df[COLS],
             headers=COLS,
            datatype=TYPES,
-            max_rows=None,
             visible=False,
         )
         search_bar.submit(
@@ -358,7 +356,7 @@ with demo:
                     value=finished_eval_queue_df,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
-                    max_rows=5,
+                    row_count=5,
                 )
         with gr.Accordion(
             f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
@@ -369,7 +367,7 @@ with demo:
                     value=running_eval_queue_df,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
-                    max_rows=5,
+                    row_count=5,
                 )
 
         with gr.Accordion(
@@ -381,7 +379,7 @@ with demo:
                     value=pending_eval_queue_df,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
-                    max_rows=5,
+                    row_count=5,
                 )
     with gr.Row():
         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
@@ -442,15 +440,15 @@ with demo:
             show_copy_button=True,
         )
 
-    dummy = gr.Textbox(visible=False)
-    demo.load(
-        change_tab,
-        dummy,
-        tabs,
-        _js=get_window_url_params,
-    )
+    #dummy = gr.Textbox(visible=False)
+    #demo.load(
+    #    change_tab,
+    #    dummy,
+    #    tabs,
+    #    js=get_window_url_params,
+    #)
 
     scheduler = BackgroundScheduler()
     scheduler.add_job(restart_space, "interval", seconds=1800)
    scheduler.start()
-    demo.queue(concurrency_count=40).launch()
+    demo.queue().launch()
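Note on the Gradio 4 port: `gr.Tabs.update()` no longer exists in Gradio 4 (event handlers return component instances instead), the `_js=` keyword on event listeners became `js=`, and `queue()` dropped `concurrency_count`, which is why the URL-driven tab switch above is commented out rather than migrated. A minimal sketch of how it could be restored under Gradio 4, assuming `tabs`, `dummy`, and `get_window_url_params` keep their existing definitions (untested against this Space):

```python
import json

import gradio as gr

def change_tab(query_param: str):
    # The URL params arrive as a single-quoted dict string; normalize before parsing.
    query_param = json.loads(query_param.replace("'", '"'))
    if isinstance(query_param, dict) and query_param.get("tab") == "evaluation":
        return gr.Tabs(selected=1)  # Gradio 4: return a new gr.Tabs instead of gr.Tabs.update()
    return gr.Tabs(selected=0)

# Inside the Blocks context; `js=` replaces the Gradio 3 `_js=` keyword:
# demo.load(change_tab, dummy, tabs, js=get_window_url_params)
```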
requirements.txt CHANGED
@@ -3,7 +3,7 @@ aiofiles==23.1.0
 aiohttp==3.8.4
 aiosignal==1.3.1
 altair==4.2.2
-anyio==3.6.2
+anyio==3.7.1
 APScheduler==3.10.1
 async-timeout==4.0.2
 attrs==23.1.0
@@ -20,8 +20,8 @@ filelock==3.11.0
 fonttools==4.39.3
 frozenlist==1.3.3
 fsspec==2023.5.0
-gradio==3.43.2
-gradio-client==0.5.0
+gradio==4.3.0
+gradio-client==0.7.0
 h11==0.14.0
 httpcore==0.17.0
 httpx==0.24.0
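Since Gradio 4.x is not backward-compatible with 3.x, it is worth confirming that a deployed environment actually picked up the new pins before debugging UI breakage. A minimal sketch using only the standard library (the pin list mirrors the changes above; extend it as needed):

```python
from importlib.metadata import version

# Pins introduced by this commit; keep in sync with requirements.txt.
# Recent Pythons normalize the dash in "gradio-client" for metadata lookups.
expected = {"gradio": "4.3.0", "gradio-client": "0.7.0", "anyio": "3.7.1"}
for pkg, pinned in expected.items():
    installed = version(pkg)
    assert installed == pinned, f"{pkg}: expected {pinned}, found {installed}"
```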
src/leaderboard/read_evals.py CHANGED
@@ -24,6 +24,7 @@ class EvalResult:
     precision: str = ""
     model_type: ModelType = ModelType.Unknown
     weight_type: str = "Original"
+    architecture: str = "Unknown"
     license: str = "?"
     likes: int = 0
     num_params: int = 0
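Because `EvalResult` is a dataclass and the new `architecture` field carries a default, existing construction sites that don't pass it keep working unchanged. A trimmed sketch of the pattern (field list reduced to the ones relevant here):

```python
from dataclasses import dataclass

@dataclass
class EvalResult:  # trimmed to the fields relevant to this change
    precision: str = ""
    weight_type: str = "Original"
    architecture: str = "Unknown"

result = EvalResult(precision="torch.float16")
assert result.architecture == "Unknown"  # older call sites are unaffected
```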
src/submission/check_validity.py CHANGED
@@ -67,6 +67,8 @@ def get_model_size(model_info: ModelInfo, precision: str):
     model_size = size_factor * model_size
     return model_size
 
+def get_model_arch(model_info: ModelInfo):
+    return model_info.config.get("architectures", "Unknown")
 
 def user_submission_permission(submission_name, users_to_submission_dates, rate_limit_period, rate_limit_quota):
     org_or_user, _ = submission_name.split("/")
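One caveat worth flagging: on the Hub, `config["architectures"]` is typically a list (e.g. `["LlamaForCausalLM"]`), so `get_model_arch` as written returns a list even though `EvalResult.architecture` is typed `str`, and it raises `AttributeError` when `model_info.config` is `None` (models without a config). A hedged sketch of a variant that always yields a string; the `";".join` convention is an assumption, not part of this commit:

```python
from huggingface_hub import ModelInfo

def get_model_arch(model_info: ModelInfo) -> str:
    # config may be None for models without a config.json on the Hub.
    architectures = (model_info.config or {}).get("architectures")
    if not architectures:
        return "Unknown"
    # "architectures" is usually a list like ["LlamaForCausalLM"].
    return ";".join(architectures)
```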