juncliu committed
Commit a47646c · 1 Parent(s): c870669

add model_link
app.py CHANGED
@@ -110,17 +110,23 @@ def init_leaderboard(ori_dataframe, model_info_df):
     if ori_dataframe is None or ori_dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
     model_info_col_list = [c.name for c in fields(ModelInfoColumn) if c.displayed_by_default if c.name not in ['#Params (B)', 'available_on_hub', 'hub', 'Model sha','Hub License']]
+    col2type_dict = {c.name: c.type for c in fields(ModelInfoColumn)}
     default_selection_list = list(ori_dataframe.columns) + model_info_col_list
-    print('default_selection_list: ', default_selection_list)
+    # print('default_selection_list: ', default_selection_list)
     # ipdb.set_trace()
     # default_selection_list = [col for col in default_selection_list if col not in ['#Params (B)', 'available_on_hub', 'hub', 'Model sha','Hub License']]
     merged_df = get_merged_df(ori_dataframe, model_info_df)
     new_cols = ['T'] + [col for col in merged_df.columns if col != 'T']
     merged_df = merged_df[new_cols]
     print('Merged df: ', merged_df)
+    # get the data type
+    datatype_list = [col2type_dict[col] if col in col2type_dict else 'number' for col in merged_df.columns]
+    # print('datatype_list: ', datatype_list)
+    # print('merged_df.column: ', merged_df.columns)
+    # ipdb.set_trace()
     return Leaderboard(
         value=merged_df,
-        # datatype=[c.type for c in fields(ModelInfoColumn)],
+        datatype=[c.type for c in fields(ModelInfoColumn)],
         select_columns=SelectColumns(
             default_selection=default_selection_list,
             # default_selection=[c.name for c in fields(ModelInfoColumn) if
@@ -183,96 +189,6 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=4):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

-        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=5):
-        #     with gr.Column():
-        #         with gr.Row():
-        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-        #
-        #         with gr.Column():
-        #             with gr.Accordion(
-        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-        #                 open=False,
-        #             ):
-        #                 with gr.Row():
-        #                     finished_eval_table = gr.components.Dataframe(
-        #                         value=finished_eval_queue_df,
-        #                         headers=EVAL_COLS,
-        #                         datatype=EVAL_TYPES,
-        #                         row_count=5,
-        #                     )
-        #             with gr.Accordion(
-        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-        #                 open=False,
-        #             ):
-        #                 with gr.Row():
-        #                     running_eval_table = gr.components.Dataframe(
-        #                         value=running_eval_queue_df,
-        #                         headers=EVAL_COLS,
-        #                         datatype=EVAL_TYPES,
-        #                         row_count=5,
-        #                     )
-        #
-        #             with gr.Accordion(
-        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-        #                 open=False,
-        #             ):
-        #                 with gr.Row():
-        #                     pending_eval_table = gr.components.Dataframe(
-        #                         value=pending_eval_queue_df,
-        #                         headers=EVAL_COLS,
-        #                         datatype=EVAL_TYPES,
-        #                         row_count=5,
-        #                     )
-        #     with gr.Row():
-        #         gr.Markdown("# ✉️✨ Submit your model outputs !", elem_classes="markdown-text")
-        #         gr.Markdown(
-        #             "Send your model outputs for all the models using the ContextualBench code and email them to us at xnguyen@salesforce.com ",
-        #             elem_classes="markdown-text")
-
-        #     with gr.Row():
-        #         with gr.Column():
-        #             model_name_textbox = gr.Textbox(label="Model name")
-        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-        #             model_type = gr.Dropdown(
-        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-        #                 label="Model type",
-        #                 multiselect=False,
-        #                 value=None,
-        #                 interactive=True,
-        #             )
-
-        #         with gr.Column():
-        #             precision = gr.Dropdown(
-        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
-        #                 label="Precision",
-        #                 multiselect=False,
-        #                 value="float16",
-        #                 interactive=True,
-        #             )
-        #             weight_type = gr.Dropdown(
-        #                 choices=[i.value.name for i in WeightType],
-        #                 label="Weights type",
-        #                 multiselect=False,
-        #                 value="Original",
-        #                 interactive=True,
-        #             )
-        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-        #     submit_button = gr.Button("Submit Eval")
-        #     submission_result = gr.Markdown()
-        #     submit_button.click(
-        #         add_new_eval,
-        #         [
-        #             model_name_textbox,
-        #             base_model_name_textbox,
-        #             revision_name_textbox,
-        #             precision,
-        #             weight_type,
-        #             model_type,
-        #         ],
-        #         submission_result,
-        #     )
-
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
             citation_button = gr.Textbox(
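Review note on the app.py hunk, with a minimal sketch: the new code builds `datatype_list` (one display type per column of `merged_df`, defaulting to `'number'`), yet the un-commented argument passes `datatype=[c.type for c in fields(ModelInfoColumn)]`, which is ordered by the ModelInfoColumn fields rather than by the displayed columns. If the `Leaderboard` component applies datatypes to columns positionally, the computed `datatype_list` looks like the intended argument. The dataframe and column metadata below are hypothetical stand-ins, not values from the repo:

```python
import pandas as pd

# Hypothetical stand-ins for merged_df and the ColumnContent-derived mapping.
merged_df = pd.DataFrame({"T": ["?"], "model": ["<a href='#'>demo</a>"], "MASE": [0.87]})
col2type_dict = {"T": "str", "model": "markdown"}  # column name -> display type

# One datatype per displayed column, in column order, defaulting to 'number' --
# the same rule the commit uses to build datatype_list.
datatype_list = [col2type_dict.get(col, "number") for col in merged_df.columns]
print(datatype_list)  # ['str', 'markdown', 'number']
```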
results/Chronos_small/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "Chronos_small",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/amazon/chronos-t5-small"
 }
results/Moirai_base/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "Moirai_base",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/Salesforce/moirai-1.1-R-base"
 }
results/Moirai_large/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "Moirai_large",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/Salesforce/moirai-1.1-R-large"
 }
results/Moirai_small/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "Moirai_small",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/Salesforce/moirai-1.1-R-large"
 }
results/chronos_base/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "Chronos_base",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/amazon/chronos-t5-base"
 }
results/chronos_large/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "Chronos_large",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/amazon/chronos-t5-large"
 }
results/timer_small/config.json ADDED
@@ -0,0 +1,5 @@
+{
+    "model": "timer_small",
+    "model_type": "pretrained",
+    "model_dtype": "float32"
+}
results/timesfm/config.json CHANGED
@@ -1,5 +1,6 @@
 {
     "model": "TimesFM",
     "model_type": "pretrained",
-    "model_dtype": "float32"
+    "model_dtype": "float32",
+    "model_link": "https://huggingface.co/google/timesfm-1.0-200m"
 }
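All of the result configs above gain the same `model_link` field (note that results/Moirai_small/config.json points at the `-large` checkpoint URL, which looks like a copy-paste slip in the commit). A minimal sketch of how such a config can feed the clickable model cell built by `model_hyperlink()` in src/leaderboard/read_evals.py; the anchor-tag markup here is an assumption, not the repo's actual helper:

```python
import json

def model_hyperlink(link: str, model_name: str) -> str:
    # Assumed implementation: an HTML anchor, rendered by a "markdown"-typed column.
    return f'<a target="_blank" href="{link}">{model_name}</a>'

# The Chronos_small config exactly as committed above.
config = json.loads("""
{
    "model": "Chronos_small",
    "model_type": "pretrained",
    "model_dtype": "float32",
    "model_link": "https://huggingface.co/amazon/chronos-t5-small"
}
""")

print(model_hyperlink(config["model_link"], config["model"]))
```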
src/display/utils.py CHANGED
@@ -27,14 +27,14 @@ model_info_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "
 model_info_dict.append(["model", ColumnContent, ColumnContent("model", "markdown", True, never_hidden=True)])
 # Model information
 model_info_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False, True)])
-model_info_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-model_info_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+# model_info_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+# model_info_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
 model_info_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False, True)])
 model_info_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False, True)])
 model_info_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False, True)])
 model_info_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False, True)])
 model_info_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-model_info_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+# model_info_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])

 # We use make dataclass to dynamically fill the scores from Tasks
 ModelInfoColumn = make_dataclass("ModelInfoColumn", model_info_dict, frozen=True)
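For context on why `fields(ModelInfoColumn)` yields objects with `.name`, `.type`, and `.displayed_by_default`: in this style of leaderboard code, `fields` is typically a custom helper (not `dataclasses.fields`) that returns the `ColumnContent` defaults stored on the generated class. A self-contained sketch under that assumption, with a trimmed-down `ColumnContent`:

```python
from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

def fields(raw_class):
    # Assumed helper: collect the ColumnContent class attributes, skipping dunders.
    return [v for k, v in raw_class.__dict__.items() if not k.startswith("__")]

model_info_dict = [
    ["model", ColumnContent, ColumnContent("model", "markdown", True, never_hidden=True)],
    ["model_type", ColumnContent, ColumnContent("Type", "str", False, True)],
]
# make_dataclass keeps each ColumnContent default as a class attribute,
# which is exactly what the fields() helper above picks up.
ModelInfoColumn = make_dataclass("ModelInfoColumn", model_info_dict, frozen=True)

print([c.name for c in fields(ModelInfoColumn)])  # ['model', 'Type']
print(ModelInfoColumn.model.name)                 # 'model'
```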
src/leaderboard/read_evals.py CHANGED
@@ -42,7 +42,7 @@ class ModelConfig:
     def to_dict(self):
         """Converts the model info to a dict compatible with our dataframe display"""
         data_dict = {
-            "model": self.model,
+            ModelInfoColumn.model.name: self.model,
             'model_w_link': model_hyperlink(self.model_link, self.model),
             ModelInfoColumn.precision.name: self.precision.value.name,
             ModelInfoColumn.model_type.name: self.model_type.value.name,
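Why swapping the literal `"model"` key for `ModelInfoColumn.model.name` is more than cosmetic: the dict keys in `to_dict()` now come from the single source of truth in src/display/utils.py, so renaming a display column there cannot silently desynchronize this mapping. A standalone sketch, using a hand-written stand-in for the `make_dataclass` result:

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    never_hidden: bool = False

class ModelInfoColumn:  # hand-written stand-in for the make_dataclass result
    model = ColumnContent("model", "markdown", True, never_hidden=True)
    model_type = ColumnContent("Type", "str", False)

data_dict = {
    ModelInfoColumn.model.name: "Chronos_small",    # key resolves to "model"
    ModelInfoColumn.model_type.name: "pretrained",  # key resolves to "Type"
}
print(data_dict)  # {'model': 'Chronos_small', 'Type': 'pretrained'}
```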