choco9966 commited on
Commit
b36f71e
1 Parent(s): b8961bc

[Add] git add

Browse files
Makefile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Code-quality helpers. black / isort / ruff all share the 119-character
# line length configured in pyproject.toml.
# FIX: .PHONY previously declared "style format", but the targets defined
# below are `style` and `quality`; declare the real targets so files with
# the same names can never shadow them.
.PHONY: style quality


# Reformat the codebase in place.
style:
	python -m black --line-length 119 .
	python -m isort .
	ruff check --fix .


# Verify formatting/lint without modifying files (CI gate).
quality:
	python -m black --check --line-length 119 .
	python -m isort --check-only .
	ruff check .
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: LeaderboardTest
3
- emoji:
4
- colorFrom: gray
5
- colorTo: red
6
  sdk: gradio
7
- sdk_version: 3.43.2
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Leaderboard Test
3
+ emoji: 📉
4
+ colorFrom: green
5
+ colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 3.27.0
8
  app_file: app.py
9
+ pinned: true
10
+ license: apache-2.0
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from datetime import datetime, timezone
4
+
5
+ import gradio as gr
6
+ import pandas as pd
7
+ from apscheduler.schedulers.background import BackgroundScheduler
8
+ from huggingface_hub import HfApi
9
+
10
+ from src.assets.css_html_js import custom_css, get_window_url_params
11
+ from src.assets.text_content import (
12
+ CITATION_BUTTON_LABEL,
13
+ CITATION_BUTTON_TEXT,
14
+ EVALUATION_QUEUE_TEXT,
15
+ INTRODUCTION_TEXT,
16
+ LLM_BENCHMARKS_TEXT,
17
+ TITLE,
18
+ )
19
+ from src.display_models.get_model_metadata import DO_NOT_SUBMIT_MODELS, ModelType
20
+ from src.display_models.utils import (
21
+ AutoEvalColumn,
22
+ EvalQueueColumn,
23
+ fields,
24
+ styled_error,
25
+ styled_message,
26
+ styled_warning,
27
+ )
28
+ from src.load_from_hub import get_evaluation_queue_df, get_leaderboard_df, is_model_on_hub, load_all_info_from_hub
29
+ from src.rate_limiting import user_submission_permission
30
+
31
# Show one decimal place in rendered leaderboard tables.
pd.set_option("display.precision", 1)

# clone / pull the lmeh eval data
# Hub token for all API calls; None falls back to anonymous access.
H4_TOKEN = os.environ.get("H4_TOKEN", None)

# Hub datasets holding the evaluation request queue and the evaluation results.
QUEUE_REPO = "BearSean/leaderboard-test-requests"
RESULTS_REPO = "BearSean/leaderboard-test-results"

# Private mirrors (currently the same repos as the public ones).
PRIVATE_QUEUE_REPO = "BearSean/leaderboard-test-requests"
PRIVATE_RESULTS_REPO = "BearSean/leaderboard-test-results"

# NOTE(review): bool() of any non-empty string is True, so IS_PUBLIC=0 or
# IS_PUBLIC=false in the environment still evaluates to True; only an empty
# string yields False — confirm this is the intended behavior.
IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))

# Local snapshot directories for the queue / results datasets.
EVAL_REQUESTS_PATH = "eval-queue"
EVAL_RESULTS_PATH = "eval-results"

EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"

# Shared client for uploads and Space restarts.
api = HfApi(token=H4_TOKEN)
51
+
52
+
53
def restart_space():
    """Restart this Space via the Hub API (scheduled hourly to pick up fresh results)."""
    api.restart_space(repo_id="BearSean/leaderboard-test", token=H4_TOKEN)
55
+
56
# Rate limit variables
RATE_LIMIT_PERIOD = 7  # rolling window, in days
RATE_LIMIT_QUOTA = 5  # max submissions per org/user within the window

# Column selection: full column set vs. the lighter default view.
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]

# Private deployments additionally surface the precision column.
if not IS_PUBLIC:
    COLS.insert(2, AutoEvalColumn.precision.name)
    TYPES.insert(2, AutoEvalColumn.precision.type)

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

# Benchmark score columns that must be present for a result row to be complete.
BENCHMARK_COLS = [
    c.name
    for c in [
        AutoEvalColumn.arc,
        AutoEvalColumn.hellaswag,
        AutoEvalColumn.mmlu,
        AutoEvalColumn.truthfulqa,
    ]
]

## LOAD INFO FROM HUB
# Downloads/refreshes the local snapshots of the queue and results datasets.
eval_queue, requested_models, eval_results, users_to_submission_dates = load_all_info_from_hub(
    QUEUE_REPO, RESULTS_REPO, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
)

if not IS_PUBLIC:
    (eval_queue_private, requested_models_private, eval_results_private, _) = load_all_info_from_hub(
        PRIVATE_QUEUE_REPO,
        PRIVATE_RESULTS_REPO,
        EVAL_REQUESTS_PATH_PRIVATE,
        EVAL_RESULTS_PATH_PRIVATE,
    )
else:
    eval_queue_private, eval_results_private = None, None

original_df = get_leaderboard_df(eval_results, eval_results_private, COLS, BENCHMARK_COLS)
models = original_df["model_name_for_query"].tolist()  # needed for model backlinks from model pages to the leaderboard

# Commented out because it causes infinite restart loops in local
# to_be_dumped = f"models = {repr(models)}\n"

# with open("models_backlinks.py", "w") as f:
#     f.write(to_be_dumped)

# print(to_be_dumped)

# Working copy shown in the UI; original_df stays pristine for search/filter resets.
leaderboard_df = original_df.copy()
(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(eval_queue, eval_queue_private, EVAL_REQUESTS_PATH, EVAL_COLS)
115
+
116
+
117
+ ## INTERACTION FUNCTIONS
118
def add_new_eval(
    model: str,
    base_model: str,
    revision: str,
    precision: str,
    private: bool,
    weight_type: str,
    model_type: str,
):
    """Validate a model submission and push an eval-request JSON to the queue repo.

    Returns a styled HTML string (error / warning / success) for the Gradio UI.
    Side effects: writes a temporary request file under EVAL_REQUESTS_PATH,
    uploads it to QUEUE_REPO, then deletes the local copy.
    """
    # "8bit (LLM.int8)" -> "8bit": keep only the short precision tag.
    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Rate limiting: reject orgs/users that exceeded the quota in the window.
    num_models_submitted_in_period = user_submission_permission(model, users_to_submission_dates, RATE_LIMIT_PERIOD)
    if num_models_submitted_in_period > RATE_LIMIT_QUOTA:
        # FIX: added the trailing space — the message previously rendered as
        # "Organisation or user `org`already has ...".
        error_msg = f"Organisation or user `{model.split('/')[0]}` "
        error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
        error_msg += f"in the last {RATE_LIMIT_PERIOD} days.\n"
        error_msg += "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
        return styled_error(error_msg)

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # check the model actually exists before adding the eval
    if revision == "":
        revision = "main"

    # Delta/adapter weights need a resolvable base model as well.
    if weight_type in ["Delta", "Adapter"]:
        base_model_on_hub, error = is_model_on_hub(base_model, revision)
        if not base_model_on_hub:
            return styled_error(f'Base model "{base_model}" {error}')

    if weight_type != "Adapter":  # idiom fix: was `not weight_type == "Adapter"`
        model_on_hub, error = is_model_on_hub(model, revision)
        if not model_on_hub:
            return styled_error(f'Model "{model}" {error}')

    print("adding new eval")

    eval_entry = {
        "model": model,
        "base_model": base_model,
        "revision": revision,
        "private": private,
        "precision": precision,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
    }

    # Split "org/name" into the request-file directory layout.
    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
    out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"

    # Check if the model has been forbidden:
    if out_path.split("eval-queue/")[1] in DO_NOT_SUBMIT_MODELS:
        return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")

    # Check for duplicate submission
    if f"{model}_{revision}_{precision}" in requested_models:
        return styled_warning("This model has been already submitted.")

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    api.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
204
+
205
+
206
+ # Basics
207
def refresh() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Rebuild the leaderboard and the three eval-queue tables.

    FIX: the annotation previously claimed ``list[pd.DataFrame]`` while a
    4-tuple is returned (leaderboard, finished, running, pending).

    NOTE(review): this re-reads the module-level ``eval_results``/``eval_queue``
    snapshots; presumably they are refreshed elsewhere (e.g. on Space restart) —
    confirm that this button actually picks up new data.
    """
    leaderboard_df = get_leaderboard_df(eval_results, eval_results_private, COLS, BENCHMARK_COLS)
    (
        finished_eval_queue_df,
        running_eval_queue_df,
        pending_eval_queue_df,
    ) = get_evaluation_queue_df(eval_queue, eval_queue_private, EVAL_REQUESTS_PATH, EVAL_COLS)
    return (
        leaderboard_df,
        finished_eval_queue_df,
        running_eval_queue_df,
        pending_eval_queue_df,
    )
220
+
221
+
222
def change_tab(query_param: str):
    """Select the tab requested via URL query params (?tab=evaluation); default to the leaderboard tab."""
    # The params arrive JSON-ish with single quotes; normalize before parsing.
    params = json.loads(query_param.replace("'", '"'))
    wants_evaluation = isinstance(params, dict) and params.get("tab") == "evaluation"
    return gr.Tabs.update(selected=1 if wants_evaluation else 0)
230
+
231
+
232
+ # Searching and filtering
233
def search_table(df: pd.DataFrame, current_columns_df: pd.DataFrame, query: str) -> pd.DataFrame:
    """Return the rows of ``df`` matching ``query``, projected onto the currently shown columns."""
    current_columns = current_columns_df.columns
    # Always match against the plain model-name (dummy) column.
    mask = df[AutoEvalColumn.dummy.name].str.contains(query, case=False)
    if AutoEvalColumn.model_type.name in current_columns:
        # When the model-type column is displayed, match on it as well.
        mask = mask | df[AutoEvalColumn.model_type.name].str.contains(query, case=False)
    return df[mask][current_columns]
243
+
244
+
245
def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    """Project ``df`` onto the user-selected columns, always keeping the mandatory ones."""
    always_here_cols = [
        AutoEvalColumn.model_type_symbol.name,
        AutoEvalColumn.model.name,
    ]
    # Iterate COLS rather than ``columns`` so the canonical display order is preserved.
    chosen = [c for c in COLS if c in df.columns and c in columns]
    return df[always_here_cols + chosen + [AutoEvalColumn.dummy.name]]
255
+
256
# Parameter-count buckets (in billions) for the "Model sizes" filter.
# Values are (low, high) bounds; note the deliberate gaps between buckets.
NUMERIC_INTERVALS = {
    "< 1.5B": (0, 1.5),
    "~3B": (1.5, 5),
    "~7B": (6, 11),
    "~13B": (12, 15),
    # "~35B": (16, 55),
    # "60B+": (55, 10000),
}
264
+
265
def filter_models(
    df: pd.DataFrame, current_columns_df: pd.DataFrame, type_query: list, size_query: list, show_deleted: bool
) -> pd.DataFrame:
    """Filter the leaderboard by model type, parameter-size bucket and hub availability.

    ``type_query``/``size_query`` hold the labels selected in the UI checkboxes;
    ``show_deleted`` keeps models that are no longer on the hub.
    """
    current_columns = current_columns_df.columns

    # Show all models
    if show_deleted:
        filtered_df = df[current_columns]
    else:  # Show only still on the hub models
        # FIX: was `== True` (flake8/ruff E712); .eq(True) is the equivalent
        # elementwise comparison without the anti-idiom.
        filtered_df = df[df[AutoEvalColumn.still_on_hub.name].eq(True)][current_columns]

    # Model types are matched by their leading emoji symbol; masks built on the
    # full ``df`` align with ``filtered_df`` by index.
    type_emoji = [t[0] for t in type_query]
    filtered_df = filtered_df[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]

    # Size filter spans from the smallest selected bucket's low bound to the
    # largest one's high bound (gaps between selected buckets are NOT excluded).
    # NOTE(review): raises IndexError when size_query is empty — confirm the UI
    # can never submit an empty selection.
    numeric_interval = [NUMERIC_INTERVALS[s] for s in size_query]
    params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
    filtered_df = filtered_df[params_column.between(numeric_interval[0][0], numeric_interval[-1][1])]

    return filtered_df
284
+
285
+
286
# ---------------------------------------------------------------------------
# Gradio UI: leaderboard tab, about tab, submission tab, refresh + citation.
# (Indentation reconstructed — the scraped diff flattened it; verify nesting.)
# ---------------------------------------------------------------------------
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        # Column picker: bookkeeping columns are never offered.
                        shown_columns = gr.CheckboxGroup(
                            choices=[
                                c
                                for c in COLS
                                if c
                                not in [
                                    AutoEvalColumn.dummy.name,
                                    AutoEvalColumn.model.name,
                                    AutoEvalColumn.model_type_symbol.name,
                                    AutoEvalColumn.still_on_hub.name,
                                ]
                            ],
                            value=[
                                c
                                for c in COLS_LITE
                                if c
                                not in [
                                    AutoEvalColumn.dummy.name,
                                    AutoEvalColumn.model.name,
                                    AutoEvalColumn.model_type_symbol.name,
                                    AutoEvalColumn.still_on_hub.name,
                                ]
                            ],
                            label="Select columns to show",
                            elem_id="column-select",
                            interactive=True,
                        )
                    with gr.Row():
                        # Label (Korean): "also show deleted/private models"
                        deleted_models_visibility = gr.Checkbox(
                            value=True, label="👀 삭제/비공개된 모델도 함께 보기", interactive=True
                        )
                with gr.Column(min_width=320):
                    # Placeholder (Korean): "enter the model name to search for"
                    search_bar = gr.Textbox(
                        placeholder="🔍 찾고자 하는 모델 명을 입력하세요",
                        show_label=False,
                        elem_id="search-bar",
                    )
                    with gr.Box(elem_id="box-filter"):
                        filter_columns_type = gr.CheckboxGroup(
                            label="Model types",
                            choices=[
                                ModelType.PT.to_str(),
                                ModelType.FT.to_str(),
                                ModelType.IFT.to_str(),
                                ModelType.RL.to_str(),
                            ],
                            value=[
                                ModelType.PT.to_str(),
                                ModelType.FT.to_str(),
                                ModelType.IFT.to_str(),
                                ModelType.RL.to_str(),
                            ],
                            interactive=True,
                            elem_id="filter-columns-type",
                        )
                        filter_columns_size = gr.CheckboxGroup(
                            label="Model sizes",
                            choices=list(NUMERIC_INTERVALS.keys()),
                            value=list(NUMERIC_INTERVALS.keys()),
                            interactive=True,
                            elem_id="filter-columns-size",
                        )

            # Main leaderboard; the trailing dummy column is hidden via CSS.
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df[
                    [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]
                    + shown_columns.value
                    + [AutoEvalColumn.dummy.name]
                ],
                headers=[
                    AutoEvalColumn.model_type_symbol.name,
                    AutoEvalColumn.model.name,
                ]
                + shown_columns.value
                + [AutoEvalColumn.dummy.name],
                datatype=TYPES,
                max_rows=None,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )

            # Dummy leaderboard for handling the case when the user uses backspace key
            hidden_leaderboard_table_for_search = gr.components.Dataframe(
                value=original_df,
                headers=COLS,
                datatype=TYPES,
                max_rows=None,
                visible=False,
            )
            search_bar.submit(
                search_table,
                [
                    hidden_leaderboard_table_for_search,
                    leaderboard_table,
                    search_bar,
                ],
                leaderboard_table,
            )
            shown_columns.change(
                select_columns,
                [hidden_leaderboard_table_for_search, shown_columns],
                leaderboard_table,
                queue=False,
            )
            # All three filter widgets funnel into the same handler.
            filter_columns_type.change(
                filter_models,
                [
                    hidden_leaderboard_table_for_search,
                    leaderboard_table,
                    filter_columns_type,
                    filter_columns_size,
                    deleted_models_visibility,
                ],
                leaderboard_table,
                queue=False,
            )
            filter_columns_size.change(
                filter_models,
                [
                    hidden_leaderboard_table_for_search,
                    leaderboard_table,
                    filter_columns_type,
                    filter_columns_size,
                    deleted_models_visibility,
                ],
                leaderboard_table,
                queue=False,
            )
            deleted_models_visibility.change(
                filter_models,
                [
                    hidden_leaderboard_table_for_search,
                    leaderboard_table,
                    filter_columns_type,
                    filter_columns_size,
                    deleted_models_visibility,
                ],
                leaderboard_table,
                queue=False,
            )
        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
            with gr.Column():
                with gr.Row():
                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

                with gr.Column():
                    # Accordion titles (Korean): finished / running / pending evals.
                    with gr.Accordion(
                        f"✅ 평가 완료 ({len(finished_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            finished_eval_table = gr.components.Dataframe(
                                value=finished_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                max_rows=5,
                            )
                    with gr.Accordion(
                        f"🔄 평가 진행 중 ({len(running_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            running_eval_table = gr.components.Dataframe(
                                value=running_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                max_rows=5,
                            )

                    with gr.Accordion(
                        f"⏳ 평가 대기 중 ({len(pending_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            pending_eval_table = gr.components.Dataframe(
                                value=pending_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                max_rows=5,
                            )
            with gr.Row():
                # Heading (Korean): "submit your model here!"
                gr.Markdown("# ✉️✨ 여기에서 모델을 제출해주세요!", elem_classes="markdown-text")

            with gr.Row():
                with gr.Column():
                    model_name_textbox = gr.Textbox(label="Model name")
                    revision_name_textbox = gr.Textbox(label="revision", placeholder="main")
                    private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
                    model_type = gr.Dropdown(
                        choices=[
                            ModelType.PT.to_str(" : "),
                            ModelType.FT.to_str(" : "),
                            ModelType.IFT.to_str(" : "),
                            ModelType.RL.to_str(" : "),
                        ],
                        label="Model type",
                        multiselect=False,
                        value=None,
                        interactive=True,
                    )

                with gr.Column():
                    precision = gr.Dropdown(
                        choices=[
                            "float16",
                            "bfloat16",
                            "8bit (LLM.int8)",
                            "4bit (QLoRA / FP4)",
                            "GPTQ"
                        ],
                        label="Precision",
                        multiselect=False,
                        value="float16",
                        interactive=True,
                    )
                    weight_type = gr.Dropdown(
                        choices=["Original", "Delta", "Adapter"],
                        label="Weights type",
                        multiselect=False,
                        value="Original",
                        interactive=True,
                    )
                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

            # Button label (Korean): "submit and get evaluated"
            submit_button = gr.Button("제출하고 평가받기")
            submission_result = gr.Markdown()
            submit_button.click(
                add_new_eval,
                [
                    model_name_textbox,
                    base_model_name_textbox,
                    revision_name_textbox,
                    precision,
                    private,
                    weight_type,
                    model_type,
                ],
                submission_result,
            )

    with gr.Row():
        # Button label (Korean): "refresh"
        refresh_button = gr.Button("새로고침")
        refresh_button.click(
            refresh,
            inputs=[],
            outputs=[
                leaderboard_table,
                finished_eval_table,
                running_eval_table,
                pending_eval_table,
            ],
            api_name='refresh'
        )

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)

    # Hidden textbox: receives the URL query params via _js on page load,
    # which change_tab uses to pre-select a tab.
    dummy = gr.Textbox(visible=False)
    demo.load(
        change_tab,
        dummy,
        tabs,
        _js=get_window_url_params,
    )

# Restart the Space hourly so freshly pushed results get picked up.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
demo.queue(concurrency_count=40).launch()
models_backlinks.py ADDED
@@ -0,0 +1 @@
 
 
1
+ models = ['upstage/Llama-2-70b-instruct-v2', 'upstage/Llama-2-70b-instruct', 'upstage/llama-65b-instruct', 'upstage/llama-65b-instruct', 'upstage/llama-30b-instruct-2048', 'upstage/llama-30b-instruct', 'baseline']
pyproject.toml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.ruff]
2
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
3
+ select = ["E", "F"]
4
+ ignore = ["E501"] # line too long (black is taking care of this)
5
+ line-length = 119
6
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
7
+
8
+ [tool.isort]
9
+ profile = "black"
10
+ line_length = 119
11
+
12
+ [tool.black]
13
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.1.0
2
+ aiohttp==3.8.4
3
+ aiosignal==1.3.1
4
+ altair==4.2.2
5
+ anyio==3.6.2
6
+ APScheduler==3.10.1
7
+ async-timeout==4.0.2
8
+ attrs==23.1.0
9
+ certifi==2022.12.7
10
+ charset-normalizer==3.1.0
11
+ click==8.1.3
12
+ contourpy==1.0.7
13
+ cycler==0.11.0
14
+ datasets==2.12.0
15
+ entrypoints==0.4
16
+ fastapi==0.95.1
17
+ ffmpy==0.3.0
18
+ filelock==3.11.0
19
+ fonttools==4.39.3
20
+ frozenlist==1.3.3
21
+ fsspec==2023.4.0
22
+ gradio==3.27.0
23
+ gradio_client==0.1.3
24
+ h11==0.14.0
25
+ httpcore==0.17.0
26
+ httpx==0.24.0
27
+ huggingface-hub==0.16.4
28
+ idna==3.4
29
+ Jinja2==3.1.2
30
+ jsonschema==4.17.3
31
+ kiwisolver==1.4.4
32
+ linkify-it-py==2.0.0
33
+ markdown-it-py==2.2.0
34
+ MarkupSafe==2.1.2
35
+ matplotlib==3.7.1
36
+ mdit-py-plugins==0.3.3
37
+ mdurl==0.1.2
38
+ multidict==6.0.4
39
+ numpy==1.24.2
40
+ orjson==3.8.10
41
+ packaging==23.1
42
+ pandas==2.0.0
43
+ Pillow==9.5.0
44
+ plotly==5.14.1
45
+ pyarrow==11.0.0
46
+ pydantic==1.10.7
47
+ pydub==0.25.1
48
+ pyparsing==3.0.9
49
+ pyrsistent==0.19.3
50
+ python-dateutil==2.8.2
51
+ python-multipart==0.0.6
52
+ pytz==2023.3
53
+ pytz-deprecation-shim==0.1.0.post0
54
+ PyYAML==6.0
55
+ requests==2.28.2
56
+ semantic-version==2.10.0
57
+ six==1.16.0
58
+ sniffio==1.3.0
59
+ starlette==0.26.1
60
+ toolz==0.12.0
61
+ tqdm==4.65.0
62
+ transformers==4.32.0
63
+ typing_extensions==4.5.0
64
+ tzdata==2023.3
65
+ tzlocal==4.3
66
+ uc-micro-py==1.0.1
67
+ urllib3==1.26.15
68
+ uvicorn==0.21.1
69
+ websockets==11.0.1
70
+ yarl==1.8.2
src/assets/css_html_js.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ custom_css = """
2
+
3
+ .markdown-text {
4
+ font-size: 16px !important;
5
+ }
6
+
7
+ #models-to-add-text {
8
+ font-size: 18px !important;
9
+ }
10
+
11
+ #citation-button span {
12
+ font-size: 16px !important;
13
+ }
14
+
15
+ #citation-button textarea {
16
+ font-size: 16px !important;
17
+ }
18
+
19
+ #citation-button > label > button {
20
+ margin: 6px;
21
+ transform: scale(1.3);
22
+ }
23
+
24
+ #leaderboard-table {
25
+ margin-top: 15px
26
+ }
27
+
28
+ #leaderboard-table-lite {
29
+ margin-top: 15px
30
+ }
31
+
32
+ #search-bar-table-box > div:first-child {
33
+ background: none;
34
+ border: none;
35
+ }
36
+
37
+ #search-bar {
38
+ padding: 0px;
39
+ }
40
+
41
+ /* Hides the final AutoEvalColumn */
42
+ #llm-benchmark-tab-table table td:last-child,
43
+ #llm-benchmark-tab-table table th:last-child {
44
+ display: none;
45
+ }
46
+
47
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
48
+ table td:first-child,
49
+ table th:first-child {
50
+ max-width: 400px;
51
+ overflow: auto;
52
+ white-space: nowrap;
53
+ }
54
+
55
+ .tab-buttons button {
56
+ font-size: 20px;
57
+ }
58
+
59
+ #scale-logo {
60
+ border-style: none !important;
61
+ box-shadow: none;
62
+ display: block;
63
+ margin-left: auto;
64
+ margin-right: auto;
65
+ max-width: 600px;
66
+ }
67
+
68
+ #scale-logo .download {
69
+ display: none;
70
+ }
71
+ #filter_type{
72
+ border: 0;
73
+ padding-left: 0;
74
+ padding-top: 0;
75
+ }
76
+ #filter_type label {
77
+ display: flex;
78
+ }
79
+ #filter_type label > span{
80
+ margin-top: var(--spacing-lg);
81
+ margin-right: 0.5em;
82
+ }
83
+ #filter_type label > .wrap{
84
+ width: 103px;
85
+ }
86
+ #filter_type label > .wrap .wrap-inner{
87
+ padding: 2px;
88
+ }
89
+ #filter_type label > .wrap .wrap-inner input{
90
+ width: 1px
91
+ }
92
+ #filter-columns-type{
93
+ border:0;
94
+ padding:0.5;
95
+ }
96
+ #filter-columns-size{
97
+ border:0;
98
+ padding:0.5;
99
+ }
100
+ #box-filter > .form{
101
+ border: 0
102
+ }
103
+ """
104
+
105
+ get_window_url_params = """
106
+ function(url_params) {
107
+ const params = new URLSearchParams(window.location.search);
108
+ url_params = Object.fromEntries(params);
109
+ return url_params;
110
+ }
111
+ """
src/assets/hardcoded_evals.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.display_models.utils import AutoEvalColumn, model_hyperlink
2
+
3
+ gpt4_values = {
4
+ AutoEvalColumn.model.name: model_hyperlink("https://arxiv.org/abs/2303.08774", "gpt4"),
5
+ AutoEvalColumn.revision.name: "tech report",
6
+ AutoEvalColumn.precision.name: None,
7
+ AutoEvalColumn.average.name: 84.3,
8
+ AutoEvalColumn.arc.name: 96.3,
9
+ AutoEvalColumn.hellaswag.name: 95.3,
10
+ AutoEvalColumn.mmlu.name: 86.4,
11
+ AutoEvalColumn.truthfulqa.name: 59.0,
12
+ AutoEvalColumn.dummy.name: "GPT-4",
13
+ AutoEvalColumn.model_type.name: "",
14
+ }
15
+
16
+ gpt35_values = {
17
+ AutoEvalColumn.model.name: model_hyperlink("https://arxiv.org/abs/2303.08774", "gpt3.5"),
18
+ AutoEvalColumn.revision.name: "tech report",
19
+ AutoEvalColumn.precision.name: None,
20
+ AutoEvalColumn.average.name: 71.9,
21
+ AutoEvalColumn.arc.name: 85.2,
22
+ AutoEvalColumn.hellaswag.name: 85.5,
23
+ AutoEvalColumn.mmlu.name: 70.0,
24
+ AutoEvalColumn.truthfulqa.name: 47.0,
25
+ AutoEvalColumn.dummy.name: "GPT-3.5",
26
+ AutoEvalColumn.model_type.name: "",
27
+ }
28
+
29
+ baseline = {
30
+ AutoEvalColumn.model.name: "<p>Baseline</p>",
31
+ AutoEvalColumn.revision.name: "N/A",
32
+ AutoEvalColumn.precision.name: None,
33
+ AutoEvalColumn.average.name: 25.0,
34
+ AutoEvalColumn.arc.name: 25.0,
35
+ AutoEvalColumn.hellaswag.name: 25.0,
36
+ AutoEvalColumn.mmlu.name: 25.0,
37
+ AutoEvalColumn.truthfulqa.name: 25.0,
38
+ AutoEvalColumn.dummy.name: "baseline",
39
+ AutoEvalColumn.model_type.name: "",
40
+ }
src/assets/text_content.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.display_models.model_metadata_type import ModelType
2
+
3
+ TITLE = """<h1 align="center" id="space-title">🚀 Open Ko-LLM Leaderboard</h1>"""
4
+
5
+ INTRODUCTION_TEXT = f"""
6
+ 🚀 Open Ko-LLM Leaderboard는 한국어 초거대 언어모델의 성능을 객관적으로 평가합니다.
7
+
8
+ "제출" 페이지에서 모델 제출 시 자동으로 평가됩니다. 평가에 사용되는 GPU는 KT의 지원으로 운영됩니다.
9
+ 평가에 사용되는 데이터는 전문 지식, 추론 능력, 환각, 윤리, 상식의 다섯가지 요소를 평가하기 위한 데이터셋으로 구성되어 있습니다.
10
+ 벤치마크 데이터셋에 대한 더 자세한 정보는 "정보" 페이지에서 제공되고 있습니다.
11
+
12
+ 업스테이지와 NIA가 공동 주최하며 업스테이지가 운영합니다.
13
+ """
14
+
15
+ LLM_BENCHMARKS_TEXT = f"""
16
+ # Context
17
+ 뛰어난 LLM 모델들이 앞다투어 공개되고 있지만 이는 대부분 영어 중심의, 영어 문화권에 익숙한 모델입니다. 저희는 한국어 리더보드 🚀 Open Ko-LLM을 운영하여 한국어와 한국 문화의 특성을 반영한 모델을 평가하고자 합니다. 이를 통해 한국어 사용자들이 편리하게 리더보드를 이용하고 참여하여 한국의 연구 수준 향상에 기여할 수 있기를 바랍니다.
18
+
19
+ ## Icons
20
+ {ModelType.PT.to_str(" : ")} model
21
+ {ModelType.FT.to_str(" : ")} model
22
+ {ModelType.IFT.to_str(" : ")} model
23
+ {ModelType.RL.to_str(" : ")} model
24
+ 만약 아이콘이 없다면 아직 모델에 대한 정보가 부족함을 나타냅니다.
25
+ 모델에 대한 정보는 issue를 통해 전달해주세요! 🤩
26
+
27
+ 🏴‍☠️ : 해당 아이콘은 이 모델이 커뮤니티에 의해 주의 대상으로 선정되었으므로 이용 자제를 바란다는 의미입니다. 아이콘을 클릭 시 해당 모델에 대한 discussion으로 이동합니다.
28
+ (높은 리더보드 순위를 위해 평가셋을 학습에 이용한 모델 등이 주의 대상으로 선정됩니다)
29
+
30
+ ## How it works
31
+
32
+ 📈 HuggingFace OpenLLM에서 운영하는 4개의 태스크(HellaSwag, MMLU, Arc, Truthful QA)의 데이터를 한국어로 번역한 데이터셋을 비롯해 총 6가지의 데이터로 벤치마크를 구성했습니다.
33
+ - Ko-HellaSwag (업스테이지 제공)
34
+ - Ko-MMLU (업스테이지 제공)
35
+ - Ko-Arc (업스테이지 제공)
36
+ - Ko-Truthful QA (업스테이지 제공)
37
+ - KoCommongen (NIA 한국지능정보사회진흥원 제공)
38
+ - 텍스트 윤리검증 데이터 (NIA 한국지능정보사회진흥원 제공)
39
+ LLM 시대에 걸맞는 평가를 위해 상식, 전문 지식, 추론, 환각, 윤리의 다섯가지 요소를 평가하기에 적합한 데이터셋들을 벤치마크로 선정했습니다. 최종 점수는 6개의 평가 데이터에 대한 평균 점수로 환산합니다.
40
+
41
+ KT로부터 평가에 사용되는 GPU를 제공받았습니다.
42
+
43
+ ## 좀 더 자세한 정보
44
+ - 좀 더 자세한 수치 정보는: https://huggingface.co/datasets/open-llm-leaderboard/results
45
+ - 모델의 입출력에 대한 자세한 정보는: https://huggingface.co/datasets/open-llm-leaderboard/details
46
+ - 모델의 평가 큐와 평가 상태는: https://huggingface.co/datasets/open-llm-leaderboard/requests
47
+
48
+ ## 결과 재현
49
+ 평가 결과를 재현하기 위해서는 [이 버전](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463)의 데이터셋을 이용하세요. (밑에는 코드 및 평가 환경이라서 일단 skip)
50
+
51
+ ## 더보기
52
+ 질문이 있으시면 [여기](https://huggingface.co/spaces/BearSean/leaderboard-test/discussions/1)서 FAQ를 확인하실 수 있습니다!
53
+ 또한 커뮤니티, 다른 팀 및 연구소에서 제공하는 멋진 자료를 [여기](https://huggingface.co/spaces/BearSean/leaderboard-test/discussions/2)에 모아 두었습니다. 추가로 제안 사항이 있으시다면 이 곳을 이용해주세요.
54
+ """
55
+
56
+ EVALUATION_QUEUE_TEXT = f"""
57
+ # 🚀 Open-Ko LLM 리더보드의 평가 큐입니다.
58
+ 이곳에 추가된 모델들은 곧 자동적으로 KT의 GPU 위에서 평가될 예정입니다!
59
+
60
+ ## <모델 제출 전 확인하면 좋은 것들>
61
+
62
+ ### 1️⃣ 모델과 토크나이저가 AutoClasses로 불러올 수 있나요?
63
+ ```
64
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
65
+ config = AutoConfig.from_pretrained("your model name", revision=revision)
66
+ model = AutoModel.from_pretrained("your model name", revision=revision)
67
+ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
68
+ ```
69
+
70
+ 만약 이 단계가 실패했다면 에러 메세지를 따라 모델을 디버깅한 후에 제출해주세요.
71
+ ⚠️ 모델이 public 상태여야 합니다!
72
+ ⚠️ 만약 모델이 use_remote_code=True여야 한다면 잠시 기다려주세요. 현재로서는 아직 이 옵션을 지원하지 않지만 작동할 수 있도록 하고 있습니다!
73
+
74
+ ### 2️⃣ 모델의 weight를 safetensors로 바꿨나요?
75
+ safetensors는 weight를 보관하는 새로운 포맷으로, 훨씬 안전하고 빠르게 사용할 수 있습니다. 또한 모델의 parameter 개수를 Extended Viewer에 추가할 수 있습니다
76
+
77
+ ### 3️⃣ 모델이 오픈 라이센스를 따르나요?
78
+ 🚀 Open-Ko LLM은 Open LLM을 위한 리더보드로, 많은 사람들이 다양한 모델을 사용하기를 바랍니다
79
+
80
+ ### 4️⃣ 모델 카드를 작성하셨나요?
81
+ 리더보드에 모델에 대한 추가 정보를 업로드할 때 작성하신 모델 카드가 업로드됩니다
82
+
83
+ ## 모델이 실패한 경우:
84
+ 만약 제출한 모델의 상태가 FAILED가 된다면 이는 모델이 실행 중단되었음을 의미합니다. 먼저 위의 네 단계를 모두 따랐는지 확인해보세요. 모든 단계를 따랐음에도 불구하고 실행 중단되었을 때는 EleutherAIHarness 를 로컬에서 실행할 수 있는지 확인하기 위해 위의 코드를 수정 없이 실행하세요. (태스크 별 예시의 수를 제한하기 위해 —limit 파라미터를 추가할 수 있습니다.)
85
+ """
86
+
87
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
88
+ CITATION_BUTTON_TEXT = r"""
89
+ @misc{open-ko-llm-leaderboard,
90
+ author = {Chanjun Park, Hwalsuk Lee, Hyunbyung Park, Sanghoon Kim, Seonghwan Cho, Sunghun Kim, Sukyung Lee},
91
+ title = {Open Ko LLM Leaderboard},
92
+ year = {2023},
93
+ publisher = {Upstage, National Information Society Agency},
94
+ howpublished = "\url{https://huggingface.co/spaces/BearSean/leaderboard-test/discussions/1}"
95
+ }
96
+ """
src/display_models/get_model_metadata.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import json
3
+ import os
4
+ import re
5
+ from typing import List
6
+
7
+ import huggingface_hub
8
+ from huggingface_hub import HfApi
9
+ from tqdm import tqdm
10
+
11
+ from src.display_models.model_metadata_flags import DO_NOT_SUBMIT_MODELS, FLAGGED_MODELS
12
+ from src.display_models.model_metadata_type import MODEL_TYPE_METADATA, ModelType, model_type_from_str
13
+ from src.display_models.utils import AutoEvalColumn, model_hyperlink
14
+
15
+ api = HfApi(token=os.environ.get("H4_TOKEN", None))
16
+
17
+
18
def get_model_infos_from_hub(leaderboard_data: List[dict]):
    """Enrich each leaderboard row, in place, with license, likes and size from the Hub.

    Rows whose repo cannot be found keep None for license/likes and fall back to
    name-based size parsing.
    """
    for row in tqdm(leaderboard_data):
        name = row["model_name_for_query"]
        try:
            info = api.model_info(name)
        except huggingface_hub.utils._errors.RepositoryNotFoundError:
            print("Repo not found!", name)
            info = None

        if info is None:
            row[AutoEvalColumn.license.name] = None
            row[AutoEvalColumn.likes.name] = None
            row[AutoEvalColumn.params.name] = get_model_size(name, None)
        else:
            row[AutoEvalColumn.license.name] = get_model_license(info)
            row[AutoEvalColumn.likes.name] = get_model_likes(info)
            row[AutoEvalColumn.params.name] = get_model_size(name, info)
33
+
34
+
35
def get_model_license(model_info):
    """Return the license declared in the model card, or None when unavailable."""
    try:
        card_data = model_info.cardData
        return card_data["license"]
    except Exception:
        # Missing card, missing key, or no model_info at all — treat all as "no license".
        return None
40
+
41
+
42
def get_model_likes(model_info):
    """Number of likes the model repository has on the Hub."""
    likes = model_info.likes
    return likes
44
+
45
+
46
# Matches a size token such as "7b", "13b", "1.3b" or "350m" inside a model name.
size_pattern = re.compile(r"(\d\.)?\d+(b|m)")


def get_model_size(model_name, model_info):
    """Best-effort parameter count of a model, in billions (rounded to 3 decimals).

    Prefers the exact safetensors metadata on ``model_info``; otherwise falls
    back to parsing a size token out of ``model_name``. Returns None when
    neither source is usable.
    """
    try:
        # Exact parameter count from safetensors metadata (parameters -> billions).
        return round(model_info.safetensors["total"] / 1e9, 3)
    except (AttributeError, KeyError, TypeError):
        # model_info is None, has no safetensors attribute, or the metadata is
        # malformed (missing "total" / not a mapping) — fall back to the name.
        pass

    size_match = size_pattern.search(model_name.lower())
    if size_match is None:
        return None
    size = size_match.group(0)
    # A "b" suffix is already billions; "m" means millions.
    return round(float(size[:-1]) if size[-1] == "b" else float(size[:-1]) / 1e3, 3)
60
+
61
+
62
def get_model_type(leaderboard_data: List[dict]):
    """Fill in the model-type columns of every row, in place.

    The type is read from the matching eval request file when one exists; when
    that fails, the hardcoded MODEL_TYPE_METADATA table is consulted, and
    finally the type defaults to Unknown.
    """
    for row in leaderboard_data:
        query_name = row["model_name_for_query"]
        pattern = os.path.join("eval-queue", query_name + "_eval_request_*" + ".json")
        candidates = glob.glob(pattern)

        # Select the request file matching this row's precision (newest name first).
        request_file = ""
        if len(candidates) == 1:
            request_file = candidates[0]
        elif len(candidates) > 1:
            for candidate in sorted(candidates, reverse=True):
                with open(candidate, "r") as f:
                    content = json.load(f)
                if (
                    content["status"] == "FINISHED"
                    and content["precision"] == row["Precision"].split(".")[-1]
                ):
                    request_file = candidate

        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            model_type = model_type_from_str(request["model_type"])
            row[AutoEvalColumn.model_type.name] = model_type.value.name
            row[AutoEvalColumn.model_type_symbol.name] = model_type.value.symbol  # + ("🔺" if is_delta else "")
        except Exception:
            # No usable request file — fall back to the static table, then Unknown.
            fallback = MODEL_TYPE_METADATA.get(query_name, ModelType.Unknown)
            row[AutoEvalColumn.model_type.name] = fallback.value.name
            row[AutoEvalColumn.model_type_symbol.name] = fallback.value.symbol  # + ("🔺" if is_delta else "")
102
+
103
+
104
def flag_models(leaderboard_data: List[dict]):
    """Append a flag notice (linking the forum discussion) to flagged models' names, in place."""
    for row in leaderboard_data:
        name = row["model_name_for_query"]
        if name not in FLAGGED_MODELS:
            continue
        discussion_url = FLAGGED_MODELS[name]
        issue_num = discussion_url.split("/")[-1]
        issue_link = model_hyperlink(discussion_url, f"See discussion #{issue_num}")
        row[AutoEvalColumn.model.name] = f"{row[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
115
+
116
+
117
def remove_forbidden_models(leaderboard_data: List[dict]):
    """Drop models that orgs asked not to have on the leaderboard.

    The list is filtered in place (slice assignment) so that any existing
    references to it stay valid, and it is also returned for convenience.
    """
    kept = [row for row in leaderboard_data if row["model_name_for_query"] not in DO_NOT_SUBMIT_MODELS]
    leaderboard_data[:] = kept
    return leaderboard_data
126
+
127
+
128
def apply_metadata(leaderboard_data: List[dict]):
    """Run all metadata passes over the leaderboard rows.

    Each helper mutates the rows in place; forbidden models are removed first
    so the later (slower) Hub lookups skip them.
    """
    leaderboard_data = remove_forbidden_models(leaderboard_data)
    get_model_type(leaderboard_data)
    get_model_infos_from_hub(leaderboard_data)
    flag_models(leaderboard_data)
src/display_models/model_metadata_flags.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Models which have been flagged by users as being problematic for a reason or another
# (Model name to forum discussion link).
# The link's trailing path segment is used as the discussion number in the UI.
FLAGGED_MODELS = {
    "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202",
    "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207",
    "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213",
    "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
    "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
    "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
}

# Models which have been requested by orgs to not be submitted on the leaderboard.
# Rows matching these names are removed entirely before display.
DO_NOT_SUBMIT_MODELS = [
    "Voicelab/trurl-2-13b",  # trained on MMLU
]
src/display_models/model_metadata_type.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from enum import Enum
3
+ from typing import Dict
4
+
5
+
6
@dataclass
class ModelInfo:
    """Display metadata for one model-type category."""

    name: str  # human-readable category name
    symbol: str  # emoji
10
+
11
+
12
class ModelType(Enum):
    """Closed set of model-type categories, each carrying a display name and emoji."""

    PT = ModelInfo(name="pretrained", symbol="🟢")
    FT = ModelInfo(name="fine-tuned", symbol="🔶")
    IFT = ModelInfo(name="instruction-tuned", symbol="⭕")
    RL = ModelInfo(name="RL-tuned", symbol="🟦")
    Unknown = ModelInfo(name="Unknown, add type to request file!", symbol="?")

    def to_str(self, separator=" "):
        """Render as "<symbol><separator><name>" for display."""
        info = self.value
        return "{}{}{}".format(info.symbol, separator, info.name)
21
+
22
+
23
+ MODEL_TYPE_METADATA: Dict[str, ModelType] = {
24
+ "tiiuae/falcon-180B": ModelType.PT,
25
+ "Qwen/Qwen-7B": ModelType.PT,
26
+ "Qwen/Qwen-7B-Chat": ModelType.RL,
27
+ "notstoic/PygmalionCoT-7b": ModelType.IFT,
28
+ "aisquared/dlite-v1-355m": ModelType.IFT,
29
+ "aisquared/dlite-v1-1_5b": ModelType.IFT,
30
+ "aisquared/dlite-v1-774m": ModelType.IFT,
31
+ "aisquared/dlite-v1-124m": ModelType.IFT,
32
+ "aisquared/chopt-2_7b": ModelType.IFT,
33
+ "aisquared/dlite-v2-124m": ModelType.IFT,
34
+ "aisquared/dlite-v2-774m": ModelType.IFT,
35
+ "aisquared/dlite-v2-1_5b": ModelType.IFT,
36
+ "aisquared/chopt-1_3b": ModelType.IFT,
37
+ "aisquared/dlite-v2-355m": ModelType.IFT,
38
+ "augtoma/qCammel-13": ModelType.IFT,
39
+ "Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload": ModelType.IFT,
40
+ "Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload": ModelType.IFT,
41
+ "TheBloke/alpaca-lora-65B-HF": ModelType.FT,
42
+ "TheBloke/tulu-7B-fp16": ModelType.IFT,
43
+ "TheBloke/guanaco-7B-HF": ModelType.FT,
44
+ "TheBloke/koala-7B-HF": ModelType.FT,
45
+ "TheBloke/wizardLM-7B-HF": ModelType.IFT,
46
+ "TheBloke/airoboros-13B-HF": ModelType.IFT,
47
+ "TheBloke/koala-13B-HF": ModelType.FT,
48
+ "TheBloke/Wizard-Vicuna-7B-Uncensored-HF": ModelType.FT,
49
+ "TheBloke/dromedary-65b-lora-HF": ModelType.IFT,
50
+ "TheBloke/wizardLM-13B-1.0-fp16": ModelType.IFT,
51
+ "TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16": ModelType.FT,
52
+ "TheBloke/Wizard-Vicuna-30B-Uncensored-fp16": ModelType.FT,
53
+ "TheBloke/wizard-vicuna-13B-HF": ModelType.IFT,
54
+ "TheBloke/UltraLM-13B-fp16": ModelType.IFT,
55
+ "TheBloke/OpenAssistant-FT-7-Llama-30B-HF": ModelType.FT,
56
+ "TheBloke/vicuna-13B-1.1-HF": ModelType.IFT,
57
+ "TheBloke/guanaco-13B-HF": ModelType.FT,
58
+ "TheBloke/guanaco-65B-HF": ModelType.FT,
59
+ "TheBloke/airoboros-7b-gpt4-fp16": ModelType.IFT,
60
+ "TheBloke/llama-30b-supercot-SuperHOT-8K-fp16": ModelType.IFT,
61
+ "TheBloke/Llama-2-13B-fp16": ModelType.PT,
62
+ "TheBloke/llama-2-70b-Guanaco-QLoRA-fp16": ModelType.FT,
63
+ "TheBloke/landmark-attention-llama7b-fp16": ModelType.IFT,
64
+ "TheBloke/Planner-7B-fp16": ModelType.IFT,
65
+ "TheBloke/Wizard-Vicuna-13B-Uncensored-HF": ModelType.FT,
66
+ "TheBloke/gpt4-alpaca-lora-13B-HF": ModelType.IFT,
67
+ "TheBloke/gpt4-x-vicuna-13B-HF": ModelType.IFT,
68
+ "TheBloke/gpt4-alpaca-lora_mlp-65B-HF": ModelType.IFT,
69
+ "TheBloke/tulu-13B-fp16": ModelType.IFT,
70
+ "TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16": ModelType.IFT,
71
+ "TheBloke/Llama-2-70B-fp16": ModelType.IFT,
72
+ "TheBloke/WizardLM-30B-fp16": ModelType.IFT,
73
+ "TheBloke/robin-13B-v2-fp16": ModelType.FT,
74
+ "TheBloke/robin-33B-v2-fp16": ModelType.FT,
75
+ "TheBloke/Vicuna-13B-CoT-fp16": ModelType.IFT,
76
+ "TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16": ModelType.IFT,
77
+ "TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16": ModelType.FT,
78
+ "TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16": ModelType.IFT,
79
+ "TheBloke/GPlatty-30B-SuperHOT-8K-fp16": ModelType.FT,
80
+ "TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16": ModelType.IFT,
81
+ "TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16": ModelType.IFT,
82
+ "jphme/orca_mini_v2_ger_7b": ModelType.IFT,
83
+ "Ejafa/vicuna_7B_vanilla_1.1": ModelType.FT,
84
+ "kevinpro/Vicuna-13B-CoT": ModelType.IFT,
85
+ "AlekseyKorshuk/pygmalion-6b-vicuna-chatml": ModelType.FT,
86
+ "AlekseyKorshuk/chatml-pyg-v1": ModelType.FT,
87
+ "concedo/Vicuzard-30B-Uncensored": ModelType.FT,
88
+ "concedo/OPT-19M-ChatSalad": ModelType.FT,
89
+ "concedo/Pythia-70M-ChatSalad": ModelType.FT,
90
+ "digitous/13B-HyperMantis": ModelType.IFT,
91
+ "digitous/Adventien-GPTJ": ModelType.FT,
92
+ "digitous/Alpacino13b": ModelType.IFT,
93
+ "digitous/GPT-R": ModelType.IFT,
94
+ "digitous/Javelin-R": ModelType.IFT,
95
+ "digitous/Javalion-GPTJ": ModelType.IFT,
96
+ "digitous/Javalion-R": ModelType.IFT,
97
+ "digitous/Skegma-GPTJ": ModelType.FT,
98
+ "digitous/Alpacino30b": ModelType.IFT,
99
+ "digitous/Janin-GPTJ": ModelType.FT,
100
+ "digitous/Janin-R": ModelType.FT,
101
+ "digitous/Javelin-GPTJ": ModelType.FT,
102
+ "SaylorTwift/gpt2_test": ModelType.PT,
103
+ "anton-l/gpt-j-tiny-random": ModelType.FT,
104
+ "Andron00e/YetAnother_Open-Llama-3B-LoRA-OpenOrca": ModelType.FT,
105
+ "Lazycuber/pyg-instruct-wizardlm": ModelType.FT,
106
+ "Lazycuber/Janemalion-6B": ModelType.FT,
107
+ "IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1": ModelType.FT,
108
+ "IDEA-CCNL/Ziya-LLaMA-13B-v1": ModelType.IFT,
109
+ "dsvv-cair/alpaca-cleaned-llama-30b-bf16": ModelType.FT,
110
+ "gpt2-medium": ModelType.PT,
111
+ "camel-ai/CAMEL-13B-Combined-Data": ModelType.IFT,
112
+ "camel-ai/CAMEL-13B-Role-Playing-Data": ModelType.FT,
113
+ "camel-ai/CAMEL-33B-Combined-Data": ModelType.IFT,
114
+ "PygmalionAI/pygmalion-6b": ModelType.FT,
115
+ "PygmalionAI/metharme-1.3b": ModelType.IFT,
116
+ "PygmalionAI/pygmalion-1.3b": ModelType.FT,
117
+ "PygmalionAI/pygmalion-350m": ModelType.FT,
118
+ "PygmalionAI/pygmalion-2.7b": ModelType.FT,
119
+ "medalpaca/medalpaca-7b": ModelType.FT,
120
+ "lilloukas/Platypus-30B": ModelType.IFT,
121
+ "lilloukas/GPlatty-30B": ModelType.FT,
122
+ "mncai/chatdoctor": ModelType.FT,
123
+ "chaoyi-wu/MedLLaMA_13B": ModelType.FT,
124
+ "LoupGarou/WizardCoder-Guanaco-15B-V1.0": ModelType.IFT,
125
+ "LoupGarou/WizardCoder-Guanaco-15B-V1.1": ModelType.FT,
126
+ "hakurei/instruct-12b": ModelType.IFT,
127
+ "hakurei/lotus-12B": ModelType.FT,
128
+ "shibing624/chinese-llama-plus-13b-hf": ModelType.IFT,
129
+ "shibing624/chinese-alpaca-plus-7b-hf": ModelType.IFT,
130
+ "shibing624/chinese-alpaca-plus-13b-hf": ModelType.IFT,
131
+ "mosaicml/mpt-7b-instruct": ModelType.IFT,
132
+ "mosaicml/mpt-30b-chat": ModelType.IFT,
133
+ "mosaicml/mpt-7b-storywriter": ModelType.FT,
134
+ "mosaicml/mpt-30b-instruct": ModelType.IFT,
135
+ "mosaicml/mpt-7b-chat": ModelType.IFT,
136
+ "mosaicml/mpt-30b": ModelType.PT,
137
+ "Corianas/111m": ModelType.IFT,
138
+ "Corianas/Quokka_1.3b": ModelType.IFT,
139
+ "Corianas/256_5epoch": ModelType.FT,
140
+ "Corianas/Quokka_256m": ModelType.IFT,
141
+ "Corianas/Quokka_590m": ModelType.IFT,
142
+ "Corianas/gpt-j-6B-Dolly": ModelType.FT,
143
+ "Corianas/Quokka_2.7b": ModelType.IFT,
144
+ "cyberagent/open-calm-7b": ModelType.FT,
145
+ "Aspik101/Nous-Hermes-13b-pl-lora_unload": ModelType.IFT,
146
+ "THUDM/chatglm2-6b": ModelType.IFT,
147
+ "MetaIX/GPT4-X-Alpasta-30b": ModelType.IFT,
148
+ "NYTK/PULI-GPTrio": ModelType.PT,
149
+ "EleutherAI/pythia-1.3b": ModelType.PT,
150
+ "EleutherAI/pythia-2.8b-deduped": ModelType.PT,
151
+ "EleutherAI/gpt-neo-125m": ModelType.PT,
152
+ "EleutherAI/pythia-160m": ModelType.PT,
153
+ "EleutherAI/gpt-neo-2.7B": ModelType.PT,
154
+ "EleutherAI/pythia-1b-deduped": ModelType.PT,
155
+ "EleutherAI/pythia-6.7b": ModelType.PT,
156
+ "EleutherAI/pythia-70m-deduped": ModelType.PT,
157
+ "EleutherAI/gpt-neox-20b": ModelType.PT,
158
+ "EleutherAI/pythia-1.4b-deduped": ModelType.PT,
159
+ "EleutherAI/pythia-2.7b": ModelType.PT,
160
+ "EleutherAI/pythia-6.9b-deduped": ModelType.PT,
161
+ "EleutherAI/pythia-70m": ModelType.PT,
162
+ "EleutherAI/gpt-j-6b": ModelType.PT,
163
+ "EleutherAI/pythia-12b-deduped": ModelType.PT,
164
+ "EleutherAI/gpt-neo-1.3B": ModelType.PT,
165
+ "EleutherAI/pythia-410m-deduped": ModelType.PT,
166
+ "EleutherAI/pythia-160m-deduped": ModelType.PT,
167
+ "EleutherAI/polyglot-ko-12.8b": ModelType.PT,
168
+ "EleutherAI/pythia-12b": ModelType.PT,
169
+ "roneneldan/TinyStories-33M": ModelType.PT,
170
+ "roneneldan/TinyStories-28M": ModelType.PT,
171
+ "roneneldan/TinyStories-1M": ModelType.PT,
172
+ "roneneldan/TinyStories-8M": ModelType.PT,
173
+ "roneneldan/TinyStories-3M": ModelType.PT,
174
+ "jerryjalapeno/nart-100k-7b": ModelType.FT,
175
+ "lmsys/vicuna-13b-v1.3": ModelType.IFT,
176
+ "lmsys/vicuna-7b-v1.3": ModelType.IFT,
177
+ "lmsys/vicuna-13b-v1.1": ModelType.IFT,
178
+ "lmsys/vicuna-13b-delta-v1.1": ModelType.IFT,
179
+ "lmsys/vicuna-7b-delta-v1.1": ModelType.IFT,
180
+ "abhiramtirumala/DialoGPT-sarcastic-medium": ModelType.FT,
181
+ "haonan-li/bactrian-x-llama-13b-merged": ModelType.IFT,
182
+ "Gryphe/MythoLogic-13b": ModelType.IFT,
183
+ "Gryphe/MythoBoros-13b": ModelType.IFT,
184
+ "pillowtalks-ai/delta13b": ModelType.FT,
185
+ "wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard": ModelType.FT,
186
+ "bigscience/bloom-7b1": ModelType.PT,
187
+ "bigcode/tiny_starcoder_py": ModelType.PT,
188
+ "bigcode/starcoderplus": ModelType.FT,
189
+ "bigcode/gpt_bigcode-santacoder": ModelType.PT,
190
+ "bigcode/starcoder": ModelType.PT,
191
+ "Open-Orca/OpenOrca-Preview1-13B": ModelType.IFT,
192
+ "microsoft/DialoGPT-large": ModelType.FT,
193
+ "microsoft/DialoGPT-small": ModelType.FT,
194
+ "microsoft/DialoGPT-medium": ModelType.FT,
195
+ "microsoft/CodeGPT-small-py": ModelType.FT,
196
+ "Tincando/fiction_story_generator": ModelType.FT,
197
+ "Pirr/pythia-13b-deduped-green_devil": ModelType.FT,
198
+ "Aeala/GPT4-x-AlpacaDente2-30b": ModelType.FT,
199
+ "Aeala/GPT4-x-AlpacaDente-30b": ModelType.FT,
200
+ "Aeala/GPT4-x-Alpasta-13b": ModelType.FT,
201
+ "Aeala/VicUnlocked-alpaca-30b": ModelType.IFT,
202
+ "Tap-M/Luna-AI-Llama2-Uncensored": ModelType.FT,
203
+ "illuin/test-custom-llama": ModelType.FT,
204
+ "dvruette/oasst-llama-13b-2-epochs": ModelType.FT,
205
+ "dvruette/oasst-gpt-neox-20b-1000-steps": ModelType.FT,
206
+ "dvruette/llama-13b-pretrained-dropout": ModelType.PT,
207
+ "dvruette/llama-13b-pretrained": ModelType.PT,
208
+ "dvruette/llama-13b-pretrained-sft-epoch-1": ModelType.FT,
209
+ "dvruette/llama-13b-pretrained-sft-do2": ModelType.FT,
210
+ "dvruette/oasst-gpt-neox-20b-3000-steps": ModelType.FT,
211
+ "dvruette/oasst-pythia-12b-pretrained-sft": ModelType.FT,
212
+ "dvruette/oasst-pythia-6.9b-4000-steps": ModelType.FT,
213
+ "dvruette/gpt-neox-20b-full-precision": ModelType.FT,
214
+ "dvruette/oasst-llama-13b-1000-steps": ModelType.FT,
215
+ "openlm-research/open_llama_7b_700bt_preview": ModelType.PT,
216
+ "openlm-research/open_llama_7b": ModelType.PT,
217
+ "openlm-research/open_llama_7b_v2": ModelType.PT,
218
+ "openlm-research/open_llama_3b": ModelType.PT,
219
+ "openlm-research/open_llama_13b": ModelType.PT,
220
+ "openlm-research/open_llama_3b_v2": ModelType.PT,
221
+ "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged": ModelType.IFT,
222
+ "GeorgiaTechResearchInstitute/galpaca-30b": ModelType.IFT,
223
+ "GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct": ModelType.IFT,
224
+ "databricks/dolly-v2-7b": ModelType.IFT,
225
+ "databricks/dolly-v2-3b": ModelType.IFT,
226
+ "databricks/dolly-v2-12b": ModelType.IFT,
227
+ "Rachneet/gpt2-xl-alpaca": ModelType.FT,
228
+ "Locutusque/gpt2-conversational-or-qa": ModelType.FT,
229
+ "psyche/kogpt": ModelType.FT,
230
+ "NbAiLab/nb-gpt-j-6B-alpaca": ModelType.IFT,
231
+ "Mikael110/llama-2-7b-guanaco-fp16": ModelType.FT,
232
+ "Mikael110/llama-2-13b-guanaco-fp16": ModelType.FT,
233
+ "Fredithefish/CrimsonPajama": ModelType.IFT,
234
+ "Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K": ModelType.FT,
235
+ "Fredithefish/ScarletPajama-3B-HF": ModelType.FT,
236
+ "Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4": ModelType.IFT,
237
+ "acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1": ModelType.IFT,
238
+ "eachadea/vicuna-13b-1.1": ModelType.FT,
239
+ "eachadea/vicuna-7b-1.1": ModelType.FT,
240
+ "eachadea/vicuna-13b": ModelType.FT,
241
+ "openaccess-ai-collective/wizard-mega-13b": ModelType.IFT,
242
+ "openaccess-ai-collective/manticore-13b": ModelType.IFT,
243
+ "openaccess-ai-collective/manticore-30b-chat-pyg-alpha": ModelType.IFT,
244
+ "openaccess-ai-collective/minotaur-13b": ModelType.IFT,
245
+ "openaccess-ai-collective/minotaur-13b-fixed": ModelType.IFT,
246
+ "openaccess-ai-collective/hippogriff-30b-chat": ModelType.IFT,
247
+ "openaccess-ai-collective/manticore-13b-chat-pyg": ModelType.IFT,
248
+ "pythainlp/wangchanglm-7.5B-sft-enth": ModelType.IFT,
249
+ "pythainlp/wangchanglm-7.5B-sft-en-sharded": ModelType.IFT,
250
+ "euclaise/gpt-neox-122m-minipile-digits": ModelType.FT,
251
+ "stabilityai/StableBeluga1-Delta": ModelType.IFT,
252
+ "stabilityai/stablelm-tuned-alpha-7b": ModelType.IFT,
253
+ "stabilityai/StableBeluga2": ModelType.IFT,
254
+ "stabilityai/StableBeluga-13B": ModelType.IFT,
255
+ "stabilityai/StableBeluga-7B": ModelType.IFT,
256
+ "stabilityai/stablelm-base-alpha-7b": ModelType.PT,
257
+ "stabilityai/stablelm-base-alpha-3b": ModelType.PT,
258
+ "stabilityai/stablelm-tuned-alpha-3b": ModelType.IFT,
259
+ "alibidaran/medical_transcription_generator": ModelType.FT,
260
+ "CalderaAI/30B-Lazarus": ModelType.IFT,
261
+ "CalderaAI/13B-BlueMethod": ModelType.IFT,
262
+ "CalderaAI/13B-Ouroboros": ModelType.IFT,
263
+ "KoboldAI/OPT-13B-Erebus": ModelType.FT,
264
+ "KoboldAI/GPT-J-6B-Janeway": ModelType.FT,
265
+ "KoboldAI/GPT-J-6B-Shinen": ModelType.FT,
266
+ "KoboldAI/fairseq-dense-2.7B": ModelType.PT,
267
+ "KoboldAI/OPT-6B-nerys-v2": ModelType.FT,
268
+ "KoboldAI/GPT-NeoX-20B-Skein": ModelType.FT,
269
+ "KoboldAI/PPO_Pygway-6b-Mix": ModelType.FT,
270
+ "KoboldAI/fairseq-dense-6.7B": ModelType.PT,
271
+ "KoboldAI/fairseq-dense-125M": ModelType.PT,
272
+ "KoboldAI/OPT-13B-Nerybus-Mix": ModelType.FT,
273
+ "KoboldAI/OPT-2.7B-Erebus": ModelType.FT,
274
+ "KoboldAI/OPT-350M-Nerys-v2": ModelType.FT,
275
+ "KoboldAI/OPT-2.7B-Nerys-v2": ModelType.FT,
276
+ "KoboldAI/OPT-2.7B-Nerybus-Mix": ModelType.FT,
277
+ "KoboldAI/OPT-13B-Nerys-v2": ModelType.FT,
278
+ "KoboldAI/GPT-NeoX-20B-Erebus": ModelType.FT,
279
+ "KoboldAI/OPT-6.7B-Erebus": ModelType.FT,
280
+ "KoboldAI/fairseq-dense-355M": ModelType.PT,
281
+ "KoboldAI/OPT-6.7B-Nerybus-Mix": ModelType.FT,
282
+ "KoboldAI/GPT-J-6B-Adventure": ModelType.FT,
283
+ "KoboldAI/OPT-350M-Erebus": ModelType.FT,
284
+ "KoboldAI/GPT-J-6B-Skein": ModelType.FT,
285
+ "KoboldAI/OPT-30B-Erebus": ModelType.FT,
286
+ "klosax/pythia-160m-deduped-step92k-193bt": ModelType.PT,
287
+ "klosax/open_llama_3b_350bt_preview": ModelType.PT,
288
+ "klosax/openllama-3b-350bt": ModelType.PT,
289
+ "klosax/pythia-70m-deduped-step44k-92bt": ModelType.PT,
290
+ "klosax/open_llama_13b_600bt_preview": ModelType.PT,
291
+ "klosax/open_llama_7b_400bt_preview": ModelType.PT,
292
+ "kfkas/Llama-2-ko-7b-Chat": ModelType.IFT,
293
+ "WeOpenML/Alpaca-7B-v1": ModelType.IFT,
294
+ "WeOpenML/PandaLM-Alpaca-7B-v1": ModelType.IFT,
295
+ "TFLai/gpt2-turkish-uncased": ModelType.FT,
296
+ "ehartford/WizardLM-13B-Uncensored": ModelType.IFT,
297
+ "ehartford/dolphin-llama-13b": ModelType.IFT,
298
+ "ehartford/Wizard-Vicuna-30B-Uncensored": ModelType.FT,
299
+ "ehartford/WizardLM-30B-Uncensored": ModelType.IFT,
300
+ "ehartford/Wizard-Vicuna-13B-Uncensored": ModelType.FT,
301
+ "ehartford/WizardLM-7B-Uncensored": ModelType.IFT,
302
+ "ehartford/based-30b": ModelType.FT,
303
+ "ehartford/Wizard-Vicuna-7B-Uncensored": ModelType.FT,
304
+ "wahaha1987/llama_7b_sharegpt94k_fastchat": ModelType.FT,
305
+ "wahaha1987/llama_13b_sharegpt94k_fastchat": ModelType.FT,
306
+ "OpenAssistant/oasst-sft-1-pythia-12b": ModelType.FT,
307
+ "OpenAssistant/stablelm-7b-sft-v7-epoch-3": ModelType.IFT,
308
+ "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": ModelType.FT,
309
+ "OpenAssistant/pythia-12b-sft-v8-2.5k-steps": ModelType.IFT,
310
+ "OpenAssistant/pythia-12b-sft-v8-7k-steps": ModelType.IFT,
311
+ "OpenAssistant/pythia-12b-pre-v8-12.5k-steps": ModelType.IFT,
312
+ "OpenAssistant/llama2-13b-orca-8k-3319": ModelType.IFT,
313
+ "junelee/wizard-vicuna-13b": ModelType.FT,
314
+ "BreadAi/gpt-YA-1-1_160M": ModelType.PT,
315
+ "BreadAi/MuseCan": ModelType.PT,
316
+ "BreadAi/MusePy-1-2": ModelType.PT,
317
+ "BreadAi/DiscordPy": ModelType.PT,
318
+ "BreadAi/PM_modelV2": ModelType.PT,
319
+ "BreadAi/gpt-Youtube": ModelType.PT,
320
+ "BreadAi/StoryPy": ModelType.FT,
321
+ "julianweng/Llama-2-7b-chat-orcah": ModelType.FT,
322
+ "AGI-inc/lora_moe_7b_baseline": ModelType.FT,
323
+ "AGI-inc/lora_moe_7b": ModelType.FT,
324
+ "togethercomputer/GPT-NeoXT-Chat-Base-20B": ModelType.IFT,
325
+ "togethercomputer/RedPajama-INCITE-Chat-7B-v0.1": ModelType.IFT,
326
+ "togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1": ModelType.IFT,
327
+ "togethercomputer/RedPajama-INCITE-7B-Base": ModelType.PT,
328
+ "togethercomputer/RedPajama-INCITE-7B-Instruct": ModelType.IFT,
329
+ "togethercomputer/RedPajama-INCITE-Base-3B-v1": ModelType.PT,
330
+ "togethercomputer/Pythia-Chat-Base-7B": ModelType.IFT,
331
+ "togethercomputer/RedPajama-INCITE-Base-7B-v0.1": ModelType.PT,
332
+ "togethercomputer/GPT-JT-6B-v1": ModelType.IFT,
333
+ "togethercomputer/GPT-JT-6B-v0": ModelType.IFT,
334
+ "togethercomputer/RedPajama-INCITE-Chat-3B-v1": ModelType.IFT,
335
+ "togethercomputer/RedPajama-INCITE-7B-Chat": ModelType.IFT,
336
+ "togethercomputer/RedPajama-INCITE-Instruct-3B-v1": ModelType.IFT,
337
+ "Writer/camel-5b-hf": ModelType.IFT,
338
+ "Writer/palmyra-base": ModelType.PT,
339
+ "MBZUAI/LaMini-GPT-1.5B": ModelType.IFT,
340
+ "MBZUAI/lamini-cerebras-111m": ModelType.IFT,
341
+ "MBZUAI/lamini-neo-1.3b": ModelType.IFT,
342
+ "MBZUAI/lamini-cerebras-1.3b": ModelType.IFT,
343
+ "MBZUAI/lamini-cerebras-256m": ModelType.IFT,
344
+ "MBZUAI/LaMini-GPT-124M": ModelType.IFT,
345
+ "MBZUAI/lamini-neo-125m": ModelType.IFT,
346
+ "TehVenom/DiffMerge-DollyGPT-Pygmalion": ModelType.FT,
347
+ "TehVenom/PPO_Shygmalion-6b": ModelType.FT,
348
+ "TehVenom/Dolly_Shygmalion-6b-Dev_V8P2": ModelType.FT,
349
+ "TehVenom/Pygmalion_AlpacaLora-7b": ModelType.FT,
350
+ "TehVenom/PPO_Pygway-V8p4_Dev-6b": ModelType.FT,
351
+ "TehVenom/Dolly_Malion-6b": ModelType.FT,
352
+ "TehVenom/PPO_Shygmalion-V8p4_Dev-6b": ModelType.FT,
353
+ "TehVenom/ChanMalion": ModelType.FT,
354
+ "TehVenom/GPT-J-Pyg_PPO-6B": ModelType.IFT,
355
+ "TehVenom/Pygmalion-13b-Merged": ModelType.FT,
356
+ "TehVenom/Metharme-13b-Merged": ModelType.IFT,
357
+ "TehVenom/Dolly_Shygmalion-6b": ModelType.FT,
358
+ "TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4": ModelType.IFT,
359
+ "georgesung/llama2_7b_chat_uncensored": ModelType.FT,
360
+ "vicgalle/gpt2-alpaca": ModelType.IFT,
361
+ "vicgalle/alpaca-7b": ModelType.FT,
362
+ "vicgalle/gpt2-alpaca-gpt4": ModelType.IFT,
363
+ "facebook/opt-350m": ModelType.PT,
364
+ "facebook/opt-125m": ModelType.PT,
365
+ "facebook/xglm-4.5B": ModelType.PT,
366
+ "facebook/opt-2.7b": ModelType.PT,
367
+ "facebook/opt-6.7b": ModelType.PT,
368
+ "facebook/galactica-30b": ModelType.PT,
369
+ "facebook/opt-13b": ModelType.PT,
370
+ "facebook/opt-66b": ModelType.PT,
371
+ "facebook/xglm-7.5B": ModelType.PT,
372
+ "facebook/xglm-564M": ModelType.PT,
373
+ "facebook/opt-30b": ModelType.PT,
374
+ "golaxy/gogpt-7b": ModelType.FT,
375
+ "golaxy/gogpt2-7b": ModelType.FT,
376
+ "golaxy/gogpt-7b-bloom": ModelType.FT,
377
+ "golaxy/gogpt-3b-bloom": ModelType.FT,
378
+ "psmathur/orca_mini_v2_7b": ModelType.IFT,
379
+ "psmathur/orca_mini_7b": ModelType.IFT,
380
+ "psmathur/orca_mini_3b": ModelType.IFT,
381
+ "psmathur/orca_mini_v2_13b": ModelType.IFT,
382
+ "gpt2-xl": ModelType.PT,
383
+ "lxe/Cerebras-GPT-2.7B-Alpaca-SP": ModelType.FT,
384
+ "Monero/Manticore-13b-Chat-Pyg-Guanaco": ModelType.FT,
385
+ "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b": ModelType.IFT,
386
+ "Monero/WizardLM-13b-OpenAssistant-Uncensored": ModelType.IFT,
387
+ "Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b": ModelType.IFT,
388
+ "jzjiao/opt-1.3b-rlhf": ModelType.FT,
389
+ "HuggingFaceH4/starchat-beta": ModelType.IFT,
390
+ "KnutJaegersberg/gpt-2-xl-EvolInstruct": ModelType.IFT,
391
+ "KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct": ModelType.IFT,
392
+ "KnutJaegersberg/galactica-orca-wizardlm-1.3b": ModelType.IFT,
393
+ "openchat/openchat_8192": ModelType.IFT,
394
+ "openchat/openchat_v2": ModelType.IFT,
395
+ "openchat/openchat_v2_w": ModelType.IFT,
396
+ "ausboss/llama-13b-supercot": ModelType.IFT,
397
+ "ausboss/llama-30b-supercot": ModelType.IFT,
398
+ "Neko-Institute-of-Science/metharme-7b": ModelType.IFT,
399
+ "Neko-Institute-of-Science/pygmalion-7b": ModelType.FT,
400
+ "SebastianSchramm/Cerebras-GPT-111M-instruction": ModelType.IFT,
401
+ "victor123/WizardLM-13B-1.0": ModelType.IFT,
402
+ "OpenBuddy/openbuddy-openllama-13b-v7-fp16": ModelType.FT,
403
+ "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16": ModelType.FT,
404
+ "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16": ModelType.FT,
405
+ "baichuan-inc/Baichuan-7B": ModelType.PT,
406
+ "tiiuae/falcon-40b-instruct": ModelType.IFT,
407
+ "tiiuae/falcon-40b": ModelType.PT,
408
+ "tiiuae/falcon-7b": ModelType.PT,
409
+ "YeungNLP/firefly-llama-13b": ModelType.FT,
410
+ "YeungNLP/firefly-llama-13b-v1.2": ModelType.FT,
411
+ "YeungNLP/firefly-llama2-13b": ModelType.FT,
412
+ "YeungNLP/firefly-ziya-13b": ModelType.FT,
413
+ "shaohang/Sparse0.5_OPT-1.3": ModelType.FT,
414
+ "xzuyn/Alpacino-SuperCOT-13B": ModelType.IFT,
415
+ "xzuyn/MedicWizard-7B": ModelType.FT,
416
+ "xDAN-AI/xDAN_13b_l2_lora": ModelType.FT,
417
+ "beomi/KoAlpaca-Polyglot-5.8B": ModelType.FT,
418
+ "beomi/llama-2-ko-7b": ModelType.IFT,
419
+ "Salesforce/codegen-6B-multi": ModelType.PT,
420
+ "Salesforce/codegen-16B-nl": ModelType.PT,
421
+ "Salesforce/codegen-6B-nl": ModelType.PT,
422
+ "ai-forever/rugpt3large_based_on_gpt2": ModelType.FT,
423
+ "gpt2-large": ModelType.PT,
424
+ "frank098/orca_mini_3b_juniper": ModelType.FT,
425
+ "frank098/WizardLM_13B_juniper": ModelType.FT,
426
+ "FPHam/Free_Sydney_13b_HF": ModelType.FT,
427
+ "huggingface/llama-13b": ModelType.PT,
428
+ "huggingface/llama-7b": ModelType.PT,
429
+ "huggingface/llama-65b": ModelType.PT,
430
+ "huggingface/llama-30b": ModelType.PT,
431
+ "Henk717/chronoboros-33B": ModelType.IFT,
432
+ "jondurbin/airoboros-13b-gpt4-1.4": ModelType.IFT,
433
+ "jondurbin/airoboros-7b": ModelType.IFT,
434
+ "jondurbin/airoboros-7b-gpt4": ModelType.IFT,
435
+ "jondurbin/airoboros-7b-gpt4-1.1": ModelType.IFT,
436
+ "jondurbin/airoboros-7b-gpt4-1.2": ModelType.IFT,
437
+ "jondurbin/airoboros-7b-gpt4-1.3": ModelType.IFT,
438
+ "jondurbin/airoboros-7b-gpt4-1.4": ModelType.IFT,
439
+ "jondurbin/airoboros-l2-7b-gpt4-1.4.1": ModelType.IFT,
440
+ "jondurbin/airoboros-l2-13b-gpt4-1.4.1": ModelType.IFT,
441
+ "jondurbin/airoboros-l2-70b-gpt4-1.4.1": ModelType.IFT,
442
+ "jondurbin/airoboros-13b": ModelType.IFT,
443
+ "jondurbin/airoboros-33b-gpt4-1.4": ModelType.IFT,
444
+ "jondurbin/airoboros-33b-gpt4-1.2": ModelType.IFT,
445
+ "jondurbin/airoboros-65b-gpt4-1.2": ModelType.IFT,
446
+ "ariellee/SuperPlatty-30B": ModelType.IFT,
447
+ "danielhanchen/open_llama_3b_600bt_preview": ModelType.FT,
448
+ "cerebras/Cerebras-GPT-256M": ModelType.PT,
449
+ "cerebras/Cerebras-GPT-1.3B": ModelType.PT,
450
+ "cerebras/Cerebras-GPT-13B": ModelType.PT,
451
+ "cerebras/Cerebras-GPT-2.7B": ModelType.PT,
452
+ "cerebras/Cerebras-GPT-111M": ModelType.PT,
453
+ "cerebras/Cerebras-GPT-6.7B": ModelType.PT,
454
+ "Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf": ModelType.RL,
455
+ "Yhyu13/llama-30B-hf-openassitant": ModelType.FT,
456
+ "NousResearch/Nous-Hermes-Llama2-13b": ModelType.IFT,
457
+ "NousResearch/Nous-Hermes-llama-2-7b": ModelType.IFT,
458
+ "NousResearch/Redmond-Puffin-13B": ModelType.IFT,
459
+ "NousResearch/Nous-Hermes-13b": ModelType.IFT,
460
+ "project-baize/baize-v2-7b": ModelType.IFT,
461
+ "project-baize/baize-v2-13b": ModelType.IFT,
462
+ "LLMs/WizardLM-13B-V1.0": ModelType.FT,
463
+ "LLMs/AlpacaGPT4-7B-elina": ModelType.FT,
464
+ "wenge-research/yayi-7b": ModelType.FT,
465
+ "wenge-research/yayi-7b-llama2": ModelType.FT,
466
+ "wenge-research/yayi-13b-llama2": ModelType.FT,
467
+ "yhyhy3/open_llama_7b_v2_med_instruct": ModelType.IFT,
468
+ "llama-anon/instruct-13b": ModelType.IFT,
469
+ "huggingtweets/jerma985": ModelType.FT,
470
+ "huggingtweets/gladosystem": ModelType.FT,
471
+ "huggingtweets/bladeecity-jerma985": ModelType.FT,
472
+ "huggyllama/llama-13b": ModelType.PT,
473
+ "huggyllama/llama-65b": ModelType.PT,
474
+ "FabbriSimo01/Facebook_opt_1.3b_Quantized": ModelType.PT,
475
+ "upstage/Llama-2-70b-instruct": ModelType.IFT,
476
+ "upstage/Llama-2-70b-instruct-1024": ModelType.IFT,
477
+ "upstage/llama-65b-instruct": ModelType.IFT,
478
+ "upstage/llama-30b-instruct-2048": ModelType.IFT,
479
+ "upstage/llama-30b-instruct": ModelType.IFT,
480
+ "WizardLM/WizardLM-13B-1.0": ModelType.IFT,
481
+ "WizardLM/WizardLM-13B-V1.1": ModelType.IFT,
482
+ "WizardLM/WizardLM-13B-V1.2": ModelType.IFT,
483
+ "WizardLM/WizardLM-30B-V1.0": ModelType.IFT,
484
+ "WizardLM/WizardCoder-15B-V1.0": ModelType.IFT,
485
+ "gpt2": ModelType.PT,
486
+ "keyfan/vicuna-chinese-replication-v1.1": ModelType.IFT,
487
+ "nthngdy/pythia-owt2-70m-100k": ModelType.FT,
488
+ "nthngdy/pythia-owt2-70m-50k": ModelType.FT,
489
+ "quantumaikr/KoreanLM-hf": ModelType.FT,
490
+ "quantumaikr/open_llama_7b_hf": ModelType.FT,
491
+ "quantumaikr/QuantumLM-70B-hf": ModelType.IFT,
492
+ "MayaPH/FinOPT-Lincoln": ModelType.FT,
493
+ "MayaPH/FinOPT-Franklin": ModelType.FT,
494
+ "MayaPH/GodziLLa-30B": ModelType.IFT,
495
+ "MayaPH/GodziLLa-30B-plus": ModelType.IFT,
496
+ "MayaPH/FinOPT-Washington": ModelType.FT,
497
+ "ogimgio/gpt-neo-125m-neurallinguisticpioneers": ModelType.FT,
498
+ "layoric/llama-2-13b-code-alpaca": ModelType.FT,
499
+ "CobraMamba/mamba-gpt-3b": ModelType.FT,
500
+ "CobraMamba/mamba-gpt-3b-v2": ModelType.FT,
501
+ "CobraMamba/mamba-gpt-3b-v3": ModelType.FT,
502
+ "timdettmers/guanaco-33b-merged": ModelType.FT,
503
+ "elinas/chronos-33b": ModelType.IFT,
504
+ "heegyu/RedTulu-Uncensored-3B-0719": ModelType.IFT,
505
+ "heegyu/WizardVicuna-Uncensored-3B-0719": ModelType.IFT,
506
+ "heegyu/WizardVicuna-3B-0719": ModelType.IFT,
507
+ "meta-llama/Llama-2-7b-chat-hf": ModelType.RL,
508
+ "meta-llama/Llama-2-7b-hf": ModelType.PT,
509
+ "meta-llama/Llama-2-13b-chat-hf": ModelType.RL,
510
+ "meta-llama/Llama-2-13b-hf": ModelType.PT,
511
+ "meta-llama/Llama-2-70b-chat-hf": ModelType.RL,
512
+ "meta-llama/Llama-2-70b-hf": ModelType.PT,
513
+ "xhyi/PT_GPTNEO350_ATG": ModelType.FT,
514
+ "h2oai/h2ogpt-gm-oasst1-en-1024-20b": ModelType.FT,
515
+ "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt": ModelType.FT,
516
+ "h2oai/h2ogpt-oig-oasst1-512-6_9b": ModelType.IFT,
517
+ "h2oai/h2ogpt-oasst1-512-12b": ModelType.IFT,
518
+ "h2oai/h2ogpt-oig-oasst1-256-6_9b": ModelType.IFT,
519
+ "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt": ModelType.FT,
520
+ "h2oai/h2ogpt-oasst1-512-20b": ModelType.IFT,
521
+ "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2": ModelType.FT,
522
+ "h2oai/h2ogpt-gm-oasst1-en-1024-12b": ModelType.FT,
523
+ "h2oai/h2ogpt-gm-oasst1-multilang-1024-20b": ModelType.FT,
524
+ "bofenghuang/vigogne-13b-instruct": ModelType.IFT,
525
+ "bofenghuang/vigogne-13b-chat": ModelType.FT,
526
+ "bofenghuang/vigogne-2-7b-instruct": ModelType.IFT,
527
+ "bofenghuang/vigogne-7b-instruct": ModelType.IFT,
528
+ "bofenghuang/vigogne-7b-chat": ModelType.FT,
529
+ "Vmware/open-llama-7b-v2-open-instruct": ModelType.IFT,
530
+ "VMware/open-llama-0.7T-7B-open-instruct-v1.1": ModelType.IFT,
531
+ "ewof/koishi-instruct-3b": ModelType.IFT,
532
+ "gywy/llama2-13b-chinese-v1": ModelType.FT,
533
+ "GOAT-AI/GOAT-7B-Community": ModelType.FT,
534
+ "psyche/kollama2-7b": ModelType.FT,
535
+ "TheTravellingEngineer/llama2-7b-hf-guanaco": ModelType.FT,
536
+ "beaugogh/pythia-1.4b-deduped-sharegpt": ModelType.FT,
537
+ "augtoma/qCammel-70-x": ModelType.IFT,
538
+ "Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload": ModelType.IFT,
539
+ "anhnv125/pygmalion-6b-roleplay": ModelType.FT,
540
+ "64bits/LexPodLM-13B": ModelType.FT,
541
+ }
542
+
543
+
544
def model_type_from_str(type):
    """Map a free-form type string (by keyword or emoji) back to a ModelType.

    Falls back to ModelType.Unknown when nothing matches.
    """
    # NOTE: the parameter shadows the builtin `type`; kept for caller compatibility.
    markers = (
        (("fine-tuned", "🔶"), ModelType.FT),
        (("pretrained", "🟢"), ModelType.PT),
        (("RL-tuned", "🟦"), ModelType.RL),
        (("instruction-tuned", "⭕"), ModelType.IFT),
    )
    for keywords, model_type in markers:
        if any(keyword in type for keyword in keywords):
            return model_type
    return ModelType.Unknown
src/display_models/read_results.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
from dataclasses import dataclass
from typing import Dict, List, Tuple

# Fix: `import dateutil` alone does not make the `dateutil.parser` submodule
# available; it must be imported explicitly.
import dateutil.parser
import numpy as np

from src.display_models.utils import AutoEvalColumn, make_clickable_model

# Metric name to read from the results file for each benchmark, aligned by
# position with BENCHMARKS.
# NOTE(review): METRICS has 4 entries while BENCHMARKS has 6, so
# zip(BENCHMARKS, METRICS) in parse_eval_result silently drops "commongen"
# and "ethicalverification" — confirm their metric names and extend this list.
METRICS = ["acc_norm", "acc_norm", "acc", "mc2"]
BENCHMARKS = ["arc:challenge", "hellaswag", "hendrycksTest", "truthfulqa:mc", "commongen", "ethicalverification"]
# Benchmark id (as it appears in result files) -> user-facing column name.
BENCH_TO_NAME = {
    "arc:challenge": AutoEvalColumn.arc.name,
    "hellaswag": AutoEvalColumn.hellaswag.name,
    "hendrycksTest": AutoEvalColumn.mmlu.name,
    "truthfulqa:mc": AutoEvalColumn.truthfulqa.name,
    "commongen": AutoEvalColumn.commongen.name,
    "ethicalverification": AutoEvalColumn.ethicalverification.name,
}
21
+
22
+
23
@dataclass
class EvalResult:
    """Scores of one model (at one sha/precision) for one or more benchmarks."""

    eval_name: str  # unique aggregation key: org_model_sha_precision
    org: str  # may be None for models without an org prefix
    model: str
    revision: str  # model sha
    results: dict  # benchmark id -> mean accuracy in percent
    precision: str = ""
    model_type: str = ""
    weight_type: str = ""

    def to_dict(self):
        """Flatten this result into a leaderboard-row dict keyed by column names.

        NOTE: fills `self.results` in place with None for benchmarks that have
        not been evaluated yet, so missing benchmarks show as empty cells.
        """
        from src.load_from_hub import is_model_on_hub  # local import: avoids a circular import

        if self.org is not None:
            base_model = f"{self.org}/{self.model}"
        else:
            base_model = f"{self.model}"
        data_dict = {}

        data_dict["eval_name"] = self.eval_name  # not a column, just a save name
        data_dict["weight_type"] = self.weight_type  # not a column, just a save name
        data_dict[AutoEvalColumn.precision.name] = self.precision
        data_dict[AutoEvalColumn.model_type.name] = self.model_type
        data_dict[AutoEvalColumn.model.name] = make_clickable_model(base_model)
        data_dict[AutoEvalColumn.dummy.name] = base_model
        data_dict[AutoEvalColumn.revision.name] = self.revision
        # Fix: average over the full benchmark suite (missing benchmarks count
        # as 0). The previous hard-coded `/ 4.0` predates the addition of
        # commongen/ethicalverification and inflated the average.
        data_dict[AutoEvalColumn.average.name] = sum(self.results.values()) / len(BENCHMARKS)
        data_dict[AutoEvalColumn.still_on_hub.name] = (
            is_model_on_hub(base_model, self.revision)[0] or base_model == "baseline"
        )

        for benchmark in BENCHMARKS:
            if benchmark not in self.results.keys():
                self.results[benchmark] = None

        for k, v in BENCH_TO_NAME.items():
            data_dict[v] = self.results[k]

        return data_dict
63
+
64
+
65
def parse_eval_result(json_filepath: str) -> "Tuple[str, List[EvalResult]]":
    """Parse one results JSON file into per-benchmark EvalResult objects.

    Returns (result_key, results). result_key is None and results is empty
    when the file was produced with an outdated MMLU harness version.
    """
    with open(json_filepath) as fp:
        data = json.load(fp)

    # MMLU version 0 results are not comparable with the rest of the board.
    for mmlu_k in ["harness|hendrycksTest-abstract_algebra|5", "hendrycksTest-abstract_algebra"]:
        if mmlu_k in data["versions"] and data["versions"][mmlu_k] == 0:
            return None, []  # we skip models with the wrong version

    # Older files use "config", newer ones "config_general".
    try:
        config = data["config"]
    except KeyError:
        config = data["config_general"]
    model = config.get("model_name", None)
    if model is None:
        model = config.get("model_args", None)

    model_sha = config.get("model_sha", "")
    model_split = model.split("/", 1)

    precision = config.get("model_dtype")

    # (Removed dead `model = model_split[-1]`: it was overwritten in both
    # branches below.)
    if len(model_split) == 1:
        org = None
        model = model_split[0]
        result_key = f"{model}_{model_sha}_{precision}"
    else:
        org = model_split[0]
        model = model_split[1]
        result_key = f"{org}_{model}_{model_sha}_{precision}"

    eval_results = []
    # NOTE(review): zip() stops at the shorter sequence — METRICS currently has
    # fewer entries than BENCHMARKS, so trailing benchmarks are skipped until
    # METRICS is extended to match.
    for benchmark, metric in zip(BENCHMARKS, METRICS):
        accs = np.array([v.get(metric, None) for k, v in data["results"].items() if benchmark in k])
        if accs.size == 0 or any(acc is None for acc in accs):
            continue
        mean_acc = np.mean(accs) * 100.0
        eval_results.append(
            EvalResult(
                eval_name=result_key,
                org=org,
                model=model,
                revision=model_sha,
                results={benchmark: mean_acc},
                precision=precision,  # todo model_type=, weight_type=
            )
        )

    return result_key, eval_results
115
+
116
+
117
def get_eval_results() -> "List[EvalResult]":
    """Walk `eval-results/` and aggregate every parseable result file.

    Results sharing the same (org, model, sha, precision) key are merged into
    one EvalResult whose `results` dict accumulates all benchmarks.
    """
    json_filepaths = []

    for root, _dirs, files in os.walk("eval-results"):  # `_dirs`: don't shadow builtin `dir`
        # We should only have json files in model results
        if len(files) == 0 or any(not f.endswith(".json") for f in files):
            continue

        # Sort the files by the timestamp embedded in the filename
        # (results_<timestamp>.json); keep only the newest on parse failure.
        # store results by precision maybe?
        try:
            files.sort(key=lambda x: dateutil.parser.parse(x.split("_", 1)[-1][:-5]))
        except dateutil.parser.ParserError:
            # Fix: use the public ParserError alias rather than the private
            # dateutil.parser._parser.ParserError.
            files = [files[-1]]

        for file in files:
            json_filepaths.append(os.path.join(root, file))

    eval_results = {}
    for json_filepath in json_filepaths:
        result_key, results = parse_eval_result(json_filepath)
        for eval_result in results:
            if result_key in eval_results:
                eval_results[result_key].results.update(eval_result.results)
            else:
                eval_results[result_key] = eval_result

    return list(eval_results.values())
148
+
149
+
150
def get_eval_results_dicts() -> List[Dict]:
    """Collect every aggregated eval result as a leaderboard-row dict."""
    return [result.to_dict() for result in get_eval_results()]
src/display_models/utils.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from dataclasses import dataclass

from huggingface_hub import HfApi

# Shared, unauthenticated Hub client used below to check for details repos.
API = HfApi()
7
+
8
+
9
+ # These classes are for user facing column names, to avoid having to change them
10
+ # all around the code when a modif is needed
11
@dataclass
class ColumnContent:
    # Display metadata for a single leaderboard/queue column.
    name: str  # user-facing column header
    type: str  # dataframe cell type, e.g. "str", "number", "markdown", "bool"
    displayed_by_default: bool  # shown before the user customizes columns
    hidden: bool = False  # never exposed in the column-selection UI
17
+
18
+
19
def fields(raw_class):
    """Return the values of *raw_class*'s class attributes, skipping any name
    that starts or ends with a double underscore."""
    collected = []
    for attr_name, attr_value in raw_class.__dict__.items():
        if attr_name.startswith("__") or attr_name.endswith("__"):
            continue
        collected.append(attr_value)
    return collected
21
+
22
+
23
@dataclass(frozen=True)
class AutoEvalColumn:  # Auto evals column
    # Each attribute is a ColumnContent describing one column of the main
    # leaderboard table (header text, cell type, shown-by-default flag).
    model_type_symbol = ColumnContent("T", "str", True)  # model-type marker column
    model = ColumnContent("Model", "markdown", True)
    average = ColumnContent("Average ⬆️", "number", True)
    # Benchmark score columns (percentages).
    arc = ColumnContent("Ko-ARC", "number", True)
    hellaswag = ColumnContent("Ko-HellaSwag", "number", True)
    mmlu = ColumnContent("Ko-MMLU", "number", True)
    truthfulqa = ColumnContent("Ko-TruthfulQA", "number", True)
    commongen = ColumnContent("Ko-CommonGen", "number", True)
    ethicalverification = ColumnContent("EthicalVerification", "number", True)
    # Metadata columns, hidden unless selected by the user.
    model_type = ColumnContent("Type", "str", False)
    precision = ColumnContent("Precision", "str", False)  # , True)
    license = ColumnContent("Hub License", "str", False)
    params = ColumnContent("#Params (B)", "number", False)
    likes = ColumnContent("Hub ❤️", "number", False)
    still_on_hub = ColumnContent("Available on the hub", "bool", False)
    revision = ColumnContent("Model sha", "str", False, False)
    dummy = ColumnContent(
        "model_name_for_query", "str", True
    )  # dummy col to implement search bar (hidden by custom CSS)
44
+
45
+
46
@dataclass(frozen=True)
class EloEvalColumn:  # Elo evals column
    # Columns of the Elo (pairwise-judged) leaderboard table.
    model = ColumnContent("Model", "markdown", True)
    gpt4 = ColumnContent("GPT-4 (all)", "number", True)
    human_all = ColumnContent("Human (all)", "number", True)
    human_instruct = ColumnContent("Human (instruct)", "number", True)
    human_code_instruct = ColumnContent("Human (code-instruct)", "number", True)
53
+
54
+
55
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    # Columns of the pending/running/finished evaluation-queue tables.
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    # Fix: `displayed_by_default` is a bool — the string "Original" was being
    # passed here (it only worked because any non-empty string is truthy).
    weight_type = ColumnContent("weight_type", "str", True)
    status = ColumnContent("status", "str", True)
63
+
64
+
65
# Models whose Hub pages should redirect to the Meta LLaMA announcement.
LLAMAS = [
    "huggingface/llama-7b",
    "huggingface/llama-13b",
    "huggingface/llama-30b",
    "huggingface/llama-65b",
]


# Canonical external pages for well-known non-Hub (or delta-weight) models.
KOALA_LINK = "https://huggingface.co/TheBloke/koala-13B-HF"
VICUNA_LINK = "https://huggingface.co/lmsys/vicuna-13b-delta-v1.1"
OASST_LINK = "https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
DOLLY_LINK = "https://huggingface.co/databricks/dolly-v2-12b"
MODEL_PAGE = "https://huggingface.co/models"
LLAMA_LINK = "https://ai.facebook.com/blog/large-language-model-llama-meta-ai/"
# NOTE(review): VICUNA_LINK is reassigned here, silently overriding the lmsys
# link above — confirm which target is intended and delete the other one.
VICUNA_LINK = "https://huggingface.co/CarperAI/stable-vicuna-13b-delta"
ALPACA_LINK = "https://crfm.stanford.edu/2023/03/13/alpaca.html"
81
+
82
+
83
def model_hyperlink(link, model_name):
    """Render *model_name* as an HTML anchor opening *link* in a new tab."""
    anchor_style = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="{link}" style="{anchor_style}">{model_name}</a>'
85
+
86
+
87
def make_clickable_model(model_name):
    """Render a model name as an HTML link to its Hub page.

    Known external models (llama, vicuna, koala, oasst, dolly, alpaca) link to
    their canonical pages instead. When a details dataset exists for the
    model, a second 📑 link to it is appended.
    """
    link = f"https://huggingface.co/{model_name}"

    if model_name in LLAMAS:
        link = LLAMA_LINK
        model_name = model_name.split("/")[1]
    elif model_name == "HuggingFaceH4/stable-vicuna-13b-2904":
        link = VICUNA_LINK
        model_name = "stable-vicuna-13b"
    elif model_name == "HuggingFaceH4/llama-7b-ift-alpaca":
        link = ALPACA_LINK
        model_name = "alpaca-13b"
    if model_name == "dolly-12b":
        link = DOLLY_LINK
    elif model_name == "vicuna-13b":
        link = VICUNA_LINK
    elif model_name == "koala-13b":
        link = KOALA_LINK
    elif model_name == "oasst-12b":
        link = OASST_LINK

    details_model_name = model_name.replace("/", "__")
    details_link = f"https://huggingface.co/datasets/open-llm-leaderboard/details_{details_model_name}"

    # Fix: the previous guard was `not bool(os.getenv("DEBUG", "False"))`,
    # which is False for ANY non-empty value (bool("False") is True), so the
    # existence check below could never run. Parse the flag as a real boolean.
    debug_mode = os.getenv("DEBUG", "false").lower() in ("1", "true", "yes")
    if not debug_mode:
        # We only add these checks when not debugging, as they are extremely slow
        print(f"details_link: {details_link}")
        try:
            check_path = list(
                API.list_files_info(
                    repo_id=f"open-llm-leaderboard/details_{details_model_name}",
                    paths="README.md",
                    repo_type="dataset",
                )
            )
            print(f"check_path: {check_path}")
        except Exception as err:
            # No details repo for this model: fall back to the plain model link.
            print(f"No details repo for this model: {err}")
            return model_hyperlink(link, model_name)

    return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑")
129
+
130
+
131
def styled_error(error):
    """Wrap *error* in red, centered HTML for display in the UI."""
    return "<p style='color: red; font-size: 20px; text-align: center;'>" + str(error) + "</p>"
133
+
134
+
135
def styled_warning(warn):
    """Wrap *warn* in orange, centered HTML for display in the UI."""
    return "<p style='color: orange; font-size: 20px; text-align: center;'>" + str(warn) + "</p>"
137
+
138
+
139
def styled_message(message):
    """Wrap *message* in green, centered HTML for display in the UI."""
    return "<p style='color: green; font-size: 20px; text-align: center;'>" + str(message) + "</p>"
141
+
142
+
143
def has_no_nan_values(df, columns):
    """Boolean Series: True for rows where every one of *columns* is non-null."""
    return ~df[columns].isna().any(axis=1)
145
+
146
+
147
def has_nan_values(df, columns):
    """Boolean Series: True for rows where at least one of *columns* is null."""
    return ~df[columns].notna().all(axis=1)
src/load_from_hub.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import pandas as pd
5
+ from huggingface_hub import Repository
6
+ from transformers import AutoConfig
7
+ from collections import defaultdict
8
+
9
+ from src.assets.hardcoded_evals import baseline, gpt4_values, gpt35_values
10
+ from src.display_models.get_model_metadata import apply_metadata
11
+ from src.display_models.read_results import get_eval_results_dicts, make_clickable_model
12
+ from src.display_models.utils import AutoEvalColumn, EvalQueueColumn, has_no_nan_values
13
+
14
# Fix: the previous `bool(os.environ.get("IS_PUBLIC", True))` was True for any
# non-empty string, including "False". Parse the env var as a real boolean.
IS_PUBLIC = os.environ.get("IS_PUBLIC", "True").lower() not in ("false", "0", "")
15
+
16
+
17
def get_all_requested_models(requested_models_dir: str) -> "tuple[set[str], dict[str, list[str]]]":
    """Scan the eval-queue checkout for request files exactly one level deep.

    Returns a tuple of:
      - set of "model_revision_precision" identifiers already requested
      - dict mapping organisation -> list of submission timestamps (for rate
        limiting)
    (Fix: the previous annotation claimed `set[str]` although a 2-tuple has
    always been returned.)
    """
    depth = 1  # request files live in <requested_models_dir>/<org>/<file>.json
    file_names = []
    users_to_submission_dates = defaultdict(list)

    for root, _, files in os.walk(requested_models_dir):
        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
        if current_depth != depth:
            continue
        for file in files:
            if not file.endswith(".json"):
                continue
            with open(os.path.join(root, file), "r") as f:
                info = json.load(f)
            file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")

            # Track submission dates per organisation for rate limiting;
            # skip entries without an org prefix or a timestamp.
            if info["model"].count("/") == 0 or "submitted_time" not in info:
                continue
            organisation, _ = info["model"].split("/")
            users_to_submission_dates[organisation].append(info["submitted_time"])

    return set(file_names), users_to_submission_dates
38
+
39
+
40
def load_all_info_from_hub(QUEUE_REPO: str, RESULTS_REPO: str, QUEUE_PATH: str, RESULTS_PATH: str) -> tuple:
    """Clone/refresh the request and results datasets and index the requests.

    Returns (eval_queue_repo, requested_models, eval_results_repo,
    users_to_submission_dates).
    """
    print("Pulling evaluation requests and results.")

    eval_queue_repo = Repository(
        local_dir=QUEUE_PATH,
        clone_from=QUEUE_REPO,
        repo_type="dataset",
    )
    eval_queue_repo.git_pull()

    eval_results_repo = Repository(
        local_dir=RESULTS_PATH,
        clone_from=RESULTS_REPO,
        repo_type="dataset",
    )
    eval_results_repo.git_pull()

    # Fix: scan the directory the queue was actually cloned into rather than
    # the hard-coded "eval-queue" (they only coincided by caller convention).
    requested_models, users_to_submission_dates = get_all_requested_models(QUEUE_PATH)

    return eval_queue_repo, requested_models, eval_results_repo, users_to_submission_dates
64
+
65
+
66
def get_leaderboard_df(
    eval_results: Repository, eval_results_private: Repository, cols: list, benchmark_cols: list
) -> pd.DataFrame:
    """Build the main leaderboard dataframe from the local result checkouts."""
    for results_repo in (eval_results, eval_results_private):
        if results_repo:
            print("Pulling evaluation results for the leaderboard.")
            results_repo.git_pull()

    all_data = get_eval_results_dicts()

    if not IS_PUBLIC:
        all_data.append(gpt4_values)
        all_data.append(gpt35_values)

    all_data.append(baseline)
    apply_metadata(all_data)  # Populate model type based on known hardcoded values in `metadata.py`

    leaderboard = pd.DataFrame.from_records(all_data)
    leaderboard = leaderboard.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    leaderboard = leaderboard[cols].round(decimals=2)

    # filter out if any of the benchmarks have not been produced
    return leaderboard[has_no_nan_values(leaderboard, benchmark_cols)]
92
+
93
+
94
def get_evaluation_queue_df(
    eval_queue: Repository, eval_queue_private: Repository, save_path: str, cols: list
) -> list[pd.DataFrame]:
    """Build the (finished, running, pending) dataframes of queued evaluations.

    Request JSON files live either directly under *save_path* or one folder
    level below it; hidden entries and markdown files are skipped.
    """
    if eval_queue:
        print("Pulling changes for the evaluation queue.")
        eval_queue.git_pull()
    if eval_queue_private:
        print("Pulling changes for the evaluation queue.")
        eval_queue_private.git_pull()

    def _load_request(file_path: str) -> dict:
        # Read one request JSON and add the display columns the tables need.
        with open(file_path) as fp:
            data = json.load(fp)
        data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
        data[EvalQueueColumn.revision.name] = data.get("revision", "main")
        return data

    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        # Fix: use endswith instead of `".json" in entry`, which also matched
        # names like "foo.json.lock".
        if entry.endswith(".json"):
            all_evals.append(_load_request(os.path.join(save_path, entry)))
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [e for e in os.listdir(os.path.join(save_path, entry)) if not e.startswith(".")]
            for sub_entry in sub_entries:
                all_evals.append(_load_request(os.path.join(save_path, entry, sub_entry)))

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED")]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
136
+
137
+
138
def is_model_on_hub(model_name: str, revision: str) -> "tuple[bool, str | None]":
    """Check that a model's config can be loaded from the Hub without remote code.

    Returns (ok, error_message); error_message is None on success.
    (Fix: the previous annotation claimed `-> bool` although a 2-tuple has
    always been returned.)
    """
    try:
        AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=False)
        return True, None

    except ValueError:
        # transformers raises ValueError when the config requires custom code.
        return (
            False,
            "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
        )

    except Exception as e:
        # Covers missing repos, bad revisions, and network errors alike.
        print(f"Could not get the model config from the hub.: {e}")
        return False, "was not found on hub!"
src/rate_limiting.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from datetime import datetime, timezone, timedelta
3
+
4
+
5
def user_submission_permission(submission_name, users_to_submission_dates, rate_limit_period):
    """Count the submissions made by *submission_name*'s org/user inside the
    rate-limit window of the last *rate_limit_period* days."""
    org_or_user, _ = submission_name.split("/")
    if org_or_user not in users_to_submission_dates:
        return 0

    # ISO-8601 timestamps compare correctly as plain strings.
    cutoff = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
    return sum(1 for submitted in users_to_submission_dates[org_or_user] if submitted > cutoff)
15
+
16
+