choco9966 committed on
Commit
3a8c5ba
1 Parent(s): 0dcb3f2

make a leaderboard

.gitattributes CHANGED
@@ -25,7 +25,6 @@
  *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
  *.wasm filter=lfs diff=lfs merge=lfs -text
@@ -33,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,20 @@
+ auto_evals/
+ venv/
+ __pycache__/
+ .env
+ .ipynb_checkpoints
+ *ipynb
+ .vscode/
+
+ gpt_4_evals/
+ human_evals/
+ eval-queue/
+ eval-results/
+ eval-queue-private/
+ eval-results-private/
+ auto_evals/
+
+ src/assets/model_counts.html
+
+ **/.DS_Store
+ .venv
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ default_language_version:
+   python: python3
+
+ ci:
+   autofix_prs: true
+   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+   autoupdate_schedule: quarterly
+
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: check-yaml
+       - id: check-case-conflict
+       - id: detect-private-key
+       - id: check-added-large-files
+         args: ['--maxkb=1000']
+       - id: requirements-txt-fixer
+       - id: end-of-file-fixer
+       - id: trailing-whitespace
+
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         name: Format imports
+
+   - repo: https://github.com/psf/black
+     rev: 22.12.0
+     hooks:
+       - id: black
+         name: Format code
+         additional_dependencies: ['click==8.0.2']
+
+   - repo: https://github.com/charliermarsh/ruff-pre-commit
+     # Ruff version.
+     rev: 'v0.0.267'
+     hooks:
+       - id: ruff
Makefile ADDED
@@ -0,0 +1,13 @@
+ .PHONY: style quality
+
+
+ style:
+ 	python -m black --line-length 119 .
+ 	python -m isort .
+ 	ruff check --fix .
+
+
+ quality:
+ 	python -m black --check --line-length 119 .
+ 	python -m isort --check-only .
+ 	ruff check .
app.py ADDED
@@ -0,0 +1,597 @@
+ import json
+ import os
+ from datetime import datetime, timezone
+ import re
+ from distutils.util import strtobool
+
+ import gradio as gr
+ import pandas as pd
+ from apscheduler.schedulers.background import BackgroundScheduler
+ from huggingface_hub import HfApi
+
+ from src.assets.css_html_js import custom_css, get_window_url_params
+ from src.assets.text_content import (
+     CITATION_BUTTON_LABEL,
+     CITATION_BUTTON_TEXT,
+     EVALUATION_QUEUE_TEXT,
+     INTRODUCTION_TEXT,
+     LLM_BENCHMARKS_TEXT,
+     TITLE,
+     BOTTOM_LOGO,
+ )
+ from src.display_models.get_model_metadata import DO_NOT_SUBMIT_MODELS, ModelType
+ from src.display_models.utils import (
+     AutoEvalColumn,
+     EvalQueueColumn,
+     fields,
+     styled_error,
+     styled_message,
+     styled_warning,
+ )
+ from src.load_from_hub import get_evaluation_queue_df, get_leaderboard_df, is_model_on_hub, load_all_info_from_hub
+ from src.rate_limiting import user_submission_permission
+
+ pd.set_option("display.precision", 1)
+
+ # clone / pull the lmeh eval data
+ H4_TOKEN = os.environ.get("H4_TOKEN", None)
+
+ QUEUE_REPO = "open-ko-llm-leaderboard/requests"
+ RESULTS_REPO = "open-ko-llm-leaderboard/results"
+
+ PRIVATE_QUEUE_REPO = "open-ko-llm-leaderboard/private-requests"
+ PRIVATE_RESULTS_REPO = "open-ko-llm-leaderboard/private-results"
+
+ IS_PUBLIC = bool(strtobool(os.environ.get("IS_PUBLIC", "True")))
+
+ EVAL_REQUESTS_PATH = "eval-queue"
+ EVAL_RESULTS_PATH = "eval-results"
+
+ EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
+ EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
+
+ api = HfApi(token=H4_TOKEN)
+
+
+ def restart_space():
+     api.restart_space(repo_id="upstage/open-ko-llm-leaderboard", token=H4_TOKEN)
+
+
+ # Rate limit variables
+ RATE_LIMIT_PERIOD = 7
+ RATE_LIMIT_QUOTA = 5
+
+ # Column selection
+ COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+ TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
+ COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
+ TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
+
+ if not IS_PUBLIC:
+     COLS.insert(2, AutoEvalColumn.precision.name)
+     TYPES.insert(2, AutoEvalColumn.precision.type)
+
+ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
+ EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
+
+ BENCHMARK_COLS = [
+     c.name
+     for c in [
+         AutoEvalColumn.arc,
+         AutoEvalColumn.hellaswag,
+         AutoEvalColumn.mmlu,
+         AutoEvalColumn.truthfulqa,
+         AutoEvalColumn.commongen_v2,
+         # TODO: Uncomment when we have results for these
+         # AutoEvalColumn.ethicalverification,
+     ]
+ ]
+
+ ## LOAD INFO FROM HUB
+ eval_queue, requested_models, eval_results, users_to_submission_dates = load_all_info_from_hub(
+     QUEUE_REPO, RESULTS_REPO, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
+ )
+
+ if not IS_PUBLIC:
+     (eval_queue_private, requested_models_private, eval_results_private, _) = load_all_info_from_hub(
+         PRIVATE_QUEUE_REPO,
+         PRIVATE_RESULTS_REPO,
+         EVAL_REQUESTS_PATH_PRIVATE,
+         EVAL_RESULTS_PATH_PRIVATE,
+     )
+ else:
+     eval_queue_private, eval_results_private = None, None
+
+ original_df = get_leaderboard_df(eval_results, eval_results_private, COLS, BENCHMARK_COLS)
+ models = original_df["model_name_for_query"].tolist()  # needed for model backlinks from model cards to the leaderboard
+
+ # Commented out because it causes infinite restart loops locally
+ # to_be_dumped = f"models = {repr(models)}\n"
+
+ # with open("models_backlinks.py", "w") as f:
+ #     f.write(to_be_dumped)
+
+ # print(to_be_dumped)
+
+ leaderboard_df = original_df.copy()
+ (
+     finished_eval_queue_df,
+     running_eval_queue_df,
+     pending_eval_queue_df,
+ ) = get_evaluation_queue_df(eval_queue, eval_queue_private, EVAL_REQUESTS_PATH, EVAL_COLS)
+
+
+ ## INTERACTION FUNCTIONS
+ def add_new_eval(
+     model: str,
+     base_model: str,
+     revision: str,
+     precision: str,
+     private: bool,
+     weight_type: str,
+     model_type: str,
+ ):
+     precision = precision.split(" ")[0]
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+     num_models_submitted_in_period = user_submission_permission(model, users_to_submission_dates, RATE_LIMIT_PERIOD)
+     if num_models_submitted_in_period > RATE_LIMIT_QUOTA:
+         error_msg = f"Organisation or user `{model.split('/')[0]}` "
+         error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
+         error_msg += f"in the last {RATE_LIMIT_PERIOD} days.\n"
+         error_msg += "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
+         return styled_error(error_msg)
+
+     if model_type is None or model_type == "":
+         return styled_error("Please select a model type.")
+
+     # check the model actually exists before adding the eval
+     if revision == "":
+         revision = "main"
+
+     if weight_type in ["Delta", "Adapter"]:
+         base_model_on_hub, error = is_model_on_hub(base_model, revision)
+         if not base_model_on_hub:
+             return styled_error(f'Base model "{base_model}" {error}')
+
+     if not weight_type == "Adapter":
+         model_on_hub, error = is_model_on_hub(model, revision)
+         if not model_on_hub:
+             return styled_error(f'Model "{model}" {error}')
+
+     print("adding new eval")
+
+     eval_entry = {
+         "model": model,
+         "base_model": base_model,
+         "revision": revision,
+         "private": private,
+         "precision": precision,
+         "weight_type": weight_type,
+         "status": "PENDING",
+         "submitted_time": current_time,
+         "model_type": model_type,
+     }
+
+     user_name = ""
+     model_path = model
+     if "/" in model:
+         user_name = model.split("/")[0]
+         model_path = model.split("/")[1]
+
+     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+     os.makedirs(OUT_DIR, exist_ok=True)
+     out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
+
+     # Check if the model has been forbidden:
+     if out_path.split("eval-queue/")[1] in DO_NOT_SUBMIT_MODELS:
+         return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
+
+     # Check for duplicate submission
+     if f"{model}_{revision}_{precision}" in requested_models:
+         return styled_warning("This model has already been submitted.")
+
+     with open(out_path, "w") as f:
+         f.write(json.dumps(eval_entry))
+
+     api.upload_file(
+         path_or_fileobj=out_path,
+         path_in_repo=out_path.split("eval-queue/")[1],
+         repo_id=QUEUE_REPO,
+         repo_type="dataset",
+         commit_message=f"Add {model} to eval queue",
+     )
+
+     # remove the local file
+     os.remove(out_path)
+
+     return styled_message(
+         "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
+     )
+
+
+ # Basics
+ def change_tab(query_param: str):
+     query_param = query_param.replace("'", '"')
+     query_param = json.loads(query_param)
+
+     if isinstance(query_param, dict) and "tab" in query_param and query_param["tab"] == "evaluation":
+         return gr.Tabs.update(selected=1)
+     else:
+         return gr.Tabs.update(selected=0)
+
+
+ # Searching and filtering
+ def update_table(hidden_df: pd.DataFrame, current_columns_df: pd.DataFrame, columns: list, type_query: list, precision_query: str, size_query: list, show_deleted: bool, query: str):
+     filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
+     if query != "":
+         filtered_df = search_table(filtered_df, query)
+     df = select_columns(filtered_df, columns)
+
+     return df
+
+
+ def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
+     return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
+
+
+ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
+     always_here_cols = [
+         AutoEvalColumn.model_type_symbol.name,
+         AutoEvalColumn.model.name,
+     ]
+     # We use COLS to maintain sorting
+     filtered_df = df[
+         always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
+     ]
+     return filtered_df
+
+
+ NUMERIC_INTERVALS = {
+     "Unknown": pd.Interval(-1, 0, closed="right"),
+     "0~3B": pd.Interval(0, 3, closed="right"),
+     "3~7B": pd.Interval(3, 7, closed="right"),
+     "7~13B": pd.Interval(7, 13, closed="right"),
+     "13~35B": pd.Interval(13, 35, closed="right"),
+     "35~60B": pd.Interval(35, 60, closed="right"),
+     "60B+": pd.Interval(60, 10000, closed="right"),
+ }
+
+
+ def filter_models(
+     df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
+ ) -> pd.DataFrame:
+     # Show all models
+     if show_deleted:
+         filtered_df = df
+     else:  # Show only models still on the hub
+         filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
+
+     type_emoji = [t[0] for t in type_query]
+     filtered_df = filtered_df[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
+     filtered_df = filtered_df[df[AutoEvalColumn.precision.name].isin(precision_query)]
+
+     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
+     params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
+     mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
+     filtered_df = filtered_df.loc[mask]
+
+     return filtered_df
+
+
+ demo = gr.Blocks(css=custom_css)
+ with demo:
+     gr.HTML(TITLE)
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Row():
+                         search_bar = gr.Textbox(
+                             placeholder=" 🔍 Search for your model and press ENTER...",
+                             show_label=False,
+                             elem_id="search-bar",
+                         )
+                     with gr.Row():
+                         shown_columns = gr.CheckboxGroup(
+                             choices=[
+                                 c
+                                 for c in COLS
+                                 if c
+                                 not in [
+                                     AutoEvalColumn.dummy.name,
+                                     AutoEvalColumn.model.name,
+                                     AutoEvalColumn.model_type_symbol.name,
+                                     AutoEvalColumn.still_on_hub.name,
+                                 ]
+                             ],
+                             value=[
+                                 c
+                                 for c in COLS_LITE
+                                 if c
+                                 not in [
+                                     AutoEvalColumn.dummy.name,
+                                     AutoEvalColumn.model.name,
+                                     AutoEvalColumn.model_type_symbol.name,
+                                     AutoEvalColumn.still_on_hub.name,
+                                 ]
+                             ],
+                             label="Select columns to show",
+                             elem_id="column-select",
+                             interactive=True,
+                         )
+                     with gr.Row():
+                         deleted_models_visibility = gr.Checkbox(
+                             value=True, label="👀 Show gated/private/deleted models", interactive=True
+                         )
+                 with gr.Column(min_width=320):
+                     with gr.Box(elem_id="box-filter"):
+                         filter_columns_type = gr.CheckboxGroup(
+                             label="Model types",
+                             choices=[
+                                 ModelType.PT.to_str(),
+                                 # ModelType.FT.to_str(),
+                                 ModelType.IFT.to_str(),
+                                 ModelType.RL.to_str(),
+                             ],
+                             value=[
+                                 ModelType.PT.to_str(),
+                                 # ModelType.FT.to_str(),
+                                 ModelType.IFT.to_str(),
+                                 ModelType.RL.to_str(),
+                             ],
+                             interactive=True,
+                             elem_id="filter-columns-type",
+                         )
+                         filter_columns_precision = gr.CheckboxGroup(
+                             label="Precision",
+                             choices=["torch.float16"],  # , "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
+                             value=["torch.float16"],  # , "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
+                             interactive=False,
+                             elem_id="filter-columns-precision",
+                         )
+                         filter_columns_size = gr.CheckboxGroup(
+                             label="Model sizes",
+                             choices=list(NUMERIC_INTERVALS.keys()),
+                             value=list(NUMERIC_INTERVALS.keys()),
+                             interactive=True,
+                             elem_id="filter-columns-size",
+                         )
+
+             leaderboard_table = gr.components.Dataframe(
+                 value=leaderboard_df[
+                     [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]
+                     + shown_columns.value
+                     + [AutoEvalColumn.dummy.name]
+                 ],
+                 headers=[
+                     AutoEvalColumn.model_type_symbol.name,
+                     AutoEvalColumn.model.name,
+                 ]
+                 + shown_columns.value
+                 + [AutoEvalColumn.dummy.name],
+                 datatype=TYPES,
+                 max_rows=None,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+             )
+
+             # Dummy leaderboard for handling the case when the user uses backspace key
+             hidden_leaderboard_table_for_search = gr.components.Dataframe(
+                 value=original_df,
+                 headers=COLS,
+                 datatype=TYPES,
+                 max_rows=None,
+                 visible=False,
+             )
+             search_bar.submit(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     leaderboard_table,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+             )
+             shown_columns.change(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     leaderboard_table,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+                 queue=True,
+             )
+             filter_columns_type.change(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     leaderboard_table,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+                 queue=True,
+             )
+             filter_columns_precision.change(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     leaderboard_table,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+                 queue=True,
+             )
+             filter_columns_size.change(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     leaderboard_table,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+                 queue=True,
+             )
+             deleted_models_visibility.change(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     leaderboard_table,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+                 queue=True,
+             )
+         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+             with gr.Column():
+                 with gr.Row():
+                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+                 with gr.Column():
+                     with gr.Accordion(
+                         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                         open=False,
+                     ):
+                         with gr.Row():
+                             finished_eval_table = gr.components.Dataframe(
+                                 value=finished_eval_queue_df,
+                                 headers=EVAL_COLS,
+                                 datatype=EVAL_TYPES,
+                                 max_rows=5,
+                             )
+                     with gr.Accordion(
+                         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                         open=False,
+                     ):
+                         with gr.Row():
+                             running_eval_table = gr.components.Dataframe(
+                                 value=running_eval_queue_df,
+                                 headers=EVAL_COLS,
+                                 datatype=EVAL_TYPES,
+                                 max_rows=5,
+                             )
+
+                     with gr.Accordion(
+                         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                         open=False,
+                     ):
+                         with gr.Row():
+                             pending_eval_table = gr.components.Dataframe(
+                                 value=pending_eval_queue_df,
+                                 headers=EVAL_COLS,
+                                 datatype=EVAL_TYPES,
+                                 max_rows=5,
+                             )
+             with gr.Row():
+                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+             with gr.Row():
+                 with gr.Column():
+                     model_name_textbox = gr.Textbox(label="Model name")
+                     revision_name_textbox = gr.Textbox(label="Revision", placeholder="main")
+                     private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
+                     model_type = gr.Dropdown(
+                         choices=[
+                             ModelType.PT.to_str(" : "),
+                             # ModelType.FT.to_str(" : "),
+                             ModelType.IFT.to_str(" : "),
+                             ModelType.RL.to_str(" : "),
+                         ],
+                         label="Model type",
+                         multiselect=False,
+                         value=None,
+                         interactive=True,
+                     )
+
+                 with gr.Column():
+                     precision = gr.Dropdown(
+                         choices=[
+                             "float16",
+                             # "bfloat16",
+                             # "8bit (LLM.int8)",
+                             # "4bit (QLoRA / FP4)",
+                             # "GPTQ"
+                         ],
+                         label="Precision",
+                         multiselect=False,
+                         value="float16",
+                         interactive=True,
+                     )
+                     weight_type = gr.Dropdown(
+                         choices=["Original", "Delta", "Adapter"],
+                         label="Weights type",
+                         multiselect=False,
+                         value="Original",
+                         interactive=True,
+                     )
+                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+             submit_button = gr.Button("Submit Evaluation!")
+             submission_result = gr.Markdown()
+             submit_button.click(
+                 add_new_eval,
+                 [
+                     model_name_textbox,
+                     base_model_name_textbox,
+                     revision_name_textbox,
+                     precision,
+                     private,
+                     weight_type,
+                     model_type,
+                 ],
+                 submission_result,
+             )
+
+     with gr.Row():
+         with gr.Accordion("📙 Citation", open=False):
+             citation_button = gr.Textbox(
+                 value=CITATION_BUTTON_TEXT,
+                 label=CITATION_BUTTON_LABEL,
+                 elem_id="citation-button",
+             ).style(show_copy_button=True)
+
+     gr.HTML(BOTTOM_LOGO)
+
+     dummy = gr.Textbox(visible=False)
+     demo.load(
+         change_tab,
+         dummy,
+         tabs,
+         _js=get_window_url_params,
+     )
+
+ scheduler = BackgroundScheduler()
+ scheduler.add_job(restart_space, "interval", seconds=1800)
+ scheduler.start()
+ demo.queue(concurrency_count=40).launch()
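The size filter in `filter_models` above relies on pandas `Interval` containment. A minimal standalone sketch of that bucketing logic, with a hypothetical `params` series, looks like this (bucket names copied from `NUMERIC_INTERVALS` in app.py; the model sizes are made up):

```python
import pandas as pd

# Two of the NUMERIC_INTERVALS buckets from app.py, reproduced for illustration.
NUMERIC_INTERVALS = {
    "0~3B": pd.Interval(0, 3, closed="right"),
    "3~7B": pd.Interval(3, 7, closed="right"),
}

# Hypothetical parameter counts (in billions) for three models.
params = pd.Series([1.3, 6.7, 13.0])

# A row passes if its size falls inside any user-selected interval.
selected = pd.IntervalIndex(sorted(NUMERIC_INTERVALS[s] for s in ["0~3B", "3~7B"]))
mask = params.apply(lambda x: any(selected.contains(x)))
print(mask.tolist())  # [True, True, False] -> the 13B model is filtered out
```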
model_info_cache.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6579e330063066b049d778cb4dbb548289e9fb570492ff444d42ef490234e379
+ size 216682
model_size_cache.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:316daa2fa787ce5e521a0c39fc0f40b5ba7e084ec033b6e893b7712269ec11a3
+ size 5559
models_backlinks.py ADDED
@@ -0,0 +1 @@
+ models = ['upstage/Llama-2-70b-instruct-v2', 'upstage/Llama-2-70b-instruct', 'upstage/llama-65b-instruct', 'upstage/llama-65b-instruct', 'upstage/llama-30b-instruct-2048', 'upstage/llama-30b-instruct', 'baseline']
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [tool.ruff]
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+ select = ["E", "F"]
+ ignore = ["E501"]  # line too long (black is taking care of this)
+ line-length = 119
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+ [tool.isort]
+ profile = "black"
+ line_length = 119
+
+ [tool.black]
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,71 @@
+ accelerate==0.23.0
+ aiofiles==23.1.0
+ aiohttp==3.8.4
+ aiosignal==1.3.1
+ altair==4.2.2
+ anyio==3.6.2
+ APScheduler==3.10.1
+ async-timeout==4.0.2
+ attrs==23.1.0
+ certifi==2022.12.7
+ charset-normalizer==3.1.0
+ click==8.1.3
+ contourpy==1.0.7
+ cycler==0.11.0
+ datasets==2.12.0
+ entrypoints==0.4
+ fastapi==0.95.1
+ ffmpy==0.3.0
+ filelock==3.11.0
+ fonttools==4.39.3
+ frozenlist==1.3.3
+ fsspec==2023.4.0
+ gradio==3.43.2
+ gradio-client==0.5.0
+ h11==0.14.0
+ httpcore==0.17.0
+ httpx==0.24.0
+ huggingface-hub==0.16.4
+ idna==3.4
+ Jinja2==3.1.2
+ jsonschema==4.17.3
+ kiwisolver==1.4.4
+ linkify-it-py==2.0.0
+ markdown-it-py==2.2.0
+ MarkupSafe==2.1.2
+ matplotlib==3.7.1
+ mdit-py-plugins==0.3.3
+ mdurl==0.1.2
+ multidict==6.0.4
+ numpy==1.24.2
+ orjson==3.8.10
+ packaging==23.1
+ pandas==2.0.0
+ Pillow==9.5.0
+ plotly==5.14.1
+ pyarrow==11.0.0
+ pydantic==1.10.7
+ pydub==0.25.1
+ pyparsing==3.0.9
+ pyrsistent==0.19.3
+ python-dateutil==2.8.2
+ python-multipart==0.0.6
+ pytz==2023.3
+ pytz-deprecation-shim==0.1.0.post0
+ PyYAML==6.0
+ requests==2.28.2
+ semantic-version==2.10.0
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.26.1
+ toolz==0.12.0
+ tqdm==4.65.0
+ transformers==4.34.0
+ typing_extensions==4.5.0
+ tzdata==2023.3
+ tzlocal==4.3
+ uc-micro-py==1.0.1
+ urllib3==1.26.15
+ uvicorn==0.21.1
+ websockets==11.0.1
+ yarl==1.8.2
src/assets/css_html_js.py ADDED
@@ -0,0 +1,111 @@
+ custom_css = """
+
+ .markdown-text {
+     font-size: 16px !important;
+ }
+
+ #models-to-add-text {
+     font-size: 18px !important;
+ }
+
+ #citation-button span {
+     font-size: 16px !important;
+ }
+
+ #citation-button textarea {
+     font-size: 16px !important;
+ }
+
+ #citation-button > label > button {
+     margin: 6px;
+     transform: scale(1.3);
+ }
+
+ #leaderboard-table {
+     margin-top: 15px
+ }
+
+ #leaderboard-table-lite {
+     margin-top: 15px
+ }
+
+ #search-bar-table-box > div:first-child {
+     background: none;
+     border: none;
+ }
+
+ #search-bar {
+     padding: 0px;
+ }
+
+ /* Hides the final AutoEvalColumn */
+ #llm-benchmark-tab-table table td:last-child,
+ #llm-benchmark-tab-table table th:last-child {
+     display: none;
+ }
+
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+ table td:first-child,
+ table th:first-child {
+     max-width: 400px;
+     overflow: auto;
+     white-space: nowrap;
+ }
+
+ .tab-buttons button {
+     font-size: 20px;
+ }
+
+ #scale-logo {
+     border-style: none !important;
+     box-shadow: none;
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+     max-width: 600px;
+ }
+
+ #scale-logo .download {
+     display: none;
+ }
+ #filter_type {
+     border: 0;
+     padding-left: 0;
+     padding-top: 0;
+ }
+ #filter_type label {
+     display: flex;
+ }
+ #filter_type label > span {
+     margin-top: var(--spacing-lg);
+     margin-right: 0.5em;
+ }
+ #filter_type label > .wrap {
+     width: 103px;
+ }
+ #filter_type label > .wrap .wrap-inner {
+     padding: 2px;
+ }
+ #filter_type label > .wrap .wrap-inner input {
+     width: 1px
+ }
+ #filter-columns-type {
+     border: 0;
+     padding: 0.5;
+ }
+ #filter-columns-size {
+     border: 0;
+     padding: 0.5;
+ }
+ #box-filter > .form {
+     border: 0
+ }
+ """
+
+ get_window_url_params = """
+ function(url_params) {
+     const params = new URLSearchParams(window.location.search);
+     url_params = Object.fromEntries(params);
+     return url_params;
+ }
+ """
src/assets/hardcoded_evals.py ADDED
@@ -0,0 +1,14 @@
+ from src.display_models.utils import AutoEvalColumn, model_hyperlink
+
+ baseline = {
+     AutoEvalColumn.model.name: "<p>Baseline</p>",
+     AutoEvalColumn.revision.name: "N/A",
+     AutoEvalColumn.precision.name: None,
+     AutoEvalColumn.average.name: 25.0,
+     AutoEvalColumn.arc.name: 25.0,
+     AutoEvalColumn.hellaswag.name: 25.0,
+     AutoEvalColumn.mmlu.name: 25.0,
+     AutoEvalColumn.truthfulqa.name: 25.0,
+     AutoEvalColumn.dummy.name: "baseline",
+     AutoEvalColumn.model_type.name: "",
+ }
src/assets/text_content.py ADDED
@@ -0,0 +1,159 @@
+ from src.display_models.model_metadata_type import ModelType
+
+ TITLE = """<img src="https://upstage-open-ko-llm-leaderboard-logos.s3.ap-northeast-2.amazonaws.com/header_logo.png" style="width:30%;display:block;margin-left:auto;margin-right:auto">"""
+ BOTTOM_LOGO = """<img src="https://upstage-open-ko-llm-leaderboard-logos.s3.ap-northeast-2.amazonaws.com/footer_logo_1.png" style="width:50%;display:block;margin-left:auto;margin-right:auto">"""
+
+ INTRODUCTION_TEXT = f"""
+ 🚀 The Open Ko-LLM Leaderboard 🇰🇷 objectively evaluates the performance of Korean Large Language Models (LLMs).
+
+ When you submit a model on the "Submit here!" page, it is automatically evaluated. The GPU used for evaluation is operated with the support of __[KT](https://cloud.kt.com/)__.
+ The data used for evaluation consists of datasets to assess reasoning, language understanding, hallucination, and commonsense.
+ The evaluation dataset is exclusively private and only available for the evaluation process.
+ More detailed information about the benchmark dataset is provided on the “About” page.
+
+ This leaderboard is co-hosted by __[Upstage](https://www.upstage.ai)__ and __[NIA](https://www.nia.or.kr/site/nia_kor/main.do)__, which provides various Korean data sets through __[AI-Hub](https://aihub.or.kr)__, and operated by __[Upstage](https://www.upstage.ai)__.
+ """
+
+ LLM_BENCHMARKS_TEXT = f"""
+ # Context
+ While outstanding LLMs are being released competitively, most of them are centered on English and the English cultural sphere. We operate the Korean leaderboard, 🚀 Open Ko-LLM, to evaluate models that reflect the characteristics of the Korean language and Korean culture. Through this, we hope that users can conveniently use the leaderboard, participate, and contribute to the advancement of research in Korean.
+
+ ## Icons
+ {ModelType.PT.to_str(" : ")} model
+ {ModelType.FT.to_str(" : ")} model
+ {ModelType.IFT.to_str(" : ")} model
+ {ModelType.RL.to_str(" : ")} model
+ If there is no icon, it indicates that there is insufficient information about the model.
+ Please provide information about the model through an issue! 🤩
+
+ 🏴‍☠️ : This icon indicates that the model has been selected as a subject of caution by the community, implying that users should exercise restraint when using it. Clicking on the icon will take you to a discussion about that model.
+ (Models that have used the evaluation set for training to achieve a high leaderboard ranking, among others, are selected as subjects of caution.)
+
+ ## How it works
+
+ 📈 We evaluate models using the [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), a unified framework to test generative language models on a large number of different evaluation tasks.
+
+ We have set up a benchmark using datasets translated into Korean, with variations applied by human experts, from the four tasks (HellaSwag, MMLU, Arc, Truthful QA) operated by HuggingFace OpenLLM. We have also added a new dataset prepared from scratch.
+ - Ko-HellaSwag (provided by __[Upstage](https://www.upstage.ai/)__, machine translation)
+ - Ko-MMLU (provided by __[Upstage](https://www.upstage.ai/)__, human translation and variation)
+ - Ko-Arc (provided by __[Upstage](https://www.upstage.ai/)__, human translation and variation)
+ - Ko-Truthful QA (provided by __[Upstage](https://www.upstage.ai/)__, human translation and variation)
+ - Ko-CommonGen V2 (provided by __[Korea University NLP&AI Lab](http://nlp.korea.ac.kr/)__, created from scratch)
+
+ To provide an evaluation befitting the LLM era, we've selected benchmark datasets suitable for assessing these elements: expertise, inference, hallucination, and common sense. The final score is the average of the scores from each evaluation dataset.
+
+ GPUs are provided by __[KT](https://cloud.kt.com/)__ for the evaluations.
+
+ ## Details and Logs
+ - Detailed numerical results in the `results` Upstage dataset: https://huggingface.co/datasets/open-ko-llm-leaderboard/results
+ - Community queries and running status in the `requests` Upstage dataset: https://huggingface.co/datasets/open-ko-llm-leaderboard/requests
+
+ ## More resources
+ If you still have questions, you can check our FAQ [here](https://huggingface.co/spaces/upstage/open-ko-llm-leaderboard/discussions/1)!
+ """
+
+ EVALUATION_QUEUE_TEXT = f"""
+ # Evaluation Queue for the 🚀 Open Ko-LLM Leaderboard
+ Models added here will be automatically evaluated on the KT GPU cluster.
+
+ ## <Some good practices before submitting a model>
+
+ ### 1️⃣ Make sure you can load your model and tokenizer using AutoClasses
+ ```python
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
+ config = AutoConfig.from_pretrained("your model name", revision=revision)
+ model = AutoModel.from_pretrained("your model name", revision=revision)
+ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+ ```
+
+ If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+ ⚠️ Make sure your model is public!
+
+ ⚠️ Make sure your model runs with the [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness)
+
+ ⚠️ If your model needs `trust_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
+
+ ### 2️⃣ Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+ It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+
+ ### 3️⃣ Make sure your model has an open license!
+ This is a leaderboard for 🚀 Open Ko-LLMs, and we'd love for as many people as possible to know they can use your model.
+
+ ### 4️⃣ Fill up your model card
+ When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+
+ ## In case of model failure
+ If your model is displayed in the `FAILED` category, its execution stopped. Make sure you have followed the above steps first. If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+ """
+
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results. Authors of open-ko-llm-leaderboard are ordered alphabetically."
+ CITATION_BUTTON_TEXT = r"""
+ @misc{open-ko-llm-leaderboard,
+   author = {Chanjun Park, Hwalsuk Lee, Hyunbyung Park, Hyeonwoo Kim, Sanghoon Kim, Seonghwan Cho, Sunghun Kim, Sukyung Lee},
+   title = {Open Ko-LLM Leaderboard},
+   year = {2023},
+   publisher = {Upstage, National Information Society Agency},
+   howpublished = "\url{https://huggingface.co/spaces/upstage/open-ko-llm-leaderboard}"
+ }
+ @software{eval-harness,
+   author = {Gao, Leo and
+             Tow, Jonathan and
+             Biderman, Stella and
+             Black, Sid and
+             DiPofi, Anthony and
+             Foster, Charles and
+             Golding, Laurence and
+             Hsu, Jeffrey and
+             McDonell, Kyle and
+             Muennighoff, Niklas and
+             Phang, Jason and
+             Reynolds, Laria and
+             Tang, Eric and
+             Thite, Anish and
+             Wang, Ben and
+             Wang, Kevin and
+             Zou, Andy},
+   title = {A framework for few-shot language model evaluation},
+   month = sep,
+   year = 2021,
+   publisher = {Zenodo},
+   version = {v0.0.1},
+   doi = {10.5281/zenodo.5371628},
+   url = {https://doi.org/10.5281/zenodo.5371628}
+ }
+ @misc{seo2023kocommongen,
+   title = {Korean Commonsense Reasoning Evaluation for Large Language Models},
+   author = {Jaehyung Seo, Chanjun Park, Hyeonseok Moon, Sugyeong Eo, Aram So, Heuiseok Lim},
+   year = {2023},
+   affiliation = {Korea University, NLP&AI},
+   booktitle = {Proceedings of the 35th Annual Conference on Human & Cognitive Language Technology}}
+ @misc{park2023koarc,
+   title = {Ko-ARC},
+   original_title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
+   author = {Hyunbyung Park, Chanjun Park},
+   original_author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
+   year = {2023}
+ }
+ @misc{park2023kohellaswag,
+   title = {Ko-HellaSwag},
+   original_title = {HellaSwag: Can a Machine Really Finish Your Sentence?},
+   author = {Hyunbyung Park, Chanjun Park},
+   original_author = {Rowan Zellers and Ari Holtzman and Yonatan Bisk and Ali Farhadi and Yejin Choi},
+   year = {2023}
+ }
+ @misc{park2023kommlu,
+   title = {Ko-MMLU},
+   original_title = {Measuring Massive Multitask Language Understanding},
+   author = {Hyunbyung Park, Chanjun Park},
+   original_author = {Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
+   year = {2023}
+ }
+ @misc{park2023kotruthfulqa,
+   title = {Ko-TruthfulQA},
+   original_title = {TruthfulQA: Measuring How Models Mimic Human Falsehoods},
+   author = {Hyunbyung Park, Chanjun Park},
+   original_author = {Stephanie Lin and Jacob Hilton and Owain Evans},
+   year = {2023}
+ }
+ """
src/display_models/get_model_metadata.py ADDED
@@ -0,0 +1,167 @@
+ import glob
+ import json
+ import os
+ import re
+ import pickle
+ from typing import List
+
+ import huggingface_hub
+ from huggingface_hub import HfApi
+ from tqdm import tqdm
+ from transformers import AutoModel, AutoConfig
+ from accelerate import init_empty_weights
+
+ from src.display_models.model_metadata_flags import DO_NOT_SUBMIT_MODELS, FLAGGED_MODELS
+ from src.display_models.model_metadata_type import MODEL_TYPE_METADATA, ModelType, model_type_from_str
+ from src.display_models.utils import AutoEvalColumn, model_hyperlink
+
+ api = HfApi(token=os.environ.get("H4_TOKEN", None))
+
+
+ def get_model_infos_from_hub(leaderboard_data: List[dict]):
+     # load cache from disk
+     try:
+         with open("model_info_cache.pkl", "rb") as f:
+             model_info_cache = pickle.load(f)
+     except (EOFError, FileNotFoundError):
+         model_info_cache = {}
+     try:
+         with open("model_size_cache.pkl", "rb") as f:
+             model_size_cache = pickle.load(f)
+     except (EOFError, FileNotFoundError):
+         model_size_cache = {}
+
+     for model_data in tqdm(leaderboard_data):
+         model_name = model_data["model_name_for_query"]
+
+         if model_name in model_info_cache:
+             model_info = model_info_cache[model_name]
+         else:
+             try:
+                 model_info = api.model_info(model_name)
+                 model_info_cache[model_name] = model_info
+             except huggingface_hub.utils._errors.RepositoryNotFoundError:
+                 print("Repo not found!", model_name)
+                 model_data[AutoEvalColumn.license.name] = None
+                 model_data[AutoEvalColumn.likes.name] = None
+                 if model_name not in model_size_cache:
+                     model_size_cache[model_name] = get_model_size(model_name, None)
+                 model_data[AutoEvalColumn.params.name] = model_size_cache[model_name]
+                 continue  # no model_info to read for a missing repo
+         model_data[AutoEvalColumn.license.name] = get_model_license(model_info)
+         model_data[AutoEvalColumn.likes.name] = get_model_likes(model_info)
+         if model_name not in model_size_cache:
+             model_size_cache[model_name] = get_model_size(model_name, model_info)
+         model_data[AutoEvalColumn.params.name] = model_size_cache[model_name]
+
+     # save cache to disk in pickle format
+     with open("model_info_cache.pkl", "wb") as f:
+         pickle.dump(model_info_cache, f)
+     with open("model_size_cache.pkl", "wb") as f:
+         pickle.dump(model_size_cache, f)
+
+
+ def get_model_license(model_info):
+     try:
+         return model_info.cardData["license"]
+     except Exception:
+         return "?"
+
+
+ def get_model_likes(model_info):
+     return model_info.likes
+
+
+ size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
+
+
+ def get_model_size(model_name, model_info):
+     # In billions
+     try:
+         return round(model_info.safetensors["total"] / 1e9, 3)
+     except AttributeError:
+         try:
+             config = AutoConfig.from_pretrained(model_name, trust_remote_code=False)
+             with init_empty_weights():
+                 model = AutoModel.from_config(config, trust_remote_code=False)
+             return round(sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e9, 3)
+         except (EnvironmentError, ValueError):  # model config not found, likely private
+             try:
+                 size_match = re.search(size_pattern, model_name.lower())
+                 size = size_match.group(0)
+                 return round(float(size[:-1]) if size[-1] == "b" else float(size[:-1]) / 1e3, 3)
+             except AttributeError:
+                 return 0
+
+
+ def get_model_type(leaderboard_data: List[dict]):
+     for model_data in leaderboard_data:
+         request_files = os.path.join(
+             "eval-queue",
+             model_data["model_name_for_query"] + "_eval_request_*" + ".json",
+         )
+         request_files = glob.glob(request_files)
+
+         # Select correct request file (precision)
+         request_file = ""
+         if len(request_files) == 1:
+             request_file = request_files[0]
+         elif len(request_files) > 1:
+             request_files = sorted(request_files, reverse=True)
+             for tmp_request_file in request_files:
+                 with open(tmp_request_file, "r") as f:
+                     req_content = json.load(f)
+                     if (
+                         req_content["status"] == "FINISHED"
+                         and req_content["precision"] == model_data["Precision"].split(".")[-1]
+                     ):
+                         request_file = tmp_request_file
+
+         try:
+             with open(request_file, "r") as f:
+                 request = json.load(f)
+             model_type = model_type_from_str(request["model_type"])
+             model_data[AutoEvalColumn.model_type.name] = model_type.value.name
+             model_data[AutoEvalColumn.model_type_symbol.name] = model_type.value.symbol  # + ("🔺" if is_delta else "")
+         except Exception:
+             if model_data["model_name_for_query"] in MODEL_TYPE_METADATA:
+                 model_data[AutoEvalColumn.model_type.name] = MODEL_TYPE_METADATA[
+                     model_data["model_name_for_query"]
+                 ].value.name
+                 model_data[AutoEvalColumn.model_type_symbol.name] = MODEL_TYPE_METADATA[
+                     model_data["model_name_for_query"]
+                 ].value.symbol  # + ("🔺" if is_delta else "")
+             else:
+                 model_data[AutoEvalColumn.model_type.name] = ModelType.Unknown.value.name
+                 model_data[AutoEvalColumn.model_type_symbol.name] = ModelType.Unknown.value.symbol
+
+
+ def flag_models(leaderboard_data: List[dict]):
+     for model_data in leaderboard_data:
+         if model_data["model_name_for_query"] in FLAGGED_MODELS:
+             issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
+             issue_link = model_hyperlink(
+                 FLAGGED_MODELS[model_data["model_name_for_query"]],
+                 f"See discussion #{issue_num}",
+             )
+             model_data[
+                 AutoEvalColumn.model.name
+             ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
+
+
+ def remove_forbidden_models(leaderboard_data: List[dict]):
+     indices_to_remove = []
+     for ix, model in enumerate(leaderboard_data):
+         if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS:
+             indices_to_remove.append(ix)
+
+     for ix in reversed(indices_to_remove):
+         leaderboard_data.pop(ix)
+     return leaderboard_data
+
+
+ def apply_metadata(leaderboard_data: List[dict]):
+     leaderboard_data = remove_forbidden_models(leaderboard_data)
+     get_model_type(leaderboard_data)
+     get_model_infos_from_hub(leaderboard_data)
+     flag_models(leaderboard_data)
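The last-resort branch of `get_model_size` above guesses the parameter count from the repo name itself via `size_pattern`. A small standalone check illustrates the parse; the model names are examples only, any repo id containing a "30b"- or "350m"-style token works the same way:

```python
import re

# The same pattern used by get_model_size's name-based fallback.
size_pattern = re.compile(r"(\d\.)?\d+(b|m)")

for name in ["upstage/llama-30b-instruct", "PygmalionAI/pygmalion-350m"]:
    # Extract the first size-like token, e.g. "30b" or "350m".
    size = re.search(size_pattern, name.lower()).group(0)
    # "b" suffix is already billions; "m" suffix is millions, so divide by 1000.
    billions = float(size[:-1]) if size[-1] == "b" else float(size[:-1]) / 1e3
    print(name, "->", billions)  # 30.0 and 0.35 billion parameters
```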
src/display_models/model_metadata_flags.py ADDED
@@ -0,0 +1,8 @@
+ # Models which have been flagged by users as being problematic for one reason or another
+ # (Model name to forum discussion link)
+ FLAGGED_MODELS = {
+ }
+
+ # Models which orgs have requested not be submitted to the leaderboard
+ DO_NOT_SUBMIT_MODELS = [
+ ]
src/display_models/model_metadata_type.py ADDED
@@ -0,0 +1,553 @@
+ from dataclasses import dataclass
+ from enum import Enum
+ from typing import Dict
+
+
+ @dataclass
+ class ModelInfo:
+     name: str
+     symbol: str  # emoji
+
+
+ class ModelType(Enum):
+     PT = ModelInfo(name="pretrained", symbol="🟢")
+     FT = ModelInfo(name="fine-tuned", symbol="🔶")
+     IFT = ModelInfo(name="instruction-tuned", symbol="⭕")
+     RL = ModelInfo(name="RL-tuned", symbol="🟦")
+     Unknown = ModelInfo(name="Unknown, add type to request file!", symbol="?")
+
+     def to_str(self, separator=" "):
+         return f"{self.value.symbol}{separator}{self.value.name}"
+
+
+ MODEL_TYPE_METADATA: Dict[str, ModelType] = {
+     "tiiuae/falcon-180B": ModelType.PT,
+     "Qwen/Qwen-7B": ModelType.PT,
+     "Qwen/Qwen-7B-Chat": ModelType.RL,
+     "notstoic/PygmalionCoT-7b": ModelType.IFT,
+     "aisquared/dlite-v1-355m": ModelType.IFT,
+     "aisquared/dlite-v1-1_5b": ModelType.IFT,
+     "aisquared/dlite-v1-774m": ModelType.IFT,
+     "aisquared/dlite-v1-124m": ModelType.IFT,
+     "aisquared/chopt-2_7b": ModelType.IFT,
+     "aisquared/dlite-v2-124m": ModelType.IFT,
+     "aisquared/dlite-v2-774m": ModelType.IFT,
+     "aisquared/dlite-v2-1_5b": ModelType.IFT,
+     "aisquared/chopt-1_3b": ModelType.IFT,
+     "aisquared/dlite-v2-355m": ModelType.IFT,
+     "augtoma/qCammel-13": ModelType.IFT,
+     "Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload": ModelType.IFT,
+     "Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload": ModelType.IFT,
+     "TheBloke/alpaca-lora-65B-HF": ModelType.FT,
+     "TheBloke/tulu-7B-fp16": ModelType.IFT,
+     "TheBloke/guanaco-7B-HF": ModelType.FT,
+     "TheBloke/koala-7B-HF": ModelType.FT,
+     "TheBloke/wizardLM-7B-HF": ModelType.IFT,
+     "TheBloke/airoboros-13B-HF": ModelType.IFT,
+     "TheBloke/koala-13B-HF": ModelType.FT,
+     "TheBloke/Wizard-Vicuna-7B-Uncensored-HF": ModelType.FT,
+     "TheBloke/dromedary-65b-lora-HF": ModelType.IFT,
+     "TheBloke/wizardLM-13B-1.0-fp16": ModelType.IFT,
+     "TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16": ModelType.FT,
+     "TheBloke/Wizard-Vicuna-30B-Uncensored-fp16": ModelType.FT,
+     "TheBloke/wizard-vicuna-13B-HF": ModelType.IFT,
+     "TheBloke/UltraLM-13B-fp16": ModelType.IFT,
+     "TheBloke/OpenAssistant-FT-7-Llama-30B-HF": ModelType.FT,
+     "TheBloke/vicuna-13B-1.1-HF": ModelType.IFT,
+     "TheBloke/guanaco-13B-HF": ModelType.FT,
+     "TheBloke/guanaco-65B-HF": ModelType.FT,
+     "TheBloke/airoboros-7b-gpt4-fp16": ModelType.IFT,
+     "TheBloke/llama-30b-supercot-SuperHOT-8K-fp16": ModelType.IFT,
+     "TheBloke/Llama-2-13B-fp16": ModelType.PT,
+     "TheBloke/llama-2-70b-Guanaco-QLoRA-fp16": ModelType.FT,
+     "TheBloke/landmark-attention-llama7b-fp16": ModelType.IFT,
+     "TheBloke/Planner-7B-fp16": ModelType.IFT,
+     "TheBloke/Wizard-Vicuna-13B-Uncensored-HF": ModelType.FT,
+     "TheBloke/gpt4-alpaca-lora-13B-HF": ModelType.IFT,
+     "TheBloke/gpt4-x-vicuna-13B-HF": ModelType.IFT,
+     "TheBloke/gpt4-alpaca-lora_mlp-65B-HF": ModelType.IFT,
+     "TheBloke/tulu-13B-fp16": ModelType.IFT,
+     "TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16": ModelType.IFT,
+     "TheBloke/Llama-2-70B-fp16": ModelType.IFT,
+     "TheBloke/WizardLM-30B-fp16": ModelType.IFT,
+     "TheBloke/robin-13B-v2-fp16": ModelType.FT,
+     "TheBloke/robin-33B-v2-fp16": ModelType.FT,
+     "TheBloke/Vicuna-13B-CoT-fp16": ModelType.IFT,
+     "TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16": ModelType.IFT,
+     "TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16": ModelType.FT,
+     "TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16": ModelType.IFT,
+     "TheBloke/GPlatty-30B-SuperHOT-8K-fp16": ModelType.FT,
+     "TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16": ModelType.IFT,
+     "TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16": ModelType.IFT,
+     "jphme/orca_mini_v2_ger_7b": ModelType.IFT,
+     "Ejafa/vicuna_7B_vanilla_1.1": ModelType.FT,
+     "kevinpro/Vicuna-13B-CoT": ModelType.IFT,
+     "AlekseyKorshuk/pygmalion-6b-vicuna-chatml": ModelType.FT,
+     "AlekseyKorshuk/chatml-pyg-v1": ModelType.FT,
+     "concedo/Vicuzard-30B-Uncensored": ModelType.FT,
+     "concedo/OPT-19M-ChatSalad": ModelType.FT,
+     "concedo/Pythia-70M-ChatSalad": ModelType.FT,
+     "digitous/13B-HyperMantis": ModelType.IFT,
+     "digitous/Adventien-GPTJ": ModelType.FT,
+     "digitous/Alpacino13b": ModelType.IFT,
+     "digitous/GPT-R": ModelType.IFT,
+     "digitous/Javelin-R": ModelType.IFT,
+     "digitous/Javalion-GPTJ": ModelType.IFT,
+     "digitous/Javalion-R": ModelType.IFT,
+     "digitous/Skegma-GPTJ": ModelType.FT,
+     "digitous/Alpacino30b": ModelType.IFT,
+     "digitous/Janin-GPTJ": ModelType.FT,
+     "digitous/Janin-R": ModelType.FT,
+     "digitous/Javelin-GPTJ": ModelType.FT,
+     "SaylorTwift/gpt2_test": ModelType.PT,
+     "anton-l/gpt-j-tiny-random": ModelType.FT,
+     "Andron00e/YetAnother_Open-Llama-3B-LoRA-OpenOrca": ModelType.FT,
+     "Lazycuber/pyg-instruct-wizardlm": ModelType.FT,
+     "Lazycuber/Janemalion-6B": ModelType.FT,
+     "IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1": ModelType.FT,
+     "IDEA-CCNL/Ziya-LLaMA-13B-v1": ModelType.IFT,
+     "dsvv-cair/alpaca-cleaned-llama-30b-bf16": ModelType.FT,
+     "gpt2-medium": ModelType.PT,
+     "camel-ai/CAMEL-13B-Combined-Data": ModelType.IFT,
+     "camel-ai/CAMEL-13B-Role-Playing-Data": ModelType.FT,
+     "camel-ai/CAMEL-33B-Combined-Data": ModelType.IFT,
+     "PygmalionAI/pygmalion-6b": ModelType.FT,
+     "PygmalionAI/metharme-1.3b": ModelType.IFT,
+     "PygmalionAI/pygmalion-1.3b": ModelType.FT,
+     "PygmalionAI/pygmalion-350m": ModelType.FT,
+     "PygmalionAI/pygmalion-2.7b": ModelType.FT,
+     "medalpaca/medalpaca-7b": ModelType.FT,
+     "lilloukas/Platypus-30B": ModelType.IFT,
+     "lilloukas/GPlatty-30B": ModelType.FT,
+     "mncai/chatdoctor": ModelType.FT,
+     "chaoyi-wu/MedLLaMA_13B": ModelType.FT,
+     "LoupGarou/WizardCoder-Guanaco-15B-V1.0": ModelType.IFT,
+     "LoupGarou/WizardCoder-Guanaco-15B-V1.1": ModelType.FT,
+     "hakurei/instruct-12b": ModelType.IFT,
+     "hakurei/lotus-12B": ModelType.FT,
+     "shibing624/chinese-llama-plus-13b-hf": ModelType.IFT,
+     "shibing624/chinese-alpaca-plus-7b-hf": ModelType.IFT,
+     "shibing624/chinese-alpaca-plus-13b-hf": ModelType.IFT,
+     "mosaicml/mpt-7b-instruct": ModelType.IFT,
+     "mosaicml/mpt-30b-chat": ModelType.IFT,
+     "mosaicml/mpt-7b-storywriter": ModelType.FT,
+     "mosaicml/mpt-30b-instruct": ModelType.IFT,
+     "mosaicml/mpt-7b-chat": ModelType.IFT,
+     "mosaicml/mpt-30b": ModelType.PT,
+     "Corianas/111m": ModelType.IFT,
+     "Corianas/Quokka_1.3b": ModelType.IFT,
+     "Corianas/256_5epoch": ModelType.FT,
+     "Corianas/Quokka_256m": ModelType.IFT,
+     "Corianas/Quokka_590m": ModelType.IFT,
+     "Corianas/gpt-j-6B-Dolly": ModelType.FT,
+     "Corianas/Quokka_2.7b": ModelType.IFT,
+     "cyberagent/open-calm-7b": ModelType.FT,
+     "Aspik101/Nous-Hermes-13b-pl-lora_unload": ModelType.IFT,
+     "THUDM/chatglm2-6b": ModelType.IFT,
+     "MetaIX/GPT4-X-Alpasta-30b": ModelType.IFT,
+     "NYTK/PULI-GPTrio": ModelType.PT,
+     "EleutherAI/pythia-1.3b": ModelType.PT,
+     "EleutherAI/pythia-2.8b-deduped": ModelType.PT,
+     "EleutherAI/gpt-neo-125m": ModelType.PT,
+     "EleutherAI/pythia-160m": ModelType.PT,
+     "EleutherAI/gpt-neo-2.7B": ModelType.PT,
+     "EleutherAI/pythia-1b-deduped": ModelType.PT,
+     "EleutherAI/pythia-6.7b": ModelType.PT,
+     "EleutherAI/pythia-70m-deduped": ModelType.PT,
+     "EleutherAI/gpt-neox-20b": ModelType.PT,
+     "EleutherAI/pythia-1.4b-deduped": ModelType.PT,
+     "EleutherAI/pythia-2.7b": ModelType.PT,
+     "EleutherAI/pythia-6.9b-deduped": ModelType.PT,
+     "EleutherAI/pythia-70m": ModelType.PT,
+     "EleutherAI/gpt-j-6b": ModelType.PT,
+     "EleutherAI/pythia-12b-deduped": ModelType.PT,
+     "EleutherAI/gpt-neo-1.3B": ModelType.PT,
+     "EleutherAI/pythia-410m-deduped": ModelType.PT,
+     "EleutherAI/pythia-160m-deduped": ModelType.PT,
+     "EleutherAI/polyglot-ko-12.8b": ModelType.PT,
+     "EleutherAI/pythia-12b": ModelType.PT,
+     "roneneldan/TinyStories-33M": ModelType.PT,
+     "roneneldan/TinyStories-28M": ModelType.PT,
+     "roneneldan/TinyStories-1M": ModelType.PT,
+     "roneneldan/TinyStories-8M": ModelType.PT,
+     "roneneldan/TinyStories-3M": ModelType.PT,
+     "jerryjalapeno/nart-100k-7b": ModelType.FT,
+     "lmsys/vicuna-13b-v1.3": ModelType.IFT,
+     "lmsys/vicuna-7b-v1.3": ModelType.IFT,
+     "lmsys/vicuna-13b-v1.1": ModelType.IFT,
+     "lmsys/vicuna-13b-delta-v1.1": ModelType.IFT,
+     "lmsys/vicuna-7b-delta-v1.1": ModelType.IFT,
+     "abhiramtirumala/DialoGPT-sarcastic-medium": ModelType.FT,
+     "haonan-li/bactrian-x-llama-13b-merged": ModelType.IFT,
+     "Gryphe/MythoLogic-13b": ModelType.IFT,
+     "Gryphe/MythoBoros-13b": ModelType.IFT,
+     "pillowtalks-ai/delta13b": ModelType.FT,
+     "wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard": ModelType.FT,
+     "bigscience/bloom-7b1": ModelType.PT,
+     "bigcode/tiny_starcoder_py": ModelType.PT,
+     "bigcode/starcoderplus": ModelType.FT,
+     "bigcode/gpt_bigcode-santacoder": ModelType.PT,
+     "bigcode/starcoder": ModelType.PT,
+     "Open-Orca/OpenOrca-Preview1-13B": ModelType.IFT,
+     "microsoft/DialoGPT-large": ModelType.FT,
+     "microsoft/DialoGPT-small": ModelType.FT,
+     "microsoft/DialoGPT-medium": ModelType.FT,
+     "microsoft/CodeGPT-small-py": ModelType.FT,
+     "Tincando/fiction_story_generator": ModelType.FT,
+     "Pirr/pythia-13b-deduped-green_devil": ModelType.FT,
+     "Aeala/GPT4-x-AlpacaDente2-30b": ModelType.FT,
+     "Aeala/GPT4-x-AlpacaDente-30b": ModelType.FT,
+     "Aeala/GPT4-x-Alpasta-13b": ModelType.FT,
+     "Aeala/VicUnlocked-alpaca-30b": ModelType.IFT,
+     "Tap-M/Luna-AI-Llama2-Uncensored": ModelType.FT,
+     "illuin/test-custom-llama": ModelType.FT,
+     "dvruette/oasst-llama-13b-2-epochs": ModelType.FT,
+     "dvruette/oasst-gpt-neox-20b-1000-steps": ModelType.FT,
+     "dvruette/llama-13b-pretrained-dropout": ModelType.PT,
+     "dvruette/llama-13b-pretrained": ModelType.PT,
+     "dvruette/llama-13b-pretrained-sft-epoch-1": ModelType.FT,
+     "dvruette/llama-13b-pretrained-sft-do2": ModelType.FT,
+     "dvruette/oasst-gpt-neox-20b-3000-steps": ModelType.FT,
+     "dvruette/oasst-pythia-12b-pretrained-sft": ModelType.FT,
+     "dvruette/oasst-pythia-6.9b-4000-steps": ModelType.FT,
+     "dvruette/gpt-neox-20b-full-precision": ModelType.FT,
+     "dvruette/oasst-llama-13b-1000-steps": ModelType.FT,
+     "openlm-research/open_llama_7b_700bt_preview": ModelType.PT,
+     "openlm-research/open_llama_7b": ModelType.PT,
+     "openlm-research/open_llama_7b_v2": ModelType.PT,
+     "openlm-research/open_llama_3b": ModelType.PT,
+     "openlm-research/open_llama_13b": ModelType.PT,
+     "openlm-research/open_llama_3b_v2": ModelType.PT,
+     "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged": ModelType.IFT,
+     "GeorgiaTechResearchInstitute/galpaca-30b": ModelType.IFT,
+     "GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct": ModelType.IFT,
+     "databricks/dolly-v2-7b": ModelType.IFT,
+     "databricks/dolly-v2-3b": ModelType.IFT,
+     "databricks/dolly-v2-12b": ModelType.IFT,
+     "Rachneet/gpt2-xl-alpaca": ModelType.FT,
+     "Locutusque/gpt2-conversational-or-qa": ModelType.FT,
+     "psyche/kogpt": ModelType.FT,
+     "NbAiLab/nb-gpt-j-6B-alpaca": ModelType.IFT,
+     "Mikael110/llama-2-7b-guanaco-fp16": ModelType.FT,
+     "Mikael110/llama-2-13b-guanaco-fp16": ModelType.FT,
+     "Fredithefish/CrimsonPajama": ModelType.IFT,
+     "Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K": ModelType.FT,
+     "Fredithefish/ScarletPajama-3B-HF": ModelType.FT,
+     "Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4": ModelType.IFT,
+     "acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1": ModelType.IFT,
+     "eachadea/vicuna-13b-1.1": ModelType.FT,
+     "eachadea/vicuna-7b-1.1": ModelType.FT,
+     "eachadea/vicuna-13b": ModelType.FT,
+     "openaccess-ai-collective/wizard-mega-13b": ModelType.IFT,
+     "openaccess-ai-collective/manticore-13b": ModelType.IFT,
+     "openaccess-ai-collective/manticore-30b-chat-pyg-alpha": ModelType.IFT,
+     "openaccess-ai-collective/minotaur-13b": ModelType.IFT,
245
+ "openaccess-ai-collective/minotaur-13b-fixed": ModelType.IFT,
246
+ "openaccess-ai-collective/hippogriff-30b-chat": ModelType.IFT,
247
+ "openaccess-ai-collective/manticore-13b-chat-pyg": ModelType.IFT,
248
+ "pythainlp/wangchanglm-7.5B-sft-enth": ModelType.IFT,
249
+ "pythainlp/wangchanglm-7.5B-sft-en-sharded": ModelType.IFT,
250
+ "euclaise/gpt-neox-122m-minipile-digits": ModelType.FT,
251
+ "stabilityai/StableBeluga1-Delta": ModelType.IFT,
252
+ "stabilityai/stablelm-tuned-alpha-7b": ModelType.IFT,
253
+ "stabilityai/StableBeluga2": ModelType.IFT,
254
+ "stabilityai/StableBeluga-13B": ModelType.IFT,
255
+ "stabilityai/StableBeluga-7B": ModelType.IFT,
256
+ "stabilityai/stablelm-base-alpha-7b": ModelType.PT,
257
+ "stabilityai/stablelm-base-alpha-3b": ModelType.PT,
258
+ "stabilityai/stablelm-tuned-alpha-3b": ModelType.IFT,
259
+ "alibidaran/medical_transcription_generator": ModelType.FT,
260
+ "CalderaAI/30B-Lazarus": ModelType.IFT,
261
+ "CalderaAI/13B-BlueMethod": ModelType.IFT,
262
+ "CalderaAI/13B-Ouroboros": ModelType.IFT,
263
+ "KoboldAI/OPT-13B-Erebus": ModelType.FT,
264
+ "KoboldAI/GPT-J-6B-Janeway": ModelType.FT,
265
+ "KoboldAI/GPT-J-6B-Shinen": ModelType.FT,
266
+ "KoboldAI/fairseq-dense-2.7B": ModelType.PT,
267
+ "KoboldAI/OPT-6B-nerys-v2": ModelType.FT,
268
+ "KoboldAI/GPT-NeoX-20B-Skein": ModelType.FT,
269
+ "KoboldAI/PPO_Pygway-6b-Mix": ModelType.FT,
270
+ "KoboldAI/fairseq-dense-6.7B": ModelType.PT,
271
+ "KoboldAI/fairseq-dense-125M": ModelType.PT,
272
+ "KoboldAI/OPT-13B-Nerybus-Mix": ModelType.FT,
273
+ "KoboldAI/OPT-2.7B-Erebus": ModelType.FT,
274
+ "KoboldAI/OPT-350M-Nerys-v2": ModelType.FT,
275
+ "KoboldAI/OPT-2.7B-Nerys-v2": ModelType.FT,
276
+ "KoboldAI/OPT-2.7B-Nerybus-Mix": ModelType.FT,
277
+ "KoboldAI/OPT-13B-Nerys-v2": ModelType.FT,
278
+ "KoboldAI/GPT-NeoX-20B-Erebus": ModelType.FT,
279
+ "KoboldAI/OPT-6.7B-Erebus": ModelType.FT,
280
+ "KoboldAI/fairseq-dense-355M": ModelType.PT,
281
+ "KoboldAI/OPT-6.7B-Nerybus-Mix": ModelType.FT,
282
+ "KoboldAI/GPT-J-6B-Adventure": ModelType.FT,
283
+ "KoboldAI/OPT-350M-Erebus": ModelType.FT,
284
+ "KoboldAI/GPT-J-6B-Skein": ModelType.FT,
285
+ "KoboldAI/OPT-30B-Erebus": ModelType.FT,
286
+ "klosax/pythia-160m-deduped-step92k-193bt": ModelType.PT,
287
+ "klosax/open_llama_3b_350bt_preview": ModelType.PT,
288
+ "klosax/openllama-3b-350bt": ModelType.PT,
289
+ "klosax/pythia-70m-deduped-step44k-92bt": ModelType.PT,
290
+ "klosax/open_llama_13b_600bt_preview": ModelType.PT,
291
+ "klosax/open_llama_7b_400bt_preview": ModelType.PT,
292
+ "kfkas/Llama-2-ko-7b-Chat": ModelType.IFT,
293
+ "WeOpenML/Alpaca-7B-v1": ModelType.IFT,
294
+ "WeOpenML/PandaLM-Alpaca-7B-v1": ModelType.IFT,
295
+ "TFLai/gpt2-turkish-uncased": ModelType.FT,
296
+ "ehartford/WizardLM-13B-Uncensored": ModelType.IFT,
297
+ "ehartford/dolphin-llama-13b": ModelType.IFT,
298
+ "ehartford/Wizard-Vicuna-30B-Uncensored": ModelType.FT,
299
+ "ehartford/WizardLM-30B-Uncensored": ModelType.IFT,
300
+ "ehartford/Wizard-Vicuna-13B-Uncensored": ModelType.FT,
301
+ "ehartford/WizardLM-7B-Uncensored": ModelType.IFT,
302
+ "ehartford/based-30b": ModelType.FT,
303
+ "ehartford/Wizard-Vicuna-7B-Uncensored": ModelType.FT,
304
+ "wahaha1987/llama_7b_sharegpt94k_fastchat": ModelType.FT,
305
+ "wahaha1987/llama_13b_sharegpt94k_fastchat": ModelType.FT,
306
+ "OpenAssistant/oasst-sft-1-pythia-12b": ModelType.FT,
307
+ "OpenAssistant/stablelm-7b-sft-v7-epoch-3": ModelType.IFT,
308
+ "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": ModelType.FT,
309
+ "OpenAssistant/pythia-12b-sft-v8-2.5k-steps": ModelType.IFT,
310
+ "OpenAssistant/pythia-12b-sft-v8-7k-steps": ModelType.IFT,
311
+ "OpenAssistant/pythia-12b-pre-v8-12.5k-steps": ModelType.IFT,
312
+ "OpenAssistant/llama2-13b-orca-8k-3319": ModelType.IFT,
313
+ "junelee/wizard-vicuna-13b": ModelType.FT,
314
+ "BreadAi/gpt-YA-1-1_160M": ModelType.PT,
315
+ "BreadAi/MuseCan": ModelType.PT,
316
+ "BreadAi/MusePy-1-2": ModelType.PT,
317
+ "BreadAi/DiscordPy": ModelType.PT,
318
+ "BreadAi/PM_modelV2": ModelType.PT,
319
+ "BreadAi/gpt-Youtube": ModelType.PT,
320
+ "BreadAi/StoryPy": ModelType.FT,
321
+ "julianweng/Llama-2-7b-chat-orcah": ModelType.FT,
322
+ "AGI-inc/lora_moe_7b_baseline": ModelType.FT,
323
+ "AGI-inc/lora_moe_7b": ModelType.FT,
324
+ "togethercomputer/GPT-NeoXT-Chat-Base-20B": ModelType.IFT,
325
+ "togethercomputer/RedPajama-INCITE-Chat-7B-v0.1": ModelType.IFT,
326
+ "togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1": ModelType.IFT,
327
+ "togethercomputer/RedPajama-INCITE-7B-Base": ModelType.PT,
328
+ "togethercomputer/RedPajama-INCITE-7B-Instruct": ModelType.IFT,
329
+ "togethercomputer/RedPajama-INCITE-Base-3B-v1": ModelType.PT,
330
+ "togethercomputer/Pythia-Chat-Base-7B": ModelType.IFT,
331
+ "togethercomputer/RedPajama-INCITE-Base-7B-v0.1": ModelType.PT,
332
+ "togethercomputer/GPT-JT-6B-v1": ModelType.IFT,
333
+ "togethercomputer/GPT-JT-6B-v0": ModelType.IFT,
334
+ "togethercomputer/RedPajama-INCITE-Chat-3B-v1": ModelType.IFT,
335
+ "togethercomputer/RedPajama-INCITE-7B-Chat": ModelType.IFT,
336
+ "togethercomputer/RedPajama-INCITE-Instruct-3B-v1": ModelType.IFT,
337
+ "Writer/camel-5b-hf": ModelType.IFT,
338
+ "Writer/palmyra-base": ModelType.PT,
339
+ "MBZUAI/LaMini-GPT-1.5B": ModelType.IFT,
340
+ "MBZUAI/lamini-cerebras-111m": ModelType.IFT,
341
+ "MBZUAI/lamini-neo-1.3b": ModelType.IFT,
342
+ "MBZUAI/lamini-cerebras-1.3b": ModelType.IFT,
343
+ "MBZUAI/lamini-cerebras-256m": ModelType.IFT,
344
+ "MBZUAI/LaMini-GPT-124M": ModelType.IFT,
345
+ "MBZUAI/lamini-neo-125m": ModelType.IFT,
346
+ "TehVenom/DiffMerge-DollyGPT-Pygmalion": ModelType.FT,
347
+ "TehVenom/PPO_Shygmalion-6b": ModelType.FT,
348
+ "TehVenom/Dolly_Shygmalion-6b-Dev_V8P2": ModelType.FT,
349
+ "TehVenom/Pygmalion_AlpacaLora-7b": ModelType.FT,
350
+ "TehVenom/PPO_Pygway-V8p4_Dev-6b": ModelType.FT,
351
+ "TehVenom/Dolly_Malion-6b": ModelType.FT,
352
+ "TehVenom/PPO_Shygmalion-V8p4_Dev-6b": ModelType.FT,
353
+ "TehVenom/ChanMalion": ModelType.FT,
354
+ "TehVenom/GPT-J-Pyg_PPO-6B": ModelType.IFT,
355
+ "TehVenom/Pygmalion-13b-Merged": ModelType.FT,
356
+ "TehVenom/Metharme-13b-Merged": ModelType.IFT,
357
+ "TehVenom/Dolly_Shygmalion-6b": ModelType.FT,
358
+ "TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4": ModelType.IFT,
359
+ "georgesung/llama2_7b_chat_uncensored": ModelType.FT,
360
+ "vicgalle/gpt2-alpaca": ModelType.IFT,
361
+ "vicgalle/alpaca-7b": ModelType.FT,
362
+ "vicgalle/gpt2-alpaca-gpt4": ModelType.IFT,
363
+ "facebook/opt-350m": ModelType.PT,
364
+ "facebook/opt-125m": ModelType.PT,
365
+ "facebook/xglm-4.5B": ModelType.PT,
366
+ "facebook/opt-2.7b": ModelType.PT,
367
+ "facebook/opt-6.7b": ModelType.PT,
368
+ "facebook/galactica-30b": ModelType.PT,
369
+ "facebook/opt-13b": ModelType.PT,
370
+ "facebook/opt-66b": ModelType.PT,
371
+ "facebook/xglm-7.5B": ModelType.PT,
372
+ "facebook/xglm-564M": ModelType.PT,
373
+ "facebook/opt-30b": ModelType.PT,
374
+ "golaxy/gogpt-7b": ModelType.FT,
375
+ "golaxy/gogpt2-7b": ModelType.FT,
376
+ "golaxy/gogpt-7b-bloom": ModelType.FT,
377
+ "golaxy/gogpt-3b-bloom": ModelType.FT,
378
+ "psmathur/orca_mini_v2_7b": ModelType.IFT,
379
+ "psmathur/orca_mini_7b": ModelType.IFT,
380
+ "psmathur/orca_mini_3b": ModelType.IFT,
381
+ "psmathur/orca_mini_v2_13b": ModelType.IFT,
382
+ "gpt2-xl": ModelType.PT,
383
+ "lxe/Cerebras-GPT-2.7B-Alpaca-SP": ModelType.FT,
384
+ "Monero/Manticore-13b-Chat-Pyg-Guanaco": ModelType.FT,
385
+ "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b": ModelType.IFT,
386
+ "Monero/WizardLM-13b-OpenAssistant-Uncensored": ModelType.IFT,
387
+ "Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b": ModelType.IFT,
388
+ "jzjiao/opt-1.3b-rlhf": ModelType.FT,
389
+ "HuggingFaceH4/starchat-beta": ModelType.IFT,
390
+ "KnutJaegersberg/gpt-2-xl-EvolInstruct": ModelType.IFT,
391
+ "KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct": ModelType.IFT,
392
+ "KnutJaegersberg/galactica-orca-wizardlm-1.3b": ModelType.IFT,
393
+ "openchat/openchat_8192": ModelType.IFT,
394
+ "openchat/openchat_v2": ModelType.IFT,
395
+ "openchat/openchat_v2_w": ModelType.IFT,
396
+ "ausboss/llama-13b-supercot": ModelType.IFT,
397
+ "ausboss/llama-30b-supercot": ModelType.IFT,
398
+ "Neko-Institute-of-Science/metharme-7b": ModelType.IFT,
399
+ "Neko-Institute-of-Science/pygmalion-7b": ModelType.FT,
400
+ "SebastianSchramm/Cerebras-GPT-111M-instruction": ModelType.IFT,
401
+ "victor123/WizardLM-13B-1.0": ModelType.IFT,
402
+ "OpenBuddy/openbuddy-openllama-13b-v7-fp16": ModelType.FT,
403
+ "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16": ModelType.FT,
404
+ "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16": ModelType.FT,
405
+ "baichuan-inc/Baichuan-7B": ModelType.PT,
406
+ "tiiuae/falcon-40b-instruct": ModelType.IFT,
407
+ "tiiuae/falcon-40b": ModelType.PT,
408
+ "tiiuae/falcon-7b": ModelType.PT,
409
+ "YeungNLP/firefly-llama-13b": ModelType.FT,
410
+ "YeungNLP/firefly-llama-13b-v1.2": ModelType.FT,
411
+ "YeungNLP/firefly-llama2-13b": ModelType.FT,
412
+ "YeungNLP/firefly-ziya-13b": ModelType.FT,
413
+ "shaohang/Sparse0.5_OPT-1.3": ModelType.FT,
414
+ "xzuyn/Alpacino-SuperCOT-13B": ModelType.IFT,
415
+ "xzuyn/MedicWizard-7B": ModelType.FT,
416
+ "xDAN-AI/xDAN_13b_l2_lora": ModelType.FT,
417
+ "beomi/KoAlpaca-Polyglot-5.8B": ModelType.FT,
418
+ "beomi/llama-2-ko-7b": ModelType.IFT,
419
+ "Salesforce/codegen-6B-multi": ModelType.PT,
420
+ "Salesforce/codegen-16B-nl": ModelType.PT,
421
+ "Salesforce/codegen-6B-nl": ModelType.PT,
422
+ "ai-forever/rugpt3large_based_on_gpt2": ModelType.FT,
423
+ "gpt2-large": ModelType.PT,
424
+ "frank098/orca_mini_3b_juniper": ModelType.FT,
425
+ "frank098/WizardLM_13B_juniper": ModelType.FT,
426
+ "FPHam/Free_Sydney_13b_HF": ModelType.FT,
427
+ "huggingface/llama-13b": ModelType.PT,
428
+ "huggingface/llama-7b": ModelType.PT,
429
+ "huggingface/llama-65b": ModelType.PT,
430
+ "huggingface/llama-30b": ModelType.PT,
431
+ "Henk717/chronoboros-33B": ModelType.IFT,
432
+ "jondurbin/airoboros-13b-gpt4-1.4": ModelType.IFT,
433
+ "jondurbin/airoboros-7b": ModelType.IFT,
434
+ "jondurbin/airoboros-7b-gpt4": ModelType.IFT,
435
+ "jondurbin/airoboros-7b-gpt4-1.1": ModelType.IFT,
436
+ "jondurbin/airoboros-7b-gpt4-1.2": ModelType.IFT,
437
+ "jondurbin/airoboros-7b-gpt4-1.3": ModelType.IFT,
438
+ "jondurbin/airoboros-7b-gpt4-1.4": ModelType.IFT,
439
+ "jondurbin/airoboros-l2-7b-gpt4-1.4.1": ModelType.IFT,
440
+ "jondurbin/airoboros-l2-13b-gpt4-1.4.1": ModelType.IFT,
441
+ "jondurbin/airoboros-l2-70b-gpt4-1.4.1": ModelType.IFT,
442
+ "jondurbin/airoboros-13b": ModelType.IFT,
443
+ "jondurbin/airoboros-33b-gpt4-1.4": ModelType.IFT,
444
+ "jondurbin/airoboros-33b-gpt4-1.2": ModelType.IFT,
445
+ "jondurbin/airoboros-65b-gpt4-1.2": ModelType.IFT,
446
+ "ariellee/SuperPlatty-30B": ModelType.IFT,
447
+ "danielhanchen/open_llama_3b_600bt_preview": ModelType.FT,
448
+ "cerebras/Cerebras-GPT-256M": ModelType.PT,
449
+ "cerebras/Cerebras-GPT-1.3B": ModelType.PT,
450
+ "cerebras/Cerebras-GPT-13B": ModelType.PT,
451
+ "cerebras/Cerebras-GPT-2.7B": ModelType.PT,
452
+ "cerebras/Cerebras-GPT-111M": ModelType.PT,
453
+ "cerebras/Cerebras-GPT-6.7B": ModelType.PT,
454
+ "Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf": ModelType.RL,
455
+ "Yhyu13/llama-30B-hf-openassitant": ModelType.FT,
456
+ "NousResearch/Nous-Hermes-Llama2-13b": ModelType.IFT,
457
+ "NousResearch/Nous-Hermes-llama-2-7b": ModelType.IFT,
458
+ "NousResearch/Redmond-Puffin-13B": ModelType.IFT,
459
+ "NousResearch/Nous-Hermes-13b": ModelType.IFT,
460
+ "project-baize/baize-v2-7b": ModelType.IFT,
461
+ "project-baize/baize-v2-13b": ModelType.IFT,
462
+ "LLMs/WizardLM-13B-V1.0": ModelType.FT,
463
+ "LLMs/AlpacaGPT4-7B-elina": ModelType.FT,
464
+ "wenge-research/yayi-7b": ModelType.FT,
465
+ "wenge-research/yayi-7b-llama2": ModelType.FT,
466
+ "wenge-research/yayi-13b-llama2": ModelType.FT,
467
+ "yhyhy3/open_llama_7b_v2_med_instruct": ModelType.IFT,
468
+ "llama-anon/instruct-13b": ModelType.IFT,
469
+ "huggingtweets/jerma985": ModelType.FT,
470
+ "huggingtweets/gladosystem": ModelType.FT,
471
+ "huggingtweets/bladeecity-jerma985": ModelType.FT,
472
+ "huggyllama/llama-13b": ModelType.PT,
473
+ "huggyllama/llama-65b": ModelType.PT,
474
+ "FabbriSimo01/Facebook_opt_1.3b_Quantized": ModelType.PT,
475
+ "upstage/Llama-2-70b-instruct": ModelType.IFT,
476
+ "upstage/Llama-2-70b-instruct-1024": ModelType.IFT,
477
+ "upstage/llama-65b-instruct": ModelType.IFT,
478
+ "upstage/llama-30b-instruct-2048": ModelType.IFT,
479
+ "upstage/llama-30b-instruct": ModelType.IFT,
480
+ "WizardLM/WizardLM-13B-1.0": ModelType.IFT,
481
+ "WizardLM/WizardLM-13B-V1.1": ModelType.IFT,
482
+ "WizardLM/WizardLM-13B-V1.2": ModelType.IFT,
483
+ "WizardLM/WizardLM-30B-V1.0": ModelType.IFT,
484
+ "WizardLM/WizardCoder-15B-V1.0": ModelType.IFT,
485
+ "gpt2": ModelType.PT,
486
+ "keyfan/vicuna-chinese-replication-v1.1": ModelType.IFT,
487
+ "nthngdy/pythia-owt2-70m-100k": ModelType.FT,
488
+ "nthngdy/pythia-owt2-70m-50k": ModelType.FT,
489
+ "quantumaikr/KoreanLM-hf": ModelType.FT,
490
+ "quantumaikr/open_llama_7b_hf": ModelType.FT,
491
+ "quantumaikr/QuantumLM-70B-hf": ModelType.IFT,
492
+ "MayaPH/FinOPT-Lincoln": ModelType.FT,
493
+ "MayaPH/FinOPT-Franklin": ModelType.FT,
494
+ "MayaPH/GodziLLa-30B": ModelType.IFT,
495
+ "MayaPH/GodziLLa-30B-plus": ModelType.IFT,
496
+ "MayaPH/FinOPT-Washington": ModelType.FT,
497
+ "ogimgio/gpt-neo-125m-neurallinguisticpioneers": ModelType.FT,
498
+ "layoric/llama-2-13b-code-alpaca": ModelType.FT,
499
+ "CobraMamba/mamba-gpt-3b": ModelType.FT,
500
+ "CobraMamba/mamba-gpt-3b-v2": ModelType.FT,
501
+ "CobraMamba/mamba-gpt-3b-v3": ModelType.FT,
502
+ "timdettmers/guanaco-33b-merged": ModelType.FT,
503
+ "elinas/chronos-33b": ModelType.IFT,
504
+ "heegyu/RedTulu-Uncensored-3B-0719": ModelType.IFT,
505
+ "heegyu/WizardVicuna-Uncensored-3B-0719": ModelType.IFT,
506
+ "heegyu/WizardVicuna-3B-0719": ModelType.IFT,
507
+ "meta-llama/Llama-2-7b-chat-hf": ModelType.RL,
508
+ "meta-llama/Llama-2-7b-hf": ModelType.PT,
509
+ "meta-llama/Llama-2-13b-chat-hf": ModelType.RL,
510
+ "meta-llama/Llama-2-13b-hf": ModelType.PT,
511
+ "meta-llama/Llama-2-70b-chat-hf": ModelType.RL,
512
+ "meta-llama/Llama-2-70b-hf": ModelType.PT,
513
+ "xhyi/PT_GPTNEO350_ATG": ModelType.FT,
514
+ "h2oai/h2ogpt-gm-oasst1-en-1024-20b": ModelType.FT,
515
+ "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt": ModelType.FT,
516
+ "h2oai/h2ogpt-oig-oasst1-512-6_9b": ModelType.IFT,
517
+ "h2oai/h2ogpt-oasst1-512-12b": ModelType.IFT,
518
+ "h2oai/h2ogpt-oig-oasst1-256-6_9b": ModelType.IFT,
519
+ "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt": ModelType.FT,
520
+ "h2oai/h2ogpt-oasst1-512-20b": ModelType.IFT,
521
+ "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2": ModelType.FT,
522
+ "h2oai/h2ogpt-gm-oasst1-en-1024-12b": ModelType.FT,
523
+ "h2oai/h2ogpt-gm-oasst1-multilang-1024-20b": ModelType.FT,
524
+ "bofenghuang/vigogne-13b-instruct": ModelType.IFT,
525
+ "bofenghuang/vigogne-13b-chat": ModelType.FT,
526
+ "bofenghuang/vigogne-2-7b-instruct": ModelType.IFT,
527
+ "bofenghuang/vigogne-7b-instruct": ModelType.IFT,
528
+ "bofenghuang/vigogne-7b-chat": ModelType.FT,
529
+ "Vmware/open-llama-7b-v2-open-instruct": ModelType.IFT,
530
+ "VMware/open-llama-0.7T-7B-open-instruct-v1.1": ModelType.IFT,
531
+ "ewof/koishi-instruct-3b": ModelType.IFT,
532
+ "gywy/llama2-13b-chinese-v1": ModelType.FT,
533
+ "GOAT-AI/GOAT-7B-Community": ModelType.FT,
534
+ "psyche/kollama2-7b": ModelType.FT,
535
+ "TheTravellingEngineer/llama2-7b-hf-guanaco": ModelType.FT,
536
+ "beaugogh/pythia-1.4b-deduped-sharegpt": ModelType.FT,
537
+ "augtoma/qCammel-70-x": ModelType.IFT,
538
+ "Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload": ModelType.IFT,
539
+ "anhnv125/pygmalion-6b-roleplay": ModelType.FT,
540
+ "64bits/LexPodLM-13B": ModelType.FT,
541
+ }
542
+
543
+
+ def model_type_from_str(type_str):
+     # Map a free-form type string (or its emoji marker) back to a ModelType.
+     # `type_str` avoids shadowing the `type` builtin.
+     if "fine-tuned" in type_str or "🔶" in type_str:
+         return ModelType.FT
+     if "pretrained" in type_str or "🟢" in type_str:
+         return ModelType.PT
+     if "RL-tuned" in type_str or "🟦" in type_str:
+         return ModelType.RL
+     if "instruction-tuned" in type_str or "⭕" in type_str:
+         return ModelType.IFT
+     return ModelType.Unknown
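
For context, a minimal sketch of how the hardcoded table and the fallback parser could be combined — assuming the dictionary above is bound to a name like MODEL_TYPE_METADATA (its actual name is defined earlier in this file) and that ModelType members carry the emoji symbols matched above; the get_model_type helper is hypothetical:

def get_model_type(model_name: str, type_hint: str = "") -> ModelType:
    # Prefer the hardcoded per-model metadata; otherwise parse a free-form hint.
    if model_name in MODEL_TYPE_METADATA:
        return MODEL_TYPE_METADATA[model_name]
    return model_type_from_str(type_hint)

assert get_model_type("gpt2") is ModelType.PT
assert get_model_type("some-org/unlisted-model", "instruction-tuned") is ModelType.IFT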
src/display_models/read_results.py ADDED
@@ -0,0 +1,152 @@
+ import json
+ import os
+ from dataclasses import dataclass
+ from distutils.util import strtobool  # note: distutils is deprecated (removed in Python 3.12)
+ from typing import Dict, List, Tuple
+
+ import dateutil.parser
+ import numpy as np
+
+ from src.display_models.utils import AutoEvalColumn, make_clickable_model
+
+ # TODO (Hyunwoo): check whether ko_commongen_v2 should really be scored with acc_norm
+ METRICS = ["acc_norm", "acc_norm", "acc", "mc2", "acc_norm"]
+ BENCHMARKS = ["ko_arc_challenge", "ko_hellaswag", "ko_mmlu", "ko_truthfulqa_mc", "ko_commongen_v2"]  # , "ethicalverification"]
+ BENCH_TO_NAME = {
+     "ko_arc_challenge": AutoEvalColumn.arc.name,
+     "ko_hellaswag": AutoEvalColumn.hellaswag.name,
+     "ko_mmlu": AutoEvalColumn.mmlu.name,
+     "ko_truthfulqa_mc": AutoEvalColumn.truthfulqa.name,
+     "ko_commongen_v2": AutoEvalColumn.commongen_v2.name,
+     # TODO: Uncomment when we have results for these
+     # "ethicalverification": AutoEvalColumn.ethicalverification.name,
+ }
+ IS_PUBLIC = bool(strtobool(os.environ.get("IS_PUBLIC", "True")))
+
+
+ @dataclass
+ class EvalResult:
+     eval_name: str
+     org: str
+     model: str
+     revision: str
+     results: dict
+     precision: str = ""
+     model_type: str = ""
+     weight_type: str = ""
+
+     def to_dict(self):
+         from src.load_from_hub import is_model_on_hub
+
+         if self.org is not None:
+             base_model = f"{self.org}/{self.model}"
+         else:
+             base_model = f"{self.model}"
+         data_dict = {}
+
+         data_dict["eval_name"] = self.eval_name  # not a column, just a save name
+         data_dict["weight_type"] = self.weight_type  # not a column, just a save name
+         data_dict[AutoEvalColumn.precision.name] = self.precision
+         data_dict[AutoEvalColumn.model_type.name] = self.model_type
+         data_dict[AutoEvalColumn.model.name] = make_clickable_model(base_model)
+         data_dict[AutoEvalColumn.dummy.name] = base_model
+         data_dict[AutoEvalColumn.revision.name] = self.revision
+         # Average over the full benchmark set (was a hardcoded 5.0)
+         data_dict[AutoEvalColumn.average.name] = sum(self.results.values()) / len(BENCHMARKS)
+         data_dict[AutoEvalColumn.still_on_hub.name] = (
+             is_model_on_hub(base_model, self.revision)[0] or base_model == "baseline"
+         )
+
+         for benchmark in BENCHMARKS:
+             if benchmark not in self.results.keys():
+                 self.results[benchmark] = None
+
+         for k, v in BENCH_TO_NAME.items():
+             data_dict[v] = self.results[k]
+
+         return data_dict
+
+
+ def parse_eval_result(json_filepath: str) -> Tuple[str, List[EvalResult]]:
+     with open(json_filepath) as fp:
+         data = json.load(fp)
+
+     try:
+         config = data["config"]
+     except KeyError:
+         config = data["config_general"]
+     model = config.get("model_name", None)
+     if model is None:
+         model = config.get("model_args", None)
+
+     model_sha = config.get("model_sha", "")
+     model_split = model.split("/", 1)
+
+     precision = config.get("model_dtype")
+
+     if len(model_split) == 1:
+         org = None
+         model = model_split[0]
+         result_key = f"{model}_{precision}"
+     else:
+         org = model_split[0]
+         model = model_split[1]
+         result_key = f"{org}_{model}_{precision}"
+
+     eval_results = []
+     for benchmark, metric in zip(BENCHMARKS, METRICS):
+         accs = np.array([v.get(metric, None) for k, v in data["results"].items() if benchmark in k])
+         if accs.size == 0 or any(acc is None for acc in accs):
+             continue
+         mean_acc = np.mean(accs) * 100.0
+         eval_results.append(
+             EvalResult(
+                 eval_name=result_key,
+                 org=org,
+                 model=model,
+                 revision=model_sha,
+                 results={benchmark: mean_acc},
+                 precision=precision,  # todo: model_type=, weight_type=
+             )
+         )
+
+     return result_key, eval_results
+
+
+ def get_eval_results() -> List[EvalResult]:
+     json_filepaths = []
+
+     for root, _dirs, files in os.walk("eval-results" + ("-private" if not IS_PUBLIC else "")):
+         # We should only have json files in model results
+         if len(files) == 0 or any(not f.endswith(".json") for f in files):
+             continue
+
+         # Sort the files by the date encoded in their name; fall back to the last file if parsing fails
+         # store results by precision maybe?
+         try:
+             files.sort(key=lambda x: dateutil.parser.parse(x.split("_", 1)[-1][:-5]))
+         except dateutil.parser.ParserError:
+             files = [files[-1]]
+
+         # up_to_date = files[-1]
+         for file in files:
+             json_filepaths.append(os.path.join(root, file))
+
+     eval_results = {}
+     for json_filepath in json_filepaths:
+         result_key, results = parse_eval_result(json_filepath)
+         for eval_result in results:
+             if result_key in eval_results.keys():
+                 eval_results[result_key].results.update(eval_result.results)
+             else:
+                 eval_results[result_key] = eval_result
+
+     return list(eval_results.values())
+
+
+ def get_eval_results_dicts() -> List[Dict]:
+     eval_results = get_eval_results()
+
+     return [e.to_dict() for e in eval_results]
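
To make the expected input concrete, here is a sketch of a result file parse_eval_result can ingest; the file name, harness-style result keys, and scores are illustrative only, not real leaderboard data:

import json

from src.display_models.read_results import parse_eval_result

# Illustrative file content; real files are produced by the evaluation harness
# under eval-results/<org>/<model>/.
sample = {
    "config": {"model_name": "beomi/llama-2-ko-7b", "model_sha": "abc123", "model_dtype": "float16"},
    "results": {
        "harness|ko_arc_challenge|25": {"acc_norm": 0.41},
        "harness|ko_hellaswag|10": {"acc_norm": 0.55},
    },
}
with open("sample_result.json", "w") as fp:
    json.dump(sample, fp)

result_key, results = parse_eval_result("sample_result.json")
# result_key == "beomi_llama-2-ko-7b_float16"
# results holds one EvalResult per benchmark found, with scores scaled to 0-100.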
src/display_models/utils.py ADDED
@@ -0,0 +1,149 @@
+ import os
+ from dataclasses import dataclass
+
+ from huggingface_hub import HfApi
+
+ API = HfApi()
+
+
+ # These classes hold the user-facing column names, to avoid having to change them
+ # all around the code when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+     displayed_by_default: bool
+     hidden: bool = False
+
+
+ def fields(raw_class):
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+
+ @dataclass(frozen=True)
+ class AutoEvalColumn:  # Auto evals column
+     model_type_symbol = ColumnContent("T", "str", True)
+     model = ColumnContent("Model", "markdown", True)
+     average = ColumnContent("Average ⬆️", "number", True)
+     arc = ColumnContent("Ko-ARC", "number", True)
+     hellaswag = ColumnContent("Ko-HellaSwag", "number", True)
+     mmlu = ColumnContent("Ko-MMLU", "number", True)
+     truthfulqa = ColumnContent("Ko-TruthfulQA", "number", True)
+     commongen_v2 = ColumnContent("Ko-CommonGen V2", "number", True)
+     # TODO: Uncomment when we have results for these
+     # ethicalverification = ColumnContent("EthicalVerification", "number", True)
+     model_type = ColumnContent("Type", "str", False)
+     precision = ColumnContent("Precision", "str", False)
+     license = ColumnContent("Hub License", "str", False)
+     params = ColumnContent("#Params (B)", "number", False)
+     likes = ColumnContent("Hub ❤️", "number", False)
+     still_on_hub = ColumnContent("Available on the hub", "bool", False)
+     revision = ColumnContent("Model sha", "str", False, False)
+     dummy = ColumnContent(
+         "model_name_for_query", "str", True
+     )  # dummy col to implement the search bar (hidden by custom CSS)
+
+
+ @dataclass(frozen=True)
+ class EloEvalColumn:  # Elo evals column
+     model = ColumnContent("Model", "markdown", True)
+     gpt4 = ColumnContent("GPT-4 (all)", "number", True)
+     human_all = ColumnContent("Human (all)", "number", True)
+     human_instruct = ColumnContent("Human (instruct)", "number", True)
+     human_code_instruct = ColumnContent("Human (code-instruct)", "number", True)
+
+
+ @dataclass(frozen=True)
+ class EvalQueueColumn:  # Queue column
+     model = ColumnContent("model", "markdown", True)
+     revision = ColumnContent("revision", "str", True)
+     private = ColumnContent("private", "bool", True)
+     precision = ColumnContent("precision", "str", True)
+     weight_type = ColumnContent("weight_type", "str", True)  # was the string "Original", but this field expects a bool
+     status = ColumnContent("status", "str", True)
+
+
+ LLAMAS = [
+     "huggingface/llama-7b",
+     "huggingface/llama-13b",
+     "huggingface/llama-30b",
+     "huggingface/llama-65b",
+ ]
+
+
+ KOALA_LINK = "https://huggingface.co/TheBloke/koala-13B-HF"
+ VICUNA_LINK = "https://huggingface.co/lmsys/vicuna-13b-delta-v1.1"
+ OASST_LINK = "https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
+ DOLLY_LINK = "https://huggingface.co/databricks/dolly-v2-12b"
+ MODEL_PAGE = "https://huggingface.co/models"
+ LLAMA_LINK = "https://ai.facebook.com/blog/large-language-model-llama-meta-ai/"
+ # Was a second assignment to VICUNA_LINK, which silently overwrote the lmsys link above
+ STABLE_VICUNA_LINK = "https://huggingface.co/CarperAI/stable-vicuna-13b-delta"
+ ALPACA_LINK = "https://crfm.stanford.edu/2023/03/13/alpaca.html"
+
+
+ def model_hyperlink(link, model_name):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+
+     if model_name in LLAMAS:
+         link = LLAMA_LINK
+         model_name = model_name.split("/")[1]
+     elif model_name == "HuggingFaceH4/stable-vicuna-13b-2904":
+         link = STABLE_VICUNA_LINK
+         model_name = "stable-vicuna-13b"
+     elif model_name == "HuggingFaceH4/llama-7b-ift-alpaca":
+         link = ALPACA_LINK
+         model_name = "alpaca-13b"
+     if model_name == "dolly-12b":
+         link = DOLLY_LINK
+     elif model_name == "vicuna-13b":
+         link = VICUNA_LINK
+     elif model_name == "koala-13b":
+         link = KOALA_LINK
+     elif model_name == "oasst-12b":
+         link = OASST_LINK
+
+     details_model_name = model_name.replace("/", "__")
+     # details_link = f"https://huggingface.co/datasets/open-ko-llm-leaderboard/details_{details_model_name}"
+
+     # if not bool(os.getenv("DEBUG", "False")):
+     #     # We only add these checks when not debugging, as they are extremely slow
+     #     print(f"details_link: {details_link}")
+     #     try:
+     #         check_path = list(
+     #             API.list_files_info(
+     #                 repo_id=f"open-ko-llm-leaderboard/details_{details_model_name}",
+     #                 paths="README.md",
+     #                 repo_type="dataset",
+     #             )
+     #         )
+     #         print(f"check_path: {check_path}")
+     #     except Exception as err:
+     #         # No details repo for this model
+     #         print(f"No details repo for this model: {err}")
+     #         return model_hyperlink(link, model_name)
+
+     return model_hyperlink(link, model_name)  # + " " + model_hyperlink(details_link, "📑")
+
+
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+
+
+ def has_no_nan_values(df, columns):
+     return df[columns].notna().all(axis=1)
+
+
+ def has_nan_values(df, columns):
+     return df[columns].isna().any(axis=1)
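
A short usage sketch for the helpers above; the column filter mirrors how the app would plausibly build its default column list from the registry classes:

from src.display_models.utils import AutoEvalColumn, fields, make_clickable_model

# Collect the default visible leaderboard columns from the registry above.
cols = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]

# Renders an anchor tag pointing at the model's Hub page (or a special-cased link).
html = make_clickable_model("beomi/llama-2-ko-7b")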
src/load_from_hub.py ADDED
@@ -0,0 +1,145 @@
+ import json
+ import os
+ from collections import defaultdict
+ from typing import Optional
+
+ import pandas as pd
+ from huggingface_hub import Repository
+ from transformers import AutoConfig
+
+ from src.assets.hardcoded_evals import baseline
+ from src.display_models.get_model_metadata import apply_metadata
+ from src.display_models.read_results import get_eval_results_dicts
+ from src.display_models.utils import AutoEvalColumn, EvalQueueColumn, has_no_nan_values, make_clickable_model
+
+
+ def get_all_requested_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
+     depth = 1
+     file_names = []
+     users_to_submission_dates = defaultdict(list)
+
+     for root, _, files in os.walk(requested_models_dir):
+         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+         if current_depth == depth:
+             for file in files:
+                 if not file.endswith(".json"):
+                     continue
+                 with open(os.path.join(root, file), "r") as f:
+                     info = json.load(f)
+                 file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+
+                 # Select organisation
+                 if info["model"].count("/") == 0 or "submitted_time" not in info:
+                     continue
+                 organisation, _ = info["model"].split("/")
+                 users_to_submission_dates[organisation].append(info["submitted_time"])
+
+     return set(file_names), users_to_submission_dates
+
+
+ def load_all_info_from_hub(
+     QUEUE_REPO: str, RESULTS_REPO: str, QUEUE_PATH: str, RESULTS_PATH: str
+ ) -> tuple[Repository, set[str], Repository, dict[str, list[str]]]:
+     print("Pulling evaluation requests and results.")
+
+     eval_queue_repo = Repository(
+         local_dir=QUEUE_PATH,
+         clone_from=QUEUE_REPO,
+         repo_type="dataset",
+     )
+     eval_queue_repo.git_pull()
+
+     eval_results_repo = Repository(
+         local_dir=RESULTS_PATH,
+         clone_from=RESULTS_REPO,
+         repo_type="dataset",
+     )
+     eval_results_repo.git_pull()
+
+     requested_models, users_to_submission_dates = get_all_requested_models("eval-queue")
+
+     return eval_queue_repo, requested_models, eval_results_repo, users_to_submission_dates
+
+
+ def get_leaderboard_df(
+     eval_results: Repository, eval_results_private: Repository, cols: list, benchmark_cols: list
+ ) -> pd.DataFrame:
+     if eval_results:
+         print("Pulling evaluation results for the leaderboard.")
+         eval_results.git_pull()
+     if eval_results_private:
+         print("Pulling private evaluation results for the leaderboard.")
+         eval_results_private.git_pull()
+
+     all_data = get_eval_results_dicts()
+
+     # all_data.append(baseline)
+     apply_metadata(all_data)  # Populate model type based on known hardcoded values in `metadata.py`
+
+     df = pd.DataFrame.from_records(all_data)
+     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+     df = df[cols].round(decimals=2)
+
+     # Filter out rows where any of the benchmarks is missing
+     df = df[has_no_nan_values(df, benchmark_cols)]
+     return df
+
+
+ def get_evaluation_queue_df(
+     eval_queue: Repository, eval_queue_private: Repository, save_path: str, cols: list
+ ) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
+     if eval_queue:
+         print("Pulling changes for the evaluation queue.")
+         eval_queue.git_pull()
+     if eval_queue_private:
+         print("Pulling changes for the private evaluation queue.")
+         eval_queue_private.git_pull()
+
+     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
+     all_evals = []
+
+     for entry in entries:
+         if entry.endswith(".json"):
+             file_path = os.path.join(save_path, entry)
+             with open(file_path) as fp:
+                 data = json.load(fp)
+
+             data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+
+             all_evals.append(data)
+         elif ".md" not in entry:
+             # this is a folder
+             sub_entries = [e for e in os.listdir(os.path.join(save_path, entry)) if not e.startswith(".")]
+             for sub_entry in sub_entries:
+                 file_path = os.path.join(save_path, entry, sub_entry)
+                 with open(file_path) as fp:
+                     data = json.load(fp)
+
+                 data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+                 all_evals.append(data)
+
+     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
+     running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
+     df_running = pd.DataFrame.from_records(running_list, columns=cols)
+     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
+     return df_finished[cols], df_running[cols], df_pending[cols]
+
+
+ def is_model_on_hub(model_name: str, revision: str) -> tuple[bool, Optional[str]]:
+     try:
+         AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=False)
+         return True, None
+
+     except ValueError:
+         return (
+             False,
+             "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
+         )
+
+     except Exception as e:
+         print(f"Could not get the model config from the hub: {e}")
+         return False, "was not found on hub!"
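
Putting the pieces together, app startup might look roughly like the sketch below; the repo ids and local paths are placeholders, not necessarily the Space's actual configuration:

from src.display_models.utils import AutoEvalColumn, fields
from src.load_from_hub import get_leaderboard_df, load_all_info_from_hub

eval_queue_repo, requested_models, eval_results_repo, users_to_submission_dates = load_all_info_from_hub(
    QUEUE_REPO="open-ko-llm-leaderboard/requests",   # placeholder repo id
    RESULTS_REPO="open-ko-llm-leaderboard/results",  # placeholder repo id
    QUEUE_PATH="eval-queue",
    RESULTS_PATH="eval-results",
)

cols = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
benchmark_cols = [
    AutoEvalColumn.arc.name,
    AutoEvalColumn.hellaswag.name,
    AutoEvalColumn.mmlu.name,
    AutoEvalColumn.truthfulqa.name,
    AutoEvalColumn.commongen_v2.name,
]
leaderboard_df = get_leaderboard_df(eval_results_repo, None, cols, benchmark_cols)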
src/rate_limiting.py ADDED
@@ -0,0 +1,16 @@
+ from datetime import datetime, timedelta, timezone
+
+
+ def user_submission_permission(submission_name, users_to_submission_dates, rate_limit_period):
+     # Count how many submissions this org/user has made inside the rate-limit window.
+     # `submission_name` is expected to be of the form "org/model".
+     org_or_user, _ = submission_name.split("/")
+     if org_or_user not in users_to_submission_dates:
+         return 0
+     submission_dates = sorted(users_to_submission_dates[org_or_user])
+
+     # ISO-8601 timestamps of this form compare correctly as plain strings.
+     time_limit = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
+     submissions_after_timelimit = [d for d in submission_dates if d > time_limit]
+
+     return len(submissions_after_timelimit)
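
Finally, a sketch of how a submission handler could consume this counter; the 7-day window and 5-submission quota are example values only, and users_to_submission_dates is the mapping returned by get_all_requested_models:

from src.display_models.utils import styled_error
from src.rate_limiting import user_submission_permission

RATE_LIMIT_PERIOD = 7  # days; example value
RATE_LIMIT_QUOTA = 5   # submissions per window; example value

num_recent = user_submission_permission("some-org/some-model", users_to_submission_dates, RATE_LIMIT_PERIOD)
if num_recent >= RATE_LIMIT_QUOTA:
    message = styled_error(
        f"Organisation already has {num_recent} submissions in the last {RATE_LIMIT_PERIOD} days, please try later."
    )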