Clémentine committed on
Commit
08ae6c5
1 Parent(s): 55cc480

updated backend

Browse files
README.md CHANGED
@@ -10,8 +10,6 @@ pinned: true
10
  license: apache-2.0
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
-
15
  Most of the variables to change for a default leaderboard are in src/env (replace the path for your leaderboard) and src/about.
16
 
17
  Results files should have the following format:
@@ -37,4 +35,4 @@ Request files are created automatically by this tool.
37
 
38
  If you encounter a problem on the space, don't hesitate to restart it to remove the created eval-queue, eval-queue-bk, eval-results and eval-results-bk folders.
39
 
40
- If you want to run your own backend, you only need to change the logic in src/backend/run_eval_suite, which at the moment launches the Eleuther AI Harness.
 
10
  license: apache-2.0
11
  ---
12
 
 
 
13
  Most of the variables to change for a default leaderboard are in src/env (replace the path for your leaderboard) and src/about.
14
 
15
  Results files should have the following format:
 
35
 
36
  If you encounter a problem on the space, don't hesitate to restart it to remove the created eval-queue, eval-queue-bk, eval-results and eval-results-bk folders.
37
 
38
+ If you want to run your own backend, you only need to change the logic in src/backend/run_eval_suite_..., which at the moment launches the Eleuther AI Harness or Lighteval, and edit app.py to point to the correct file.
app.py CHANGED
@@ -1,353 +1,27 @@
 
 
1
  import subprocess
2
  import gradio as gr
3
- import pandas as pd
4
  from apscheduler.schedulers.background import BackgroundScheduler
5
- from huggingface_hub import snapshot_download
6
 
7
- from src.about import (
8
- CITATION_BUTTON_LABEL,
9
- CITATION_BUTTON_TEXT,
10
- EVALUATION_QUEUE_TEXT,
11
- INTRODUCTION_TEXT,
12
- LLM_BENCHMARKS_TEXT,
13
- TITLE,
14
- )
15
- from src.display.css_html_js import custom_css
16
- from src.display.utils import (
17
- BENCHMARK_COLS,
18
- COLS,
19
- EVAL_COLS,
20
- EVAL_TYPES,
21
- NUMERIC_INTERVALS,
22
- TYPES,
23
- AutoEvalColumn,
24
- ModelType,
25
- fields,
26
- WeightType,
27
- Precision
28
- )
29
- from src.envs import API, DEVICE, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
30
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
31
- from src.submission.submit import add_new_eval
32
 
 
33
 
34
- subprocess.run(["python", "scripts/fix_harness_import.py"])
 
35
 
36
- def restart_space():
37
- API.restart_space(repo_id=REPO_ID)
38
 
39
  def launch_backend():
40
- _ = subprocess.run(["python", "main_backend.py"])
41
-
42
- try:
43
- print(EVAL_REQUESTS_PATH)
44
- snapshot_download(
45
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
46
- )
47
- except Exception:
48
- restart_space()
49
- try:
50
- print(EVAL_RESULTS_PATH)
51
- snapshot_download(
52
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
53
- )
54
- except Exception:
55
- restart_space()
56
-
57
-
58
- raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
59
- leaderboard_df = original_df.copy()
60
-
61
- (
62
- finished_eval_queue_df,
63
- running_eval_queue_df,
64
- pending_eval_queue_df,
65
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
66
-
67
-
68
- # Searching and filtering
69
- def update_table(
70
- hidden_df: pd.DataFrame,
71
- columns: list,
72
- type_query: list,
73
- precision_query: str,
74
- size_query: list,
75
- show_deleted: bool,
76
- query: str,
77
- ):
78
- filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
79
- filtered_df = filter_queries(query, filtered_df)
80
- df = select_columns(filtered_df, columns)
81
- return df
82
-
83
-
84
- def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
85
- return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
86
-
87
-
88
- def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
89
- always_here_cols = [
90
- AutoEvalColumn.model_type_symbol.name,
91
- AutoEvalColumn.model.name,
92
- ]
93
- # We use COLS to maintain sorting
94
- filtered_df = df[
95
- always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
96
- ]
97
- return filtered_df
98
-
99
-
100
- def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
101
- final_df = []
102
- if query != "":
103
- queries = [q.strip() for q in query.split(";")]
104
- for _q in queries:
105
- _q = _q.strip()
106
- if _q != "":
107
- temp_filtered_df = search_table(filtered_df, _q)
108
- if len(temp_filtered_df) > 0:
109
- final_df.append(temp_filtered_df)
110
- if len(final_df) > 0:
111
- filtered_df = pd.concat(final_df)
112
- filtered_df = filtered_df.drop_duplicates(
113
- subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
114
- )
115
-
116
- return filtered_df
117
-
118
-
119
- def filter_models(
120
- df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
121
- ) -> pd.DataFrame:
122
- # Show all models
123
- if show_deleted:
124
- filtered_df = df
125
- else: # Show only still on the hub models
126
- filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
127
 
128
- type_emoji = [t[0] for t in type_query]
129
- filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
130
- filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
131
-
132
- numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
133
- params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
134
- mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
135
- filtered_df = filtered_df.loc[mask]
136
-
137
- return filtered_df
138
-
139
-
140
- demo = gr.Blocks(css=custom_css)
141
  with demo:
142
- gr.HTML(TITLE)
143
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
144
-
145
- with gr.Tabs(elem_classes="tab-buttons") as tabs:
146
- with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
147
- with gr.Row():
148
- with gr.Column():
149
- with gr.Row():
150
- search_bar = gr.Textbox(
151
- placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
152
- show_label=False,
153
- elem_id="search-bar",
154
- )
155
- with gr.Row():
156
- shown_columns = gr.CheckboxGroup(
157
- choices=[
158
- c.name
159
- for c in fields(AutoEvalColumn)
160
- if not c.hidden and not c.never_hidden and not c.dummy
161
- ],
162
- value=[
163
- c.name
164
- for c in fields(AutoEvalColumn)
165
- if c.displayed_by_default and not c.hidden and not c.never_hidden
166
- ],
167
- label="Select columns to show",
168
- elem_id="column-select",
169
- interactive=True,
170
- )
171
- with gr.Row():
172
- deleted_models_visibility = gr.Checkbox(
173
- value=False, label="Show gated/private/deleted models", interactive=True
174
- )
175
- with gr.Column(min_width=320):
176
- #with gr.Box(elem_id="box-filter"):
177
- filter_columns_type = gr.CheckboxGroup(
178
- label="Model types",
179
- choices=[t.to_str() for t in ModelType],
180
- value=[t.to_str() for t in ModelType],
181
- interactive=True,
182
- elem_id="filter-columns-type",
183
- )
184
- filter_columns_precision = gr.CheckboxGroup(
185
- label="Precision",
186
- choices=[i.value.name for i in Precision],
187
- value=[i.value.name for i in Precision],
188
- interactive=True,
189
- elem_id="filter-columns-precision",
190
- )
191
- filter_columns_size = gr.CheckboxGroup(
192
- label="Model sizes (in billions of parameters)",
193
- choices=list(NUMERIC_INTERVALS.keys()),
194
- value=list(NUMERIC_INTERVALS.keys()),
195
- interactive=True,
196
- elem_id="filter-columns-size",
197
- )
198
-
199
- leaderboard_table = gr.components.Dataframe(
200
- value=leaderboard_df[
201
- [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
202
- + shown_columns.value
203
- + [AutoEvalColumn.dummy.name]
204
- ],
205
- headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
206
- datatype=TYPES,
207
- elem_id="leaderboard-table",
208
- interactive=False,
209
- visible=True,
210
- column_widths=["2%", "33%"]
211
- )
212
-
213
- # Dummy leaderboard for handling the case when the user uses backspace key
214
- hidden_leaderboard_table_for_search = gr.components.Dataframe(
215
- value=original_df[COLS],
216
- headers=COLS,
217
- datatype=TYPES,
218
- visible=False,
219
- )
220
- search_bar.submit(
221
- update_table,
222
- [
223
- hidden_leaderboard_table_for_search,
224
- shown_columns,
225
- filter_columns_type,
226
- filter_columns_precision,
227
- filter_columns_size,
228
- deleted_models_visibility,
229
- search_bar,
230
- ],
231
- leaderboard_table,
232
- )
233
- for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
234
- selector.change(
235
- update_table,
236
- [
237
- hidden_leaderboard_table_for_search,
238
- shown_columns,
239
- filter_columns_type,
240
- filter_columns_precision,
241
- filter_columns_size,
242
- deleted_models_visibility,
243
- search_bar,
244
- ],
245
- leaderboard_table,
246
- queue=True,
247
- )
248
-
249
- with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
250
- gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
251
-
252
- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
253
- with gr.Column():
254
- with gr.Row():
255
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
256
-
257
- with gr.Column():
258
- with gr.Accordion(
259
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
260
- open=False,
261
- ):
262
- with gr.Row():
263
- finished_eval_table = gr.components.Dataframe(
264
- value=finished_eval_queue_df,
265
- headers=EVAL_COLS,
266
- datatype=EVAL_TYPES,
267
- row_count=5,
268
- )
269
- with gr.Accordion(
270
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
271
- open=False,
272
- ):
273
- with gr.Row():
274
- running_eval_table = gr.components.Dataframe(
275
- value=running_eval_queue_df,
276
- headers=EVAL_COLS,
277
- datatype=EVAL_TYPES,
278
- row_count=5,
279
- )
280
-
281
- with gr.Accordion(
282
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
283
- open=False,
284
- ):
285
- with gr.Row():
286
- pending_eval_table = gr.components.Dataframe(
287
- value=pending_eval_queue_df,
288
- headers=EVAL_COLS,
289
- datatype=EVAL_TYPES,
290
- row_count=5,
291
- )
292
- with gr.Row():
293
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
294
-
295
- with gr.Row():
296
- with gr.Column():
297
- model_name_textbox = gr.Textbox(label="Model name")
298
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
299
- model_type = gr.Dropdown(
300
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
301
- label="Model type",
302
- multiselect=False,
303
- value=None,
304
- interactive=True,
305
- )
306
-
307
- with gr.Column():
308
- precision = gr.Dropdown(
309
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
310
- label="Precision",
311
- multiselect=False,
312
- value="float16" if DEVICE != "cpu" else "float32",
313
- interactive=True,
314
- )
315
- weight_type = gr.Dropdown(
316
- choices=[i.value.name for i in WeightType],
317
- label="Weights type",
318
- multiselect=False,
319
- value="Original",
320
- interactive=True,
321
- )
322
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
323
-
324
- submit_button = gr.Button("Submit Eval")
325
- submission_result = gr.Markdown()
326
- submit_button.click(
327
- add_new_eval,
328
- [
329
- model_name_textbox,
330
- base_model_name_textbox,
331
- revision_name_textbox,
332
- precision,
333
- weight_type,
334
- model_type,
335
- ],
336
- submission_result,
337
- )
338
-
339
- with gr.Row():
340
- with gr.Accordion("📙 Citation", open=False):
341
- citation_button = gr.Textbox(
342
- value=CITATION_BUTTON_TEXT,
343
- label=CITATION_BUTTON_LABEL,
344
- lines=20,
345
- elem_id="citation-button",
346
- show_copy_button=True,
347
- )
348
-
349
  scheduler = BackgroundScheduler()
350
- scheduler.add_job(restart_space, "interval", seconds=1800)
351
- scheduler.add_job(launch_backend, "interval", seconds=100) # will only allow one job to be run at the same time
352
  scheduler.start()
353
  demo.queue(default_concurrency_limit=40).launch()
 
1
+ import sys
2
+ import logging
3
  import subprocess
4
  import gradio as gr
 
5
  from apscheduler.schedulers.background import BackgroundScheduler
 
6
 
7
+ logging.basicConfig(level=logging.ERROR)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
+ from src.logging import LOGGER, read_logs
10
 
11
+ sys.stdout = LOGGER
12
+ sys.stderr = LOGGER
13
 
14
+ subprocess.run(["python", "scripts/fix_harness_import.py"])
 
15
 
16
  def launch_backend():
17
+ _ = subprocess.run(["python", "main_backend_lighteval.py"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
+ demo = gr.Blocks()
 
 
 
 
 
 
 
 
 
 
 
 
20
  with demo:
21
+ logs = gr.Code(interactive=False)
22
+ demo.load(read_logs, None, logs, every=1)
23
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  scheduler = BackgroundScheduler()
25
+ scheduler.add_job(launch_backend, "interval", seconds=60) # will only allow one job to be run at the same time
 
26
  scheduler.start()
27
  demo.queue(default_concurrency_limit=40).launch()
custom_tasks.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ruff: noqa: F405, F403, F401
2
+ """
3
+ Custom evaluation tasks for lighteval. Copy this file and complete it with the info for your task.
4
+
5
+ This file generally create just a TASKS_TABLE and TASKS_GROUPS which are then imported by LightEval.
6
+
7
+ Author:
8
+ """
9
+ from lighteval.tasks.lighteval_task import LightevalTaskConfig
10
+ from lighteval.tasks.requests import Doc
11
+ from lighteval.tasks.tasks_prompt_formatting import LETTER_INDICES
12
+
13
+
14
+ ## EVAL WITH NO SUBSET ##
15
+ # This is how you create a simple tasks (like hellaswag) which has one single subset
16
+ # attached to it, and one evaluation possible.
17
+ task = LightevalTaskConfig(
18
+ name="myothertask",
19
+ prompt_function="prompt_fn", # must be defined in the file or imported from src/lighteval/tasks/tasks_prompt_formatting.py
20
+ suite=["community"],
21
+ hf_repo="",
22
+ hf_subset="default",
23
+ hf_avail_splits=[],
24
+ evaluation_splits=[],
25
+ few_shots_split="",
26
+ few_shots_select="",
27
+ metric=[""],
28
+ )
29
+
30
+ ## EVALS WITH SUBSET
31
+ # This is how you create a subset task (like MMLU), which has several subset
32
+ # each being its own evaluation task.
33
+
34
+ # fmt: off
35
+ SAMPLE_SUBSETS = [] # list of all the subsets to use for this eval
36
+ # fmt: on
37
+
38
+
39
class CustomSubsetTask(LightevalTaskConfig):
    """One leaderboard evaluation task bound to a single dataset subset.

    Template stub: every field except `name` and `hf_subset` is a fixed
    placeholder to be filled in for your own benchmark.
    """

    def __init__(self, name, hf_subset):
        super().__init__(
            name=name,
            hf_subset=hf_subset,
            prompt_function="prompt_fn",  # must be defined in the file
            hf_repo="",
            metric=[""],
            hf_avail_splits=[],
            evaluation_splits=[],
            few_shots_split="",
            few_shots_select="",
            suite=["community"],
            generation_size=-1,
            stop_sequence=None,
            output_regex=None,
            frozen=False,
        )
61
+
62
+
63
+ ## DEFINE YOUR PROMPT FUNCTIONS
64
+ # Define as many as you need for your different tasks
65
def prompt_fn(line, task_name: str = None):
    """Convert one raw dataset line into a lighteval `Doc`.

    Template stub: fill in `query`, `choices`, `gold_index` (and optionally
    `instruction`) for your task. See examples in
    src/lighteval/tasks/tasks_prompt_formatting.py or the README for what
    this function should do.
    """
    return Doc(task_name=task_name, query="", choices="", gold_index=0, instruction="")
77
+
78
+
79
## STORE YOUR EVALS
SUBSET_TASKS = [CustomSubsetTask(name=f"mytask:{subset}", hf_subset=subset) for subset in SAMPLE_SUBSETS]
_TASKS = SUBSET_TASKS + [task]

## MODULE LOGIC
# You should not need to touch this
# Convert to dict for lighteval (use `t`, not `task`, to avoid shadowing the
# module-level `task` config above).
TASKS_TABLE = [t.as_dict() for t in _TASKS]

if __name__ == "__main__":
    # Materialize the names into a list: printing a bare generator expression
    # would show "<generator object ...>" instead of the task names.
    print([t["name"] for t in TASKS_TABLE])
    print(len(TASKS_TABLE))
main_backend.py → main_backend_harness.py RENAMED
@@ -5,7 +5,7 @@ from huggingface_hub import snapshot_download
5
 
6
  logging.getLogger("openai").setLevel(logging.WARNING)
7
 
8
- from src.backend.run_eval_suite import run_evaluation
9
  from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
10
  from src.backend.sort_queue import sort_models_by_priority
11
 
 
5
 
6
  logging.getLogger("openai").setLevel(logging.WARNING)
7
 
8
+ from backend.run_eval_suite_harness import run_evaluation
9
  from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
10
  from src.backend.sort_queue import sort_models_by_priority
11
 
main_backend_lighteval.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import pprint

from huggingface_hub import snapshot_download

logging.getLogger("openai").setLevel(logging.WARNING)

# NOTE(review): the sibling imports below all use the "src." package prefix;
# "from backend...." would only resolve if src/ itself were on sys.path, so
# the prefix is added here for consistency — confirm against how the backend
# script is launched.
from src.backend.run_eval_suite_lighteval import run_evaluation
from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
from src.backend.sort_queue import sort_models_by_priority

from src.envs import QUEUE_REPO, EVAL_REQUESTS_PATH_BACKEND, RESULTS_REPO, EVAL_RESULTS_PATH_BACKEND, API, LIMIT, TOKEN, ACCELERATOR, VENDOR, REGION
from src.about import TASKS_LIGHTEVAL

logging.basicConfig(level=logging.ERROR)
pp = pprint.PrettyPrinter(width=80)

# Lifecycle statuses an eval request moves through in the queue dataset.
PENDING_STATUS = "PENDING"
RUNNING_STATUS = "RUNNING"
FINISHED_STATUS = "FINISHED"
FAILED_STATUS = "FAILED"

# Mirror the results and request-queue datasets locally before processing.
snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
25
+
26
def run_auto_eval():
    """Process at most one eval request from the queue with Lighteval.

    First marks any RUNNING evals whose results have landed as FINISHED (or
    FAILED), then takes the highest-priority PENDING request, flips it to
    RUNNING and evaluates it.
    """
    statuses_to_run = [PENDING_STATUS]

    # pull the eval dataset from the hub and parse any eval requests
    # check completed evals and set them to finished
    check_completed_evals(
        api=API,
        checked_status=RUNNING_STATUS,
        completed_status=FINISHED_STATUS,
        failed_status=FAILED_STATUS,
        hf_repo=QUEUE_REPO,
        local_dir=EVAL_REQUESTS_PATH_BACKEND,
        hf_repo_results=RESULTS_REPO,
        local_dir_results=EVAL_RESULTS_PATH_BACKEND,
    )

    # Get all eval requests in the statuses we want to run (PENDING by
    # default; change statuses_to_run to process other states), sorted so
    # the first submitted is the first run.
    requests = get_eval_requests(job_status=statuses_to_run, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
    requests = sort_models_by_priority(api=API, models=requests)

    print(f"Found {len(requests)} {','.join(statuses_to_run)} eval requests")

    if not requests:
        return

    request = requests[0]
    pp.pprint(request)

    set_eval_request(
        api=API,
        eval_request=request,
        set_to_status=RUNNING_STATUS,
        hf_repo=QUEUE_REPO,
        local_dir=EVAL_REQUESTS_PATH_BACKEND,
    )

    # NOTE(review): get_instance_for_model is neither defined nor imported in
    # this file (the original marks it "This needs to be done") — this call
    # will raise NameError until it is implemented. Confirm where it should
    # come from before deploying.
    instance_size, instance_type = get_instance_for_model(request)

    run_evaluation(
        eval_request=request,
        task_names=TASKS_LIGHTEVAL,
        local_dir=EVAL_RESULTS_PATH_BACKEND,
        batch_size=1,
        accelerator=ACCELERATOR,
        region=REGION,
        vendor=VENDOR,
        instance_size=instance_size,
        instance_type=instance_type,
        limit=LIMIT,
    )


if __name__ == "__main__":
    run_auto_eval()
requirements.txt CHANGED
@@ -14,5 +14,6 @@ tqdm==4.65.0
14
  transformers==4.35.2
15
  tokenizers>=0.15.0
16
  git+https://github.com/EleutherAI/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval
 
17
  accelerate==0.24.1
18
  sentencepiece
 
14
  transformers==4.35.2
15
  tokenizers>=0.15.0
16
  git+https://github.com/EleutherAI/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval
17
+ git+https://github.com/huggingface/lighteval.git#egg=lighteval
18
  accelerate==0.24.1
19
  sentencepiece
src/about.py CHANGED
@@ -8,7 +8,7 @@ class Task:
8
  col_name: str
9
 
10
 
11
- # Select your tasks here
12
  # ---------------------------------------------------
13
  class Tasks(Enum):
14
  # task_key in the json file, metric_key in the json file, name to display in the leaderboard
@@ -16,57 +16,9 @@ class Tasks(Enum):
16
  task1 = Task("logiqa", "acc_norm", "LogiQA")
17
 
18
  NUM_FEWSHOT = 0 # Change with your few shot
19
- # ---------------------------------------------------
20
-
21
-
22
-
23
- # Your leaderboard name
24
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
25
-
26
- # What does your leaderboard evaluate?
27
- INTRODUCTION_TEXT = """
28
- Intro text
29
- """
30
-
31
- # Which evaluations are you running? how can people reproduce what you have?
32
- LLM_BENCHMARKS_TEXT = f"""
33
- ## How it works
34
-
35
- ## Reproducibility
36
- To reproduce our results, here is the commands you can run:
37
-
38
- """
39
 
40
- EVALUATION_QUEUE_TEXT = """
41
- ## Some good practices before submitting a model
42
-
43
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
44
- ```python
45
- from transformers import AutoConfig, AutoModel, AutoTokenizer
46
- config = AutoConfig.from_pretrained("your model name", revision=revision)
47
- model = AutoModel.from_pretrained("your model name", revision=revision)
48
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
49
- ```
50
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
51
-
52
- Note: make sure your model is public!
53
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
54
-
55
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
56
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
57
-
58
- ### 3) Make sure your model has an open license!
59
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
60
-
61
- ### 4) Fill up your model card
62
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
63
-
64
- ## In case of model failure
65
- If your model is displayed in the `FAILED` category, its execution stopped.
66
- Make sure you have followed the above steps first.
67
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
68
- """
69
 
70
- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
71
- CITATION_BUTTON_TEXT = r"""
72
- """
 
8
  col_name: str
9
 
10
 
11
+ # Change for your tasks here
12
  # ---------------------------------------------------
13
  class Tasks(Enum):
14
  # task_key in the json file, metric_key in the json file, name to display in the leaderboard
 
16
  task1 = Task("logiqa", "acc_norm", "LogiQA")
17
 
18
  NUM_FEWSHOT = 0 # Change with your few shot
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
+ TASKS_HARNESS = [task.value.benchmark for task in Tasks]
21
+ # ---------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
+ TASKS_LIGHTEVAL = "lighteval|anli:r1|0|0,lighteval|logiqa|0|0"
24
+ #custom|myothertask|0|0
 
src/backend/{run_eval_suite.py → run_eval_suite_harness.py} RENAMED
File without changes
src/backend/run_eval_suite_lighteval.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import logging
4
+ from datetime import datetime
5
+
6
+ from lighteval.main_accelerate import main
7
+
8
+ from src.envs import RESULTS_REPO, CACHE_PATH
9
+ from src.backend.manage_requests import EvalRequest
10
+
11
+ logging.getLogger("openai").setLevel(logging.WARNING)
12
+
13
def run_evaluation(eval_request: EvalRequest, task_names: str, batch_size: int, local_dir: str, accelerator: str, region: str, vendor: str, instance_size: str, instance_type: str, limit=None):
    """Run Lighteval on an inference endpoint for one eval request.

    Launches `lighteval.main_accelerate.main` against an endpoint named from
    the request's model and precision, then annotates the returned results
    dict with the request's precision, model name and revision under
    results["config"] before returning it.
    """
    if limit:
        print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")

    results = main(
        endpoint_model_name=f"{eval_request.model}_{eval_request.precision}".lower(),
        accelerator=accelerator,
        vendor=vendor,
        region=region,
        instance_size=instance_size,
        instance_type=instance_type,
        max_samples=limit,
        job_id=str(datetime.now()),
        push_results_to_hub=True,
        save_details=True,
        push_details_to_hub=True,
        public_run=False,
        cache_dir=CACHE_PATH,
        results_org=RESULTS_REPO,
        output_dir=local_dir,
        override_batch_size=batch_size,
        custom_tasks="custom_tasks.py",
        tasks=task_names,
    )

    # Tag the results with the request metadata so the leaderboard can
    # attribute them to the right model/precision/revision.
    results["config"]["model_dtype"] = eval_request.precision
    results["config"]["model_name"] = eval_request.model
    results["config"]["model_sha"] = eval_request.revision

    print(json.dumps(results, indent=2))

    return results
src/display/css_html_js.py DELETED
@@ -1,111 +0,0 @@
1
- custom_css = """
2
-
3
- .markdown-text {
4
- font-size: 16px !important;
5
- }
6
-
7
- #models-to-add-text {
8
- font-size: 18px !important;
9
- }
10
-
11
- #citation-button span {
12
- font-size: 16px !important;
13
- }
14
-
15
- #citation-button textarea {
16
- font-size: 16px !important;
17
- }
18
-
19
- #citation-button > label > button {
20
- margin: 6px;
21
- transform: scale(1.3);
22
- }
23
-
24
- #leaderboard-table {
25
- margin-top: 15px
26
- }
27
-
28
- #leaderboard-table-lite {
29
- margin-top: 15px
30
- }
31
-
32
- #search-bar-table-box > div:first-child {
33
- background: none;
34
- border: none;
35
- }
36
-
37
- #search-bar {
38
- padding: 0px;
39
- }
40
-
41
- /* Hides the final AutoEvalColumn */
42
- #llm-benchmark-tab-table table td:last-child,
43
- #llm-benchmark-tab-table table th:last-child {
44
- display: none;
45
- }
46
-
47
- /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
48
- table td:first-child,
49
- table th:first-child {
50
- max-width: 400px;
51
- overflow: auto;
52
- white-space: nowrap;
53
- }
54
-
55
- .tab-buttons button {
56
- font-size: 20px;
57
- }
58
-
59
- #scale-logo {
60
- border-style: none !important;
61
- box-shadow: none;
62
- display: block;
63
- margin-left: auto;
64
- margin-right: auto;
65
- max-width: 600px;
66
- }
67
-
68
- #scale-logo .download {
69
- display: none;
70
- }
71
- #filter_type{
72
- border: 0;
73
- padding-left: 0;
74
- padding-top: 0;
75
- }
76
- #filter_type label {
77
- display: flex;
78
- }
79
- #filter_type label > span{
80
- margin-top: var(--spacing-lg);
81
- margin-right: 0.5em;
82
- }
83
- #filter_type label > .wrap{
84
- width: 103px;
85
- }
86
- #filter_type label > .wrap .wrap-inner{
87
- padding: 2px;
88
- }
89
- #filter_type label > .wrap .wrap-inner input{
90
- width: 1px
91
- }
92
- #filter-columns-type{
93
- border:0;
94
- padding:0.5;
95
- }
96
- #filter-columns-size{
97
- border:0;
98
- padding:0.5;
99
- }
100
- #box-filter > .form{
101
- border: 0
102
- }
103
- """
104
-
105
- get_window_url_params = """
106
- function(url_params) {
107
- const params = new URLSearchParams(window.location.search);
108
- url_params = Object.fromEntries(params);
109
- return url_params;
110
- }
111
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/display/formatting.py DELETED
@@ -1,27 +0,0 @@
1
- def model_hyperlink(link, model_name):
2
- return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
3
-
4
-
5
- def make_clickable_model(model_name):
6
- link = f"https://huggingface.co/{model_name}"
7
- return model_hyperlink(link, model_name)
8
-
9
-
10
- def styled_error(error):
11
- return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
12
-
13
-
14
- def styled_warning(warn):
15
- return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
16
-
17
-
18
- def styled_message(message):
19
- return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
20
-
21
-
22
- def has_no_nan_values(df, columns):
23
- return df[columns].notna().all(axis=1)
24
-
25
-
26
- def has_nan_values(df, columns):
27
- return df[columns].isna().any(axis=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/display/utils.py DELETED
@@ -1,138 +0,0 @@
1
- from dataclasses import dataclass, make_dataclass
2
- from enum import Enum
3
-
4
- import pandas as pd
5
-
6
- from src.about import Tasks
7
-
8
- def fields(raw_class):
9
- return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
10
-
11
-
12
- # These classes are for user facing column names,
13
- # to avoid having to change them all around the code
14
- # when a modif is needed
15
- @dataclass
16
- class ColumnContent:
17
- name: str
18
- type: str
19
- displayed_by_default: bool
20
- hidden: bool = False
21
- never_hidden: bool = False
22
- dummy: bool = False
23
-
24
- ## Leaderboard columns
25
- auto_eval_column_dict = []
26
- # Init
27
- auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
28
- auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
29
- #Scores
30
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
31
- for task in Tasks:
32
- auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
33
- # Model information
34
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
35
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
36
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
37
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
38
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
39
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
40
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
41
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
42
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
43
- # Dummy column for the search bar (hidden by the custom CSS)
44
- auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
45
-
46
- # We use make dataclass to dynamically fill the scores from Tasks
47
- AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
48
-
49
- ## For the queue columns in the submission tab
50
- @dataclass(frozen=True)
51
- class EvalQueueColumn: # Queue column
52
- model = ColumnContent("model", "markdown", True)
53
- revision = ColumnContent("revision", "str", True)
54
- private = ColumnContent("private", "bool", True)
55
- precision = ColumnContent("precision", "str", True)
56
- weight_type = ColumnContent("weight_type", "str", "Original")
57
- status = ColumnContent("status", "str", True)
58
-
59
- ## All the model information that we might need
60
- @dataclass
61
- class ModelDetails:
62
- name: str
63
- display_name: str = ""
64
- symbol: str = "" # emoji
65
-
66
-
67
- class ModelType(Enum):
68
- PT = ModelDetails(name="pretrained", symbol="🟢")
69
- FT = ModelDetails(name="fine-tuned", symbol="🔶")
70
- IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
71
- RL = ModelDetails(name="RL-tuned", symbol="🟦")
72
- Unknown = ModelDetails(name="", symbol="?")
73
-
74
- def to_str(self, separator=" "):
75
- return f"{self.value.symbol}{separator}{self.value.name}"
76
-
77
- @staticmethod
78
- def from_str(type):
79
- if "fine-tuned" in type or "🔶" in type:
80
- return ModelType.FT
81
- if "pretrained" in type or "🟢" in type:
82
- return ModelType.PT
83
- if "RL-tuned" in type or "🟦" in type:
84
- return ModelType.RL
85
- if "instruction-tuned" in type or "⭕" in type:
86
- return ModelType.IFT
87
- return ModelType.Unknown
88
-
89
- class WeightType(Enum):
90
- Adapter = ModelDetails("Adapter")
91
- Original = ModelDetails("Original")
92
- Delta = ModelDetails("Delta")
93
-
94
- class Precision(Enum):
95
- float16 = ModelDetails("float16")
96
- bfloat16 = ModelDetails("bfloat16")
97
- float32 = ModelDetails("float32")
98
- #qt_8bit = ModelDetails("8bit")
99
- #qt_4bit = ModelDetails("4bit")
100
- #qt_GPTQ = ModelDetails("GPTQ")
101
- Unknown = ModelDetails("?")
102
-
103
- def from_str(precision):
104
- if precision in ["torch.float16", "float16"]:
105
- return Precision.float16
106
- if precision in ["torch.bfloat16", "bfloat16"]:
107
- return Precision.bfloat16
108
- if precision in ["float32"]:
109
- return Precision.float32
110
- #if precision in ["8bit"]:
111
- # return Precision.qt_8bit
112
- #if precision in ["4bit"]:
113
- # return Precision.qt_4bit
114
- #if precision in ["GPTQ", "None"]:
115
- # return Precision.qt_GPTQ
116
- return Precision.Unknown
117
-
118
- # Column selection
119
- COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
120
- TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
121
- COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
122
- TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
123
-
124
- EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
125
- EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
126
-
127
- BENCHMARK_COLS = [t.value.col_name for t in Tasks]
128
-
129
- NUMERIC_INTERVALS = {
130
- "?": pd.Interval(-1, 0, closed="right"),
131
- "~1.5": pd.Interval(0, 2, closed="right"),
132
- "~3": pd.Interval(2, 4, closed="right"),
133
- "~7": pd.Interval(4, 9, closed="right"),
134
- "~13": pd.Interval(9, 20, closed="right"),
135
- "~35": pd.Interval(20, 45, closed="right"),
136
- "~60": pd.Interval(45, 70, closed="right"),
137
- "70+": pd.Interval(70, 10000, closed="right"),
138
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/envs.py CHANGED
@@ -7,11 +7,18 @@ from huggingface_hub import HfApi
7
  TOKEN = os.environ.get("TOKEN") # A read/write token for your org
8
 
9
  OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request file
10
- DEVICE = "cpu" # "cuda:0" if you add compute
 
 
11
  LIMIT = 20 # !!!! Should be None for actual evaluations!!!
 
 
 
 
 
12
  # ----------------------------------
13
 
14
- REPO_ID = f"{OWNER}/leaderboard"
15
  QUEUE_REPO = f"{OWNER}/requests"
16
  RESULTS_REPO = f"{OWNER}/results"
17
 
@@ -25,3 +32,4 @@ EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
25
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
26
 
27
  API = HfApi(token=TOKEN)
 
 
7
  TOKEN = os.environ.get("TOKEN") # A read/write token for your org
8
 
9
  OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request file
10
+
11
+ # For harness evaluations
12
+ DEVICE = "cpu" # "cuda:0" if you add compute, for harness evaluations
13
  LIMIT = 20 # !!!! Should be None for actual evaluations!!!
14
+
15
+ # For lighteval evaluations
16
+ ACCELERATOR = ""
17
+ REGION = ""
18
+ VENDOR = ""
19
  # ----------------------------------
20
 
21
+ REPO_ID = f"{OWNER}/leaderboard-backend"
22
  QUEUE_REPO = f"{OWNER}/requests"
23
  RESULTS_REPO = f"{OWNER}/results"
24
 
 
32
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
33
 
34
  API = HfApi(token=TOKEN)
35
+
src/leaderboard/read_evals.py DELETED
@@ -1,195 +0,0 @@
1
- import glob
2
- import json
3
- import math
4
- import os
5
- from dataclasses import dataclass
6
-
7
- import dateutil
8
- import numpy as np
9
-
10
- from src.display.formatting import make_clickable_model
11
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
- from src.submission.check_validity import is_model_on_hub
13
-
14
-
15
- @dataclass
16
- class EvalResult:
17
- eval_name: str # org_model_precision (uid)
18
- full_model: str # org/model (path on hub)
19
- org: str
20
- model: str
21
- revision: str # commit hash, "" if main
22
- results: dict
23
- precision: Precision = Precision.Unknown
24
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
25
- weight_type: WeightType = WeightType.Original # Original or Adapter
26
- architecture: str = "Unknown"
27
- license: str = "?"
28
- likes: int = 0
29
- num_params: int = 0
30
- date: str = "" # submission date of request file
31
- still_on_hub: bool = False
32
-
33
- @classmethod
34
- def init_from_json_file(self, json_filepath):
35
- """Inits the result from the specific model result file"""
36
- with open(json_filepath) as fp:
37
- data = json.load(fp)
38
-
39
- config = data.get("config")
40
-
41
- # Precision
42
- precision = Precision.from_str(config.get("model_dtype"))
43
-
44
- # Get model and org
45
- org_and_model = config.get("model_name", config.get("model_args", None))
46
- org_and_model = org_and_model.split("/", 1)
47
-
48
- if len(org_and_model) == 1:
49
- org = None
50
- model = org_and_model[0]
51
- result_key = f"{model}_{precision.value.name}"
52
- else:
53
- org = org_and_model[0]
54
- model = org_and_model[1]
55
- result_key = f"{org}_{model}_{precision.value.name}"
56
- full_model = "/".join(org_and_model)
57
-
58
- still_on_hub, _, model_config = is_model_on_hub(
59
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
60
- )
61
- architecture = "?"
62
- if model_config is not None:
63
- architectures = getattr(model_config, "architectures", None)
64
- if architectures:
65
- architecture = ";".join(architectures)
66
-
67
- # Extract results available in this file (some results are split in several files)
68
- results = {}
69
- for task in Tasks:
70
- task = task.value
71
-
72
- # We average all scores of a given metric (not all metrics are present in all files)
73
- accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
74
- if accs.size == 0 or any([acc is None for acc in accs]):
75
- continue
76
-
77
- mean_acc = np.mean(accs) * 100.0
78
- results[task.benchmark] = mean_acc
79
-
80
- return self(
81
- eval_name=result_key,
82
- full_model=full_model,
83
- org=org,
84
- model=model,
85
- results=results,
86
- precision=precision,
87
- revision= config.get("model_sha", ""),
88
- still_on_hub=still_on_hub,
89
- architecture=architecture
90
- )
91
-
92
- def update_with_request_file(self, requests_path):
93
- """Finds the relevant request file for the current model and updates info with it"""
94
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
95
-
96
- try:
97
- with open(request_file, "r") as f:
98
- request = json.load(f)
99
- self.model_type = ModelType.from_str(request.get("model_type", ""))
100
- self.weight_type = WeightType[request.get("weight_type", "Original")]
101
- self.license = request.get("license", "?")
102
- self.likes = request.get("likes", 0)
103
- self.num_params = request.get("params", 0)
104
- self.date = request.get("submitted_time", "")
105
- except Exception:
106
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
107
-
108
- def to_dict(self):
109
- """Converts the Eval Result to a dict compatible with our dataframe display"""
110
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
111
- data_dict = {
112
- "eval_name": self.eval_name, # not a column, just a save name,
113
- AutoEvalColumn.precision.name: self.precision.value.name,
114
- AutoEvalColumn.model_type.name: self.model_type.value.name,
115
- AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
116
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
117
- AutoEvalColumn.architecture.name: self.architecture,
118
- AutoEvalColumn.model.name: make_clickable_model(self.full_model),
119
- AutoEvalColumn.dummy.name: self.full_model,
120
- AutoEvalColumn.revision.name: self.revision,
121
- AutoEvalColumn.average.name: average,
122
- AutoEvalColumn.license.name: self.license,
123
- AutoEvalColumn.likes.name: self.likes,
124
- AutoEvalColumn.params.name: self.num_params,
125
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
126
- }
127
-
128
- for task in Tasks:
129
- data_dict[task.value.col_name] = self.results[task.value.benchmark]
130
-
131
- return data_dict
132
-
133
-
134
- def get_request_file_for_model(requests_path, model_name, precision):
135
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
136
- request_files = os.path.join(
137
- requests_path,
138
- f"{model_name}_eval_request_*.json",
139
- )
140
- request_files = glob.glob(request_files)
141
-
142
- # Select correct request file (precision)
143
- request_file = ""
144
- request_files = sorted(request_files, reverse=True)
145
- for tmp_request_file in request_files:
146
- with open(tmp_request_file, "r") as f:
147
- req_content = json.load(f)
148
- if (
149
- req_content["status"] in ["FINISHED"]
150
- and req_content["precision"] == precision.split(".")[-1]
151
- ):
152
- request_file = tmp_request_file
153
- return request_file
154
-
155
-
156
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
157
- """From the path of the results folder root, extract all needed info for results"""
158
- model_result_filepaths = []
159
-
160
- for root, _, files in os.walk(results_path):
161
- # We should only have json files in model results
162
- if len(files) == 0 or any([not f.endswith(".json") for f in files]):
163
- continue
164
-
165
- # Sort the files by date
166
- try:
167
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
168
- except dateutil.parser._parser.ParserError:
169
- files = [files[-1]]
170
-
171
- for file in files:
172
- model_result_filepaths.append(os.path.join(root, file))
173
-
174
- eval_results = {}
175
- for model_result_filepath in model_result_filepaths:
176
- # Creation of result
177
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
178
- eval_result.update_with_request_file(requests_path)
179
-
180
- # Store results of same eval together
181
- eval_name = eval_result.eval_name
182
- if eval_name in eval_results.keys():
183
- eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
184
- else:
185
- eval_results[eval_name] = eval_result
186
-
187
- results = []
188
- for v in eval_results.values():
189
- try:
190
- v.to_dict() # we test if the dict version is complete
191
- results.append(v)
192
- except KeyError: # not all eval values present
193
- continue
194
-
195
- return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/submission/check_validity.py DELETED
@@ -1,97 +0,0 @@
1
- import json
2
- import os
3
- import re
4
- from collections import defaultdict
5
- from datetime import datetime, timedelta, timezone
6
-
7
- import huggingface_hub
8
- from huggingface_hub import ModelCard
9
- from huggingface_hub.hf_api import ModelInfo
10
- from transformers import AutoConfig
11
- from transformers.models.auto.tokenization_auto import AutoTokenizer
12
-
13
- def check_model_card(repo_id: str) -> tuple[bool, str]:
14
- """Checks if the model card and license exist and have been filled"""
15
- try:
16
- card = ModelCard.load(repo_id)
17
- except huggingface_hub.utils.EntryNotFoundError:
18
- return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
19
-
20
- # Enforce license metadata
21
- if card.data.license is None:
22
- if not ("license_name" in card.data and "license_link" in card.data):
23
- return False, (
24
- "License not found. Please add a license to your model card using the `license` metadata or a"
25
- " `license_name`/`license_link` pair."
26
- )
27
-
28
- # Enforce card content
29
- if len(card.text) < 200:
30
- return False, "Please add a description to your model card, it is too short."
31
-
32
- return True, ""
33
-
34
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
35
- try:
36
- config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
37
- if test_tokenizer:
38
- try:
39
- tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
40
- except ValueError as e:
41
- return (
42
- False,
43
- f"uses a tokenizer which is not in a transformers release: {e}",
44
- None
45
- )
46
- except Exception as e:
47
- return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
48
- return True, None, config
49
-
50
- except ValueError:
51
- return (
52
- False,
53
- "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
54
- None
55
- )
56
-
57
- except Exception as e:
58
- return False, "was not found on hub!", None
59
-
60
-
61
- def get_model_size(model_info: ModelInfo, precision: str):
62
- """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
63
- try:
64
- model_size = round(model_info.safetensors["total"] / 1e9, 3)
65
- except (AttributeError, TypeError):
66
- return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
67
-
68
- size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
69
- model_size = size_factor * model_size
70
- return model_size
71
-
72
- def get_model_arch(model_info: ModelInfo):
73
- """Gets the model architecture from the configuration"""
74
- return model_info.config.get("architectures", "Unknown")
75
-
76
- def already_submitted_models(requested_models_dir: str) -> set[str]:
77
- depth = 1
78
- file_names = []
79
- users_to_submission_dates = defaultdict(list)
80
-
81
- for root, _, files in os.walk(requested_models_dir):
82
- current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
83
- if current_depth == depth:
84
- for file in files:
85
- if not file.endswith(".json"):
86
- continue
87
- with open(os.path.join(root, file), "r") as f:
88
- info = json.load(f)
89
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
90
-
91
- # Select organisation
92
- if info["model"].count("/") == 0 or "submitted_time" not in info:
93
- continue
94
- organisation, _ = info["model"].split("/")
95
- users_to_submission_dates[organisation].append(info["submitted_time"])
96
-
97
- return set(file_names), users_to_submission_dates
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/submission/submit.py DELETED
@@ -1,119 +0,0 @@
1
- import json
2
- import os
3
- from datetime import datetime, timezone
4
-
5
- from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
- from src.submission.check_validity import (
8
- already_submitted_models,
9
- check_model_card,
10
- get_model_size,
11
- is_model_on_hub,
12
- )
13
-
14
- REQUESTED_MODELS = None
15
- USERS_TO_SUBMISSION_DATES = None
16
-
17
- def add_new_eval(
18
- model: str,
19
- base_model: str,
20
- revision: str,
21
- precision: str,
22
- weight_type: str,
23
- model_type: str,
24
- ):
25
- global REQUESTED_MODELS
26
- global USERS_TO_SUBMISSION_DATES
27
- if not REQUESTED_MODELS:
28
- REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
29
-
30
- user_name = ""
31
- model_path = model
32
- if "/" in model:
33
- user_name = model.split("/")[0]
34
- model_path = model.split("/")[1]
35
-
36
- precision = precision.split(" ")[0]
37
- current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
-
39
- if model_type is None or model_type == "":
40
- return styled_error("Please select a model type.")
41
-
42
- # Does the model actually exist?
43
- if revision == "":
44
- revision = "main"
45
-
46
- # Is the model on the hub?
47
- if weight_type in ["Delta", "Adapter"]:
48
- base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
- if not base_model_on_hub:
50
- return styled_error(f'Base model "{base_model}" {error}')
51
-
52
- if not weight_type == "Adapter":
53
- model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
- if not model_on_hub:
55
- return styled_error(f'Model "{model}" {error}')
56
-
57
- # Is the model info correctly filled?
58
- try:
59
- model_info = API.model_info(repo_id=model, revision=revision)
60
- except Exception:
61
- return styled_error("Could not get your model information. Please fill it up properly.")
62
-
63
- model_size = get_model_size(model_info=model_info, precision=precision)
64
-
65
- # Were the model card and license filled?
66
- try:
67
- license = model_info.cardData["license"]
68
- except Exception:
69
- return styled_error("Please select a license for your model")
70
-
71
- modelcard_OK, error_msg = check_model_card(model)
72
- if not modelcard_OK:
73
- return styled_error(error_msg)
74
-
75
- # Seems good, creating the eval
76
- print("Adding new eval")
77
-
78
- eval_entry = {
79
- "model": model,
80
- "base_model": base_model,
81
- "revision": revision,
82
- "precision": precision,
83
- "weight_type": weight_type,
84
- "status": "PENDING",
85
- "submitted_time": current_time,
86
- "model_type": model_type,
87
- "likes": model_info.likes,
88
- "params": model_size,
89
- "license": license,
90
- "private": False,
91
- }
92
-
93
- # Check for duplicate submission
94
- if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
95
- return styled_warning("This model has been already submitted.")
96
-
97
- print("Creating eval file")
98
- OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
- os.makedirs(OUT_DIR, exist_ok=True)
100
- out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
101
-
102
- with open(out_path, "w") as f:
103
- f.write(json.dumps(eval_entry))
104
-
105
- print("Uploading eval file")
106
- API.upload_file(
107
- path_or_fileobj=out_path,
108
- path_in_repo=out_path.split("eval-queue/")[1],
109
- repo_id=QUEUE_REPO,
110
- repo_type="dataset",
111
- commit_message=f"Add {model} to eval queue",
112
- )
113
-
114
- # Remove the local file
115
- os.remove(out_path)
116
-
117
- return styled_message(
118
- "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
119
- )