Omid Ghahroodi committed on
Commit 17fcfcd
Parent: e7748c5

Update demo
app.py CHANGED
@@ -248,87 +248,87 @@ with demo:
     with gr.Row():
         gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
-    with gr.Column():
-        with gr.Accordion(
-            f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                finished_eval_table = gr.components.Dataframe(
-                    value=finished_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-        with gr.Accordion(
-            f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                running_eval_table = gr.components.Dataframe(
-                    value=running_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-
-        with gr.Accordion(
-            f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                pending_eval_table = gr.components.Dataframe(
-                    value=pending_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-    with gr.Row():
-        gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-    with gr.Row():
-        with gr.Column():
-            model_name_textbox = gr.Textbox(label="Model name")
-            revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-            model_type = gr.Dropdown(
-                choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                label="Model type",
-                multiselect=False,
-                value=None,
-                interactive=True,
-            )
-
-        with gr.Column():
-            precision = gr.Dropdown(
-                choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                label="Precision",
-                multiselect=False,
-                value="float16",
-                interactive=True,
-            )
-            weight_type = gr.Dropdown(
-                choices=[i.value.name for i in WeightType],
-                label="Weights type",
-                multiselect=False,
-                value="Original",
-                interactive=True,
-            )
-            base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-    submit_button = gr.Button("Submit Eval")
-    submission_result = gr.Markdown()
-    submit_button.click(
-        add_new_eval,
-        [
-            model_name_textbox,
-            base_model_name_textbox,
-            revision_name_textbox,
-            precision,
-            weight_type,
-            model_type,
-        ],
-        submission_result,
-    )
+    # with gr.Column():
+    #     with gr.Accordion(
+    #         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
[... and so on: the whole removed block above is re-added verbatim, commented out line-for-line ...]
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
eval-queue/.DS_Store ADDED
Binary file (6.15 kB).
 
eval-queue/.gitattributes ADDED
@@ -0,0 +1,55 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
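These are the stock Hugging Face LFS rules: every file matching a pattern is stored as an LFS pointer rather than in plain git. A rough Python illustration of which files they catch, with the caveat that `fnmatch` only approximates gitattributes globbing (it does not reproduce path rules like `saved_model/**/*` exactly):

```python
from fnmatch import fnmatch

# A handful of the patterns listed above.
lfs_patterns = ["*.bin", "*.safetensors", "*.parquet", "*.png"]

for name in ["model.safetensors", "results.json", "plot.png"]:
    tracked = any(fnmatch(name, pattern) for pattern in lfs_patterns)
    print(f"{name}: {'LFS' if tracked else 'plain git'}")
# model.safetensors: LFS
# results.json: plain git  (request/result JSON stays diffable)
# plot.png: LFS
```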
eval-queue/leaderboard/demo-leaderboard_gpt2-demo_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
+{"model": "demo-leaderboard/gpt2-demo", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2023-11-21T18:10:08Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 0.1, "license": "custom"}
eval-results/.DS_Store ADDED
Binary file (6.15 kB).
 
eval-results/.gitattributes ADDED
@@ -0,0 +1,55 @@
[identical to eval-queue/.gitattributes above: the same 55 standard Hugging Face LFS tracking rules]
eval-results/README.md ADDED
@@ -0,0 +1,3 @@
+---
+license: cc-by-nd-4.0
+---
eval-results/leaderboard/.DS_Store ADDED
Binary file (6.15 kB).
 
eval-results/leaderboard/gpt2-demo/demo-leaderboard_gpt2-demo_results_2023-11-21T18-10-08.json ADDED
@@ -0,0 +1,15 @@
+{
+    "config": {
+        "model_dtype": "torch.float16",
+        "model_name": "demo-leaderboard/gpt2-demo",
+        "model_sha": "ac3299b02780836378b9e1e68c6eead546e89f90"
+    },
+    "results": {
+        "task_name1": {
+            "metric_name": 0
+        },
+        "task_name2": {
+            "metric_name": 0.90
+        }
+    }
+}
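The results file pairs a `config` block (dtype, model name, sha) with per-task metric values. A small sketch, assuming only the structure shown above, of how such a file reads back and averages:

```python
import json

path = (
    "eval-results/leaderboard/gpt2-demo/"
    "demo-leaderboard_gpt2-demo_results_2023-11-21T18-10-08.json"
)
with open(path) as f:
    data = json.load(f)

print(data["config"]["model_name"])  # demo-leaderboard/gpt2-demo
scores = [task["metric_name"] for task in data["results"].values()]
print(sum(scores) / len(scores))     # (0 + 0.90) / 2 = 0.45
```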
scripts/create_request_file.py CHANGED
@@ -1,107 +1,107 @@
-import json
-import os
-import pprint
-import re
-from datetime import datetime, timezone
-
-import click
-from colorama import Fore
-from huggingface_hub import HfApi, snapshot_download
-
-EVAL_REQUESTS_PATH = "eval-queue"
-QUEUE_REPO = "open-llm-leaderboard/requests"
-
-precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
-model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
-weight_types = ("Original", "Delta", "Adapter")
-
-
-def get_model_size(model_info, precision: str):
-    size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
-    try:
-        model_size = round(model_info.safetensors["total"] / 1e9, 3)
-    except (AttributeError, TypeError):
-        try:
-            size_match = re.search(size_pattern, model_info.modelId.lower())
-            model_size = size_match.group(0)
-            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
-        except AttributeError:
-            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
-    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
-    model_size = size_factor * model_size
-    return model_size
-
-
-def main():
-    api = HfApi()
-    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
-
-    model_name = click.prompt("Enter model name")
-    revision = click.prompt("Enter revision", default="main")
-    precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
-    model_type = click.prompt("Enter model type", type=click.Choice(model_types))
-    weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
-    base_model = click.prompt("Enter base model", default="")
-    status = click.prompt("Enter status", default="FINISHED")
-
-    try:
-        model_info = api.model_info(repo_id=model_name, revision=revision)
-    except Exception as e:
-        print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
-        return 1
-
-    model_size = get_model_size(model_info=model_info, precision=precision)
-
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        license = "?"
-
-    eval_entry = {
-        "model": model_name,
-        "base_model": base_model,
-        "revision": revision,
-        "private": False,
-        "precision": precision,
-        "weight_type": weight_type,
-        "status": status,
-        "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes,
-        "params": model_size,
-        "license": license,
-    }
-
-    user_name = ""
-    model_path = model_name
-    if "/" in model_name:
-        user_name = model_name.split("/")[0]
-        model_path = model_name.split("/")[1]
-
-    pprint.pprint(eval_entry)
-
-    if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
-        click.echo("continuing...")
-
-        out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
-        os.makedirs(out_dir, exist_ok=True)
-        out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"
-
-        with open(out_path, "w") as f:
-            f.write(json.dumps(eval_entry))
-
-        api.upload_file(
-            path_or_fileobj=out_path,
-            path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
-            repo_id=QUEUE_REPO,
-            repo_type="dataset",
-            commit_message=f"Add {model_name} to eval queue",
-        )
-    else:
-        click.echo("aborting...")
-
-
-if __name__ == "__main__":
-    main()
+# import json
+# import os
[... and so on: the entire 107-line script above is re-added verbatim, commented out line-for-line, ending with ...]
+# if __name__ == "__main__":
+#     main()
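The interesting part of the now-disabled script is the size fallback in `get_model_size`: when safetensors metadata is unavailable, it guesses the parameter count from the repo id with the regex `(\d\.)?\d+(b|m)`. A standalone sketch of that fallback follows; `guess_params_billions` is a hypothetical name introduced here for illustration.

```python
import re

SIZE_PATTERN = re.compile(r"(\d\.)?\d+(b|m)")

def guess_params_billions(model_id: str) -> float:
    """Hypothetical standalone version of the regex fallback above."""
    match = SIZE_PATTERN.search(model_id.lower())
    if match is None:
        return 0  # unknown sizes are reported as 0
    size = match.group(0)
    value = float(size[:-1])
    # "7b" -> 7.0 billion; "350m" -> 0.35 billion
    return round(value if size[-1] == "b" else value / 1e3, 3)

print(guess_params_billions("meta-llama/Llama-2-7b-hf"))  # 7.0
print(guess_params_billions("gpt2"))                      # 0 (no size in the id)
```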
src/__pycache__/envs.cpython-310.pyc ADDED
Binary file (550 Bytes).
 
src/__pycache__/populate.cpython-310.pyc ADDED
Binary file (2.63 kB).
 
src/display/__pycache__/about.cpython-310.pyc ADDED
Binary file (3.42 kB).
 
src/display/__pycache__/css_html_js.cpython-310.pyc ADDED
Binary file (2.04 kB).
 
src/display/__pycache__/formatting.cpython-310.pyc ADDED
Binary file (1.58 kB).
 
src/display/__pycache__/utils.cpython-310.pyc ADDED
Binary file (5.26 kB).
 
src/display/about.py CHANGED
@@ -11,57 +11,103 @@ class Task:
 # Init: to update with your specific keys
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("task_name1", "metric_name", "First task")
-    task1 = Task("task_name2", "metric_name", "Second task")
+    # task0 = Task("task_name1", "metric_name", "First task")
+    # task1 = Task("task_name2", "metric_name", "Second task")
+    Analytical_Talent_LSS = Task("Analytical Talent LSS", "Acc", "Analytical Talent LSS")
+    Calculus_USS = Task("Calculus USS", "Acc", "Calculus USS")
+    Chemistry_USS = Task("Chemistry USS", "Acc", "Chemistry USS")
+    Discrete_Mathematics_USS = Task("Discrete Mathematics USS", "Acc", "Discrete Mathematics USS")
+    Economy_USS = Task("Economy USS", "Acc", "Economy USS")
+    Geography_USS = Task("Geography USS", "Acc", "Geography USS")
+    Geology_USS = Task("Geology USS", "Acc", "Geology USS")
+    Geometry_USS = Task("Geometry USS", "Acc", "Geometry USS")
+    History_USS = Task("History USS", "Acc", "History USS")
+    Logic_USS = Task("Logic USS", "Acc", "Logic USS")
+    Mathematical_and_Logical_Intelligence_UPS = Task("Mathematical and Logical Intelligence UPS", "Acc", "Mathematical and Logical Intelligence UPS")
+    Mathematics_LPS = Task("Mathematics LPS", "Acc", "Mathematics LPS")
+    Mathematics_LSS = Task("Mathematics LSS", "Acc", "Mathematics LSS")
+    Mathematics_UPS = Task("Mathematics UPS", "Acc", "Mathematics UPS")
+    Mathematics_USS = Task("Mathematics USS", "Acc", "Mathematics USS")
+    Mathematics_and_Statistics_USS = Task("Mathematics and Statistics USS", "Acc", "Mathematics and Statistics USS")
+    Natural_Sciences_LPS = Task("Natural Sciences LPS", "Acc", "Natural Sciences LPS")
+    Natural_Sciences_LSS = Task("Natural Sciences LSS", "Acc", "Natural Sciences LSS")
+    Natural_Sciences_UPS = Task("Natural Sciences UPS", "Acc", "Natural Sciences UPS")
+    Persian_Literature_LPS = Task("Persian Literature LPS", "Acc", "Persian Literature LPS")
+    Persian_Literature_LSS = Task("Persian Literature LSS", "Acc", "Persian Literature LSS")
+    Persian_Literature_UPS = Task("Persian Literature UPS", "Acc", "Persian Literature UPS")
+    Persian_Literature_USS = Task("Persian Literature USS", "Acc", "Persian Literature USS")
+    Philosophy_USS = Task("Philosophy USS", "Acc", "Philosophy USS")
+    Physics_USS = Task("Physics USS", "Acc", "Physics USS")
+    Probability_and_Statistics_USS = Task("Probability and Statistics USS", "Acc", "Probability and Statistics USS")
+    Psychology_USS = Task("Psychology USS", "Acc", "Psychology USS")
+    Social_Studies_LPS = Task("Social Studies LPS", "Acc", "Social Studies LPS")
+    Social_Studies_LSS = Task("Social Studies LSS", "Acc", "Social Studies LSS")
+    Social_Studies_UPS = Task("Social Studies UPS", "Acc", "Social Studies UPS")
+    Sociology_USS = Task("Sociology USS", "Acc", "Sociology USS")
+    Speed_and_Accuracy_UPS = Task("Speed and Accuracy UPS", "Acc", "Speed and Accuracy UPS")
+    Theology_LPS = Task("Theology LPS", "Acc", "Theology LPS")
+    Theology_LSS = Task("Theology LSS", "Acc", "Theology LSS")
+    Theology_UPS = Task("Theology UPS", "Acc", "Theology UPS")
+    Theology_USS = Task("Theology USS", "Acc", "Theology USS")
+    Verbal_and_Linguistic_Intelligence_UPS = Task("Verbal and Linguistic Intelligence UPS", "Acc", "Verbal and Linguistic Intelligence UPS")
+    Biology_USS = Task("Biology USS", "Acc", "Biology USS")
+    Avg_on_all_tasks = Task("Avg on all tasks", "Acc", "Avg on all tasks")
+    Avg_on_all_questions = Task("Avg on all questions", "Acc", "Avg on all questions")
 
 
 # Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+TITLE = """<h1 align="center" id="space-title">Khayyam Challenge (PersianMMLU)</h1>"""
 
 # What does your leaderboard evaluate?
-INTRODUCTION_TEXT = """
-Intro text
-"""
+INTRODUCTION_TEXT = """"""
 
 # Which evaluations are you running? how can people reproduce what you have?
-LLM_BENCHMARKS_TEXT = f"""
-## How it works
-
-## Reproducibility
-To reproduce our results, here are the commands you can run:
-
-"""
-
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill out your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAI harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
+LLM_BENCHMARKS_TEXT = f""""""
+
+EVALUATION_QUEUE_TEXT = """In progress"""
[... the old template bodies of INTRODUCTION_TEXT, LLM_BENCHMARKS_TEXT and EVALUATION_QUEUE_TEXT are kept in the new file as `# ` comments, line-for-line ...]
 
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 CITATION_BUTTON_TEXT = r"""
+@article{ghahroodi2024khayyam,
+    title={Khayyam Challenge (PersianMMLU): Is Your LLM Truly Wise to The Persian Language?},
+    author={Ghahroodi, Omid and Nouri, Marzia and Sanian, Mohammad Vali and Sahebi, Alireza and Dastgheib, Doratossadat and Asgari, Ehsaneddin and Baghshah, Mahdieh Soleymani and Rohban, Mohammad Hossein},
+    journal={arXiv preprint arXiv:2404.06644},
+    year={2024}
+}
 """
src/display/utils.py CHANGED
@@ -92,6 +92,7 @@ class WeightType(Enum):
     Delta = ModelDetails("Delta")
 
 class Precision(Enum):
+    float32 = ModelDetails("float32")
     float16 = ModelDetails("float16")
     bfloat16 = ModelDetails("bfloat16")
     qt_8bit = ModelDetails("8bit")
@@ -100,6 +101,8 @@ class Precision(Enum):
     Unknown = ModelDetails("?")
 
     def from_str(precision):
+        if precision in ["torch.float32", "float32"]:
+            return Precision.float32
         if precision in ["torch.float16", "float16"]:
             return Precision.float16
         if precision in ["torch.bfloat16", "bfloat16"]:
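With the two additions above, `float32` checkpoints map to a real `Precision` member instead of falling through. A quick usage check (not part of the commit), assuming the usual `return Precision.Unknown` fallback that closes `from_str` in the template:

```python
from src.display.utils import Precision

assert Precision.from_str("torch.float32") is Precision.float32
assert Precision.from_str("float32") is Precision.float32
assert Precision.from_str("torch.float16") is Precision.float16
print(Precision.from_str("int4"))  # Precision.Unknown, via the assumed fallback
```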
src/leaderboard/__pycache__/read_evals.cpython-310.pyc ADDED
Binary file (6.17 kB).
 
src/submission/__pycache__/check_validity.cpython-310.pyc ADDED
Binary file (3.74 kB).
 
src/submission/__pycache__/submit.cpython-310.pyc ADDED
Binary file (2.82 kB).