Spaces: Running on CPU Upgrade
FinancialSupport committed
Commit 79dfaa0
1 Parent(s): 23c9328
Update src/about.py
Changed files: src/about.py (+74 -73)
src/about.py CHANGED
@@ -1,73 +1,74 @@
-from dataclasses import dataclass
-from enum import Enum
-
-@dataclass
-class Task:
-    benchmark: str
-    metric: str
-    col_name: str
-
-
-# Select your tasks here
-# ---------------------------------------------------
-class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("mmlu_it", "acc", "MMLU_IT")
-    task1 = Task("arc_it", "acc_norm", "ARC_IT")
-    task2 = Task("hellaswag_it", "acc_norm", "HELLASWAG_IT")
-
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
-
-
-
-# Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">
-
-# What does your leaderboard evaluate?
-INTRODUCTION_TEXT = """
-
-"""
-
-# Which evaluations are you running? how can people reproduce what you have?
-LLM_BENCHMARKS_TEXT = f"""
-##
-
-## Reproducibility
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Note:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-"""
+from dataclasses import dataclass
+from enum import Enum
+
+@dataclass
+class Task:
+    benchmark: str
+    metric: str
+    col_name: str
+
+
+# Select your tasks here
+# ---------------------------------------------------
+class Tasks(Enum):
+    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+    task0 = Task("mmlu_it", "acc", "MMLU_IT")
+    task1 = Task("arc_it", "acc_norm", "ARC_IT")
+    task2 = Task("hellaswag_it", "acc_norm", "HELLASWAG_IT")
+
+NUM_FEWSHOT = 0 # Change with your few shot
+# ---------------------------------------------------
+
+
+
+# Your leaderboard name
+TITLE = """<h1 align="center" id="space-title">Classifica generale degli LLM italiani</h1>"""
+
+# What does your leaderboard evaluate?
+INTRODUCTION_TEXT = """
+La leaderboard misura le performance dei Large Language Models nella lingua italiana
+"""
+
+# Which evaluations are you running? how can people reproduce what you have?
+LLM_BENCHMARKS_TEXT = f"""
+## Come funziona
+
+## Reproducibility
+Per riprodurre i risultati, eseguite:
+lm-eval --model hf --model_args pretrained=<vostro modello> --tasks hellaswag_it,arc_it --device cuda:0 --batch_size auto:2;
+lm-eval --model hf --model_args pretrained=<vostro modello> --tasks m_mmlu_it --num_fewshot 5 --device cuda:0 --batch_size auto:2
+"""
+
+EVALUATION_QUEUE_TEXT = """
+## Some good practices before submitting a model
+
+### 1) Make sure you can load your model and tokenizer using AutoClasses:
+```python
+from transformers import AutoConfig, AutoModel, AutoTokenizer
+config = AutoConfig.from_pretrained("your model name", revision=revision)
+model = AutoModel.from_pretrained("your model name", revision=revision)
+tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+```
+If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+Note: make sure your model is public!
+Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted!
+
+### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+
+### 3) Make sure your model has an open license!
+This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
+
+### 4) Fill up your model card
+When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+
+## In case of model failure
+If your model is displayed in the `FAILED` category, its execution stopped.
+Make sure you have followed the above steps first.
+If everything is done, check that you can launch the EleutherAI harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+"""
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = r"""
+"""