ldwang committed on
Commit b2a6a0e
1 Parent(s): f9bb8f3

Create lighteval_tasks_v2.py

Files changed (1)
  1. lighteval_tasks_v2.py +653 -0
lighteval_tasks_v2.py ADDED
@@ -0,0 +1,653 @@
# ruff: noqa: F405, F403, F401
"""
Custom evaluation tasks for lighteval

Do note that we ran the evals with `max_samples=1000` to speed up large evals.
Most custom prompt changes were in an attempt to improve signal for small models in general.

This file generally creates just a TASKS_TABLE and TASKS_GROUPS which are then imported by LightEval.

Example usage (lighteval_tasks.py is the path to this file):
===================
accelerate launch --num_processes=1 lighteval/run_evals_accelerate.py --model_args="pretrained=HuggingFaceFW/ablation-model-fineweb-edu" \
    --custom_tasks "lighteval_tasks.py" --output_dir [OUTPUTPATH] --max_samples 1000 \
    --tasks "custom|hellaswag|0|1,custom|winogrande|0|1,custom|piqa|0|1,custom|siqa|0|1,custom|openbookqa|0|1,custom|arc:easy|0|1,custom|arc:challenge|0|1,custom|commonsense_qa|0|1,custom|mmlu:abstract_algebra|0|1,custom|mmlu:anatomy|0|1,custom|mmlu:astronomy|0|1,custom|mmlu:business_ethics|0|1,custom|mmlu:clinical_knowledge|0|1,custom|mmlu:college_biology|0|1,custom|mmlu:college_chemistry|0|1,custom|mmlu:college_computer_science|0|1,custom|mmlu:college_mathematics|0|1,custom|mmlu:college_medicine|0|1,custom|mmlu:college_physics|0|1,custom|mmlu:computer_security|0|1,custom|mmlu:conceptual_physics|0|1,custom|mmlu:econometrics|0|1,custom|mmlu:electrical_engineering|0|1,custom|mmlu:elementary_mathematics|0|1,custom|mmlu:formal_logic|0|1,custom|mmlu:global_facts|0|1,custom|mmlu:high_school_biology|0|1,custom|mmlu:high_school_chemistry|0|1,custom|mmlu:high_school_computer_science|0|1,custom|mmlu:high_school_european_history|0|1,custom|mmlu:high_school_geography|0|1,custom|mmlu:high_school_government_and_politics|0|1,custom|mmlu:high_school_macroeconomics|0|1,custom|mmlu:high_school_mathematics|0|1,custom|mmlu:high_school_microeconomics|0|1,custom|mmlu:high_school_physics|0|1,custom|mmlu:high_school_psychology|0|1,custom|mmlu:high_school_statistics|0|1,custom|mmlu:high_school_us_history|0|1,custom|mmlu:high_school_world_history|0|1,custom|mmlu:human_aging|0|1,custom|mmlu:human_sexuality|0|1,custom|mmlu:international_law|0|1,custom|mmlu:jurisprudence|0|1,custom|mmlu:logical_fallacies|0|1,custom|mmlu:machine_learning|0|1,custom|mmlu:management|0|1,custom|mmlu:marketing|0|1,custom|mmlu:medical_genetics|0|1,custom|mmlu:miscellaneous|0|1,custom|mmlu:moral_disputes|0|1,custom|mmlu:moral_scenarios|0|1,custom|mmlu:nutrition|0|1,custom|mmlu:philosophy|0|1,custom|mmlu:prehistory|0|1,custom|mmlu:professional_accounting|0|1,custom|mmlu:professional_law|0|1,custom|mmlu:professional_medicine|0|1,custom|mmlu:professional_psychology|0|1,custom|mmlu:public_relations|0|1,custom|mmlu:security_studies|0|1,custom|mmlu:sociology|0|1,custom|mmlu:us_foreign_policy|0|1,custom|mmlu:virology|0|1,custom|mmlu:world_religions|0|1"
===================
CMMLU task string (can be passed to --tasks in the same way):
custom|cmmlu:agronomy|0|1,custom|cmmlu:anatomy|0|1,custom|cmmlu:ancient_chinese|0|1,custom|cmmlu:arts|0|1,custom|cmmlu:astronomy|0|1,custom|cmmlu:business_ethics|0|1,custom|cmmlu:chinese_civil_service_exam|0|1,custom|cmmlu:chinese_driving_rule|0|1,custom|cmmlu:chinese_food_culture|0|1,custom|cmmlu:chinese_foreign_policy|0|1,custom|cmmlu:chinese_history|0|1,custom|cmmlu:chinese_literature|0|1,custom|cmmlu:chinese_teacher_qualification|0|1,custom|cmmlu:clinical_knowledge|0|1,custom|cmmlu:college_actuarial_science|0|1,custom|cmmlu:college_education|0|1,custom|cmmlu:college_engineering_hydrology|0|1,custom|cmmlu:college_law|0|1,custom|cmmlu:college_mathematics|0|1,custom|cmmlu:college_medical_statistics|0|1,custom|cmmlu:college_medicine|0|1,custom|cmmlu:computer_science|0|1,custom|cmmlu:computer_security|0|1,custom|cmmlu:conceptual_physics|0|1,custom|cmmlu:construction_project_management|0|1,custom|cmmlu:economics|0|1,custom|cmmlu:education|0|1,custom|cmmlu:electrical_engineering|0|1,custom|cmmlu:elementary_chinese|0|1,custom|cmmlu:elementary_commonsense|0|1,custom|cmmlu:elementary_information_and_technology|0|1,custom|cmmlu:elementary_mathematics|0|1,custom|cmmlu:ethnology|0|1,custom|cmmlu:food_science|0|1,custom|cmmlu:genetics|0|1,custom|cmmlu:global_facts|0|1,custom|cmmlu:high_school_biology|0|1,custom|cmmlu:high_school_chemistry|0|1,custom|cmmlu:high_school_geography|0|1,custom|cmmlu:high_school_mathematics|0|1,custom|cmmlu:high_school_physics|0|1,custom|cmmlu:high_school_politics|0|1,custom|cmmlu:human_sexuality|0|1,custom|cmmlu:international_law|0|1,custom|cmmlu:journalism|0|1,custom|cmmlu:jurisprudence|0|1,custom|cmmlu:legal_and_moral_basis|0|1,custom|cmmlu:logical|0|1,custom|cmmlu:machine_learning|0|1,custom|cmmlu:management|0|1,custom|cmmlu:marketing|0|1,custom|cmmlu:marxist_theory|0|1,custom|cmmlu:modern_chinese|0|1,custom|cmmlu:nutrition|0|1,custom|cmmlu:philosophy|0|1,custom|cmmlu:professional_accounting|0|1,custom|cmmlu:professional_law|0|1,custom|cmmlu:professional_medicine|0|1,custom|cmmlu:professional_psychology|0|1,custom|cmmlu:public_relations|0|1,custom|cmmlu:security_study|0|1,custom|cmmlu:sociology|0|1,custom|cmmlu:sports_science|0|1,custom|cmmlu:traditional_chinese_medicine|0|1,custom|cmmlu:virology|0|1,custom|cmmlu:world_history|0|1,custom|cmmlu:world_religions|0|1
===================
CEVAL task string (can be passed to --tasks in the same way):
custom|ceval:computer_network|0|1,custom|ceval:operating_system|0|1,custom|ceval:computer_architecture|0|1,custom|ceval:college_programming|0|1,custom|ceval:college_physics|0|1,custom|ceval:college_chemistry|0|1,custom|ceval:advanced_mathematics|0|1,custom|ceval:probability_and_statistics|0|1,custom|ceval:discrete_mathematics|0|1,custom|ceval:electrical_engineer|0|1,custom|ceval:metrology_engineer|0|1,custom|ceval:high_school_mathematics|0|1,custom|ceval:high_school_physics|0|1,custom|ceval:high_school_chemistry|0|1,custom|ceval:high_school_biology|0|1,custom|ceval:middle_school_mathematics|0|1,custom|ceval:middle_school_biology|0|1,custom|ceval:middle_school_physics|0|1,custom|ceval:middle_school_chemistry|0|1,custom|ceval:veterinary_medicine|0|1,custom|ceval:college_economics|0|1,custom|ceval:business_administration|0|1,custom|ceval:marxism|0|1,custom|ceval:mao_zedong_thought|0|1,custom|ceval:education_science|0|1,custom|ceval:teacher_qualification|0|1,custom|ceval:high_school_politics|0|1,custom|ceval:high_school_geography|0|1,custom|ceval:middle_school_politics|0|1,custom|ceval:middle_school_geography|0|1,custom|ceval:modern_chinese_history|0|1,custom|ceval:ideological_and_moral_cultivation|0|1,custom|ceval:logic|0|1,custom|ceval:law|0|1,custom|ceval:chinese_language_and_literature|0|1,custom|ceval:art_studies|0|1,custom|ceval:professional_tour_guide|0|1,custom|ceval:legal_professional|0|1,custom|ceval:high_school_chinese|0|1,custom|ceval:high_school_history|0|1,custom|ceval:middle_school_history|0|1,custom|ceval:civil_servant|0|1,custom|ceval:sports_science|0|1,custom|ceval:plant_protection|0|1,custom|ceval:basic_medicine|0|1,custom|ceval:clinical_medicine|0|1,custom|ceval:urban_and_rural_planner|0|1,custom|ceval:accountant|0|1,custom|ceval:fire_engineer|0|1,custom|ceval:environmental_impact_assessment_engineer|0|1,custom|ceval:tax_accountant|0|1,custom|ceval:physician|0|1
===================

More info here: https://github.com/huggingface/lighteval?tab=readme-ov-file#evaluate-a-model-on-extended-community-or-custom-tasks
For more info on differences between MMLU implementations: https://huggingface.co/blog/open-llm-leaderboard-mmlu#1001-flavors-of-mmlu
In particular, the default leaderboard MMLU implementation (which uses "A", "B", etc. as answer targets) generally gives random results on small/non-instruction-tuned models.
Instead, we use the full MMLU answer as the target.
"""
import re
from typing import List, Tuple

from lighteval.metrics import Metrics
from lighteval.tasks.lighteval_task import LightevalTaskConfig
from lighteval.tasks.requests import Doc
from lighteval.tasks.tasks_prompt_formatting import LETTER_INDICES

_TASKS_STRINGS: List[Tuple[LightevalTaskConfig, str]] = []
_TASKS: List[LightevalTaskConfig] = []

## COMMON_SENSE_REASONING_TASKS ##
COMMON_SENSE_REASONING_TASKS = [
    LightevalTaskConfig(
        name="hellaswag",
        prompt_function="hellaswag_prompt",
        hf_repo="hellaswag",
        hf_subset="default",
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="winogrande",
        prompt_function="winogrande",
        hf_repo="winogrande",
        hf_subset="winogrande_xl",
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="piqa",
        prompt_function="piqa_harness",
        hf_repo="piqa",
        hf_subset="plain_text",
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="siqa",
        prompt_function="siqa_prompt",
        hf_repo="lighteval/siqa",
        hf_subset="default",
        hf_avail_splits=["train", "validation"],
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="openbookqa",
        prompt_function="openbookqa",
        hf_repo="openbookqa",
        hf_subset="main",
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="arc:easy",
        prompt_function="arc",
        hf_repo="ai2_arc",
        hf_subset="ARC-Easy",
        evaluation_splits=["test"],
        generation_size=1,
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="arc:challenge",
        prompt_function="arc",
        hf_repo="ai2_arc",
        hf_subset="ARC-Challenge",
        evaluation_splits=["test"],
        generation_size=1,
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
    LightevalTaskConfig(
        name="commonsense_qa",
        prompt_function="commonsense_qa_prompt",
        hf_repo="commonsense_qa",
        hf_subset="default",
        metric=["loglikelihood_acc", "loglikelihood_acc_norm_nospace"],
    ),
]


def commonsense_qa_prompt(line, task_name: str = None):
    return Doc(
        task_name=task_name,
        query=line["question"],
        choices=[f" {c}" for c in line["choices"]["text"]],
        gold_index=LETTER_INDICES.index(line["answerKey"].strip()),
        instruction="",
    )


def siqa_prompt(line, task_name: str = None):
    return Doc(
        task_name=task_name,
        query=line["context"] + " " + line["question"],
        choices=[f" {c}" for c in [line["answerA"], line["answerB"], line["answerC"]]],
        gold_index=int(line["label"]) - 1,
        instruction="",
    )


def hellaswag_prompt(line, task_name: str = None):
    def preprocess(text):
        """Comes from AiHarness"""
        # text = text.strip()
        # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
        text = text.replace(" [title]", ". ")
        text = re.sub("\\[.*?\\]", "", text)
        text = text.replace("  ", " ")  # collapse double spaces left by the substitutions above
        return text

    ctx = f"{line['ctx_a']} {line['ctx_b'].capitalize()} "
    return Doc(
        task_name=task_name,
        query=preprocess(line["activity_label"] + ": " + ctx),
        choices=[" " + preprocess(ending) for ending in line["endings"]],
        gold_index=int(line["label"]) if line["label"] != "" else -1,  # -1 for test
        # "metric": "choices_loglikelihood",
    )


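# Illustrative example (values are made up): for a commonsense_qa row such as
#   line = {"question": "Where would you put a plate after washing it?",
#           "choices": {"label": ["A", "B", "C"], "text": ["cupboard", "floor", "sink"]},
#           "answerKey": "A"}
# commonsense_qa_prompt(line) builds roughly
#   Doc(query="Where would you put a plate after washing it?",
#       choices=[" cupboard", " floor", " sink"], gold_index=0)
# i.e. each choice is prefixed with a space and scored by log-likelihood against the query.
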
# 0-shot for common sense
COMMON_SENSE_REASONING_STRING = [(t, f"custom|{t.name}|0|1") for t in COMMON_SENSE_REASONING_TASKS]
_TASKS_STRINGS.extend(COMMON_SENSE_REASONING_STRING)
_TASKS += COMMON_SENSE_REASONING_TASKS

## MMLU ##
class CustomMMLUEvaluationTask(LightevalTaskConfig):
    def __init__(
        self,
        name,
        prompt_function="mmlu_prompt",
        hf_repo="lighteval/mmlu",
        hf_subset=None,
        # metric=[Metrics.loglikelihood_acc_single_token],
        metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace],
        hf_avail_splits=None,
        evaluation_splits=["test"],
        few_shots_split="dev",
        few_shots_select=None,
        suite=None,
        generation_size=-1,
        stop_sequence=None,
        output_regex=None,
        frozen=False,
    ):
        super().__init__(
            name=name,
            prompt_function=prompt_function,
            hf_repo=hf_repo,
            hf_subset=hf_subset,
            metric=metric,
            hf_avail_splits=hf_avail_splits,
            evaluation_splits=evaluation_splits,
            few_shots_split=few_shots_split,
            few_shots_select=few_shots_select,
            suite=suite,
            generation_size=generation_size,
            stop_sequence=stop_sequence,
            output_regex=output_regex,
            frozen=frozen,
        )


MMLU_TASKS = [
    CustomMMLUEvaluationTask(name="mmlu:abstract_algebra", hf_subset="abstract_algebra"),
    CustomMMLUEvaluationTask(name="mmlu:anatomy", hf_subset="anatomy"),
    CustomMMLUEvaluationTask(name="mmlu:astronomy", hf_subset="astronomy"),
    CustomMMLUEvaluationTask(name="mmlu:business_ethics", hf_subset="business_ethics"),
    CustomMMLUEvaluationTask(name="mmlu:clinical_knowledge", hf_subset="clinical_knowledge"),
    CustomMMLUEvaluationTask(name="mmlu:college_biology", hf_subset="college_biology"),
    CustomMMLUEvaluationTask(name="mmlu:college_chemistry", hf_subset="college_chemistry"),
    CustomMMLUEvaluationTask(name="mmlu:college_computer_science", hf_subset="college_computer_science"),
    CustomMMLUEvaluationTask(name="mmlu:college_mathematics", hf_subset="college_mathematics"),
    CustomMMLUEvaluationTask(name="mmlu:college_medicine", hf_subset="college_medicine"),
    CustomMMLUEvaluationTask(name="mmlu:college_physics", hf_subset="college_physics"),
    CustomMMLUEvaluationTask(name="mmlu:computer_security", hf_subset="computer_security"),
    CustomMMLUEvaluationTask(name="mmlu:conceptual_physics", hf_subset="conceptual_physics"),
    CustomMMLUEvaluationTask(name="mmlu:econometrics", hf_subset="econometrics"),
    CustomMMLUEvaluationTask(name="mmlu:electrical_engineering", hf_subset="electrical_engineering"),
    CustomMMLUEvaluationTask(name="mmlu:elementary_mathematics", hf_subset="elementary_mathematics"),
    CustomMMLUEvaluationTask(name="mmlu:formal_logic", hf_subset="formal_logic"),
    CustomMMLUEvaluationTask(name="mmlu:global_facts", hf_subset="global_facts"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_biology", hf_subset="high_school_biology"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_chemistry", hf_subset="high_school_chemistry"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_computer_science", hf_subset="high_school_computer_science"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_european_history", hf_subset="high_school_european_history"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_geography", hf_subset="high_school_geography"),
    CustomMMLUEvaluationTask(
        name="mmlu:high_school_government_and_politics", hf_subset="high_school_government_and_politics"
    ),
    CustomMMLUEvaluationTask(name="mmlu:high_school_macroeconomics", hf_subset="high_school_macroeconomics"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_mathematics", hf_subset="high_school_mathematics"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_microeconomics", hf_subset="high_school_microeconomics"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_physics", hf_subset="high_school_physics"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_psychology", hf_subset="high_school_psychology"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_statistics", hf_subset="high_school_statistics"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_us_history", hf_subset="high_school_us_history"),
    CustomMMLUEvaluationTask(name="mmlu:high_school_world_history", hf_subset="high_school_world_history"),
    CustomMMLUEvaluationTask(name="mmlu:human_aging", hf_subset="human_aging"),
    CustomMMLUEvaluationTask(name="mmlu:human_sexuality", hf_subset="human_sexuality"),
    CustomMMLUEvaluationTask(name="mmlu:international_law", hf_subset="international_law"),
    CustomMMLUEvaluationTask(name="mmlu:jurisprudence", hf_subset="jurisprudence"),
    CustomMMLUEvaluationTask(name="mmlu:logical_fallacies", hf_subset="logical_fallacies"),
    CustomMMLUEvaluationTask(name="mmlu:machine_learning", hf_subset="machine_learning"),
    CustomMMLUEvaluationTask(name="mmlu:management", hf_subset="management"),
    CustomMMLUEvaluationTask(name="mmlu:marketing", hf_subset="marketing"),
    CustomMMLUEvaluationTask(name="mmlu:medical_genetics", hf_subset="medical_genetics"),
    CustomMMLUEvaluationTask(name="mmlu:miscellaneous", hf_subset="miscellaneous"),
    CustomMMLUEvaluationTask(name="mmlu:moral_disputes", hf_subset="moral_disputes"),
    CustomMMLUEvaluationTask(name="mmlu:moral_scenarios", hf_subset="moral_scenarios"),
    CustomMMLUEvaluationTask(name="mmlu:nutrition", hf_subset="nutrition"),
    CustomMMLUEvaluationTask(name="mmlu:philosophy", hf_subset="philosophy"),
    CustomMMLUEvaluationTask(name="mmlu:prehistory", hf_subset="prehistory"),
    CustomMMLUEvaluationTask(name="mmlu:professional_accounting", hf_subset="professional_accounting"),
    CustomMMLUEvaluationTask(name="mmlu:professional_law", hf_subset="professional_law"),
    CustomMMLUEvaluationTask(name="mmlu:professional_medicine", hf_subset="professional_medicine"),
    CustomMMLUEvaluationTask(name="mmlu:professional_psychology", hf_subset="professional_psychology"),
    CustomMMLUEvaluationTask(name="mmlu:public_relations", hf_subset="public_relations"),
    CustomMMLUEvaluationTask(name="mmlu:security_studies", hf_subset="security_studies"),
    CustomMMLUEvaluationTask(name="mmlu:sociology", hf_subset="sociology"),
    CustomMMLUEvaluationTask(name="mmlu:us_foreign_policy", hf_subset="us_foreign_policy"),
    CustomMMLUEvaluationTask(name="mmlu:virology", hf_subset="virology"),
    CustomMMLUEvaluationTask(name="mmlu:world_religions", hf_subset="world_religions"),
]


def mmlu_prompt(line, task_name: str = None):
    """MMLU prompt without letters"""
    topic = line["subject"]
    prompt = f"The following are questions about {topic.replace('_', ' ')}.\nQuestion: "
    prompt += line["question"] + "\nAnswer:"
    # print(f"mmlu_prompt={prompt}")

    return Doc(
        task_name=task_name,
        query=prompt,
        choices=[f" {c}" for c in line["choices"]],
        gold_index=line["answer"],
        instruction=f"The following are questions about {topic.replace('_', ' ')}.\n",
    )


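# Illustrative sketch of the resulting format: for an "abstract_algebra" row the query becomes roughly
#   "The following are questions about abstract algebra.\nQuestion: <question text>\nAnswer:"
# and the choices are the full answer strings (" <answer 0>", " <answer 1>", ...),
# so the model is scored on the complete answer text rather than on the letters A/B/C/D.
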
MMLU_STRING = [(t, f"custom|{t.name}|0|1") for t in MMLU_TASKS]
_TASKS_STRINGS.extend(MMLU_STRING)
_TASKS += MMLU_TASKS


############################################################################################################################################################
## CMMLU ##
class CustomCMMLUEvaluationTask(LightevalTaskConfig):
    def __init__(
        self,
        name,
        prompt_function="cmmlu_prompt",
        hf_repo="ldwang/lighteval-cmmlu",
        hf_subset=None,
        # metric=[Metrics.loglikelihood_acc_single_token],
        metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace],
        hf_avail_splits=None,
        evaluation_splits=["test"],
        few_shots_split="dev",
        few_shots_select=None,
        suite=None,
        generation_size=-1,
        stop_sequence=None,
        output_regex=None,
        frozen=False,
    ):
        super().__init__(
            name=name,
            prompt_function=prompt_function,
            hf_repo=hf_repo,
            hf_subset=hf_subset,
            metric=metric,
            hf_avail_splits=hf_avail_splits,
            evaluation_splits=evaluation_splits,
            few_shots_split=few_shots_split,
            few_shots_select=few_shots_select,
            suite=suite,
            generation_size=generation_size,
            stop_sequence=stop_sequence,
            output_regex=output_regex,
            frozen=frozen,
            trust_dataset=True,
        )


CMMLU_TASKS = [
    CustomCMMLUEvaluationTask(name="cmmlu:agronomy", hf_subset="agronomy"),
    CustomCMMLUEvaluationTask(name="cmmlu:anatomy", hf_subset="anatomy"),
    CustomCMMLUEvaluationTask(name="cmmlu:ancient_chinese", hf_subset="ancient_chinese"),
    CustomCMMLUEvaluationTask(name="cmmlu:arts", hf_subset="arts"),
    CustomCMMLUEvaluationTask(name="cmmlu:astronomy", hf_subset="astronomy"),
    CustomCMMLUEvaluationTask(name="cmmlu:business_ethics", hf_subset="business_ethics"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_civil_service_exam", hf_subset="chinese_civil_service_exam"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_driving_rule", hf_subset="chinese_driving_rule"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_food_culture", hf_subset="chinese_food_culture"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_foreign_policy", hf_subset="chinese_foreign_policy"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_history", hf_subset="chinese_history"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_literature", hf_subset="chinese_literature"),
    CustomCMMLUEvaluationTask(name="cmmlu:chinese_teacher_qualification", hf_subset="chinese_teacher_qualification"),
    CustomCMMLUEvaluationTask(name="cmmlu:clinical_knowledge", hf_subset="clinical_knowledge"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_actuarial_science", hf_subset="college_actuarial_science"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_education", hf_subset="college_education"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_engineering_hydrology", hf_subset="college_engineering_hydrology"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_law", hf_subset="college_law"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_mathematics", hf_subset="college_mathematics"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_medical_statistics", hf_subset="college_medical_statistics"),
    CustomCMMLUEvaluationTask(name="cmmlu:college_medicine", hf_subset="college_medicine"),
    CustomCMMLUEvaluationTask(name="cmmlu:computer_science", hf_subset="computer_science"),
    CustomCMMLUEvaluationTask(name="cmmlu:computer_security", hf_subset="computer_security"),
    CustomCMMLUEvaluationTask(name="cmmlu:conceptual_physics", hf_subset="conceptual_physics"),
    CustomCMMLUEvaluationTask(name="cmmlu:construction_project_management", hf_subset="construction_project_management"),
    CustomCMMLUEvaluationTask(name="cmmlu:economics", hf_subset="economics"),
    CustomCMMLUEvaluationTask(name="cmmlu:education", hf_subset="education"),
    CustomCMMLUEvaluationTask(name="cmmlu:electrical_engineering", hf_subset="electrical_engineering"),
    CustomCMMLUEvaluationTask(name="cmmlu:elementary_chinese", hf_subset="elementary_chinese"),
    CustomCMMLUEvaluationTask(name="cmmlu:elementary_commonsense", hf_subset="elementary_commonsense"),
    CustomCMMLUEvaluationTask(name="cmmlu:elementary_information_and_technology", hf_subset="elementary_information_and_technology"),
    CustomCMMLUEvaluationTask(name="cmmlu:elementary_mathematics", hf_subset="elementary_mathematics"),
    CustomCMMLUEvaluationTask(name="cmmlu:ethnology", hf_subset="ethnology"),
    CustomCMMLUEvaluationTask(name="cmmlu:food_science", hf_subset="food_science"),
    CustomCMMLUEvaluationTask(name="cmmlu:genetics", hf_subset="genetics"),
    CustomCMMLUEvaluationTask(name="cmmlu:global_facts", hf_subset="global_facts"),
    CustomCMMLUEvaluationTask(name="cmmlu:high_school_biology", hf_subset="high_school_biology"),
    CustomCMMLUEvaluationTask(name="cmmlu:high_school_chemistry", hf_subset="high_school_chemistry"),
    CustomCMMLUEvaluationTask(name="cmmlu:high_school_geography", hf_subset="high_school_geography"),
    CustomCMMLUEvaluationTask(name="cmmlu:high_school_mathematics", hf_subset="high_school_mathematics"),
    CustomCMMLUEvaluationTask(name="cmmlu:high_school_physics", hf_subset="high_school_physics"),
    CustomCMMLUEvaluationTask(name="cmmlu:high_school_politics", hf_subset="high_school_politics"),
    CustomCMMLUEvaluationTask(name="cmmlu:human_sexuality", hf_subset="human_sexuality"),
    CustomCMMLUEvaluationTask(name="cmmlu:international_law", hf_subset="international_law"),
    CustomCMMLUEvaluationTask(name="cmmlu:journalism", hf_subset="journalism"),
    CustomCMMLUEvaluationTask(name="cmmlu:jurisprudence", hf_subset="jurisprudence"),
    CustomCMMLUEvaluationTask(name="cmmlu:legal_and_moral_basis", hf_subset="legal_and_moral_basis"),
    CustomCMMLUEvaluationTask(name="cmmlu:logical", hf_subset="logical"),
    CustomCMMLUEvaluationTask(name="cmmlu:machine_learning", hf_subset="machine_learning"),
    CustomCMMLUEvaluationTask(name="cmmlu:management", hf_subset="management"),
    CustomCMMLUEvaluationTask(name="cmmlu:marketing", hf_subset="marketing"),
    CustomCMMLUEvaluationTask(name="cmmlu:marxist_theory", hf_subset="marxist_theory"),
    CustomCMMLUEvaluationTask(name="cmmlu:modern_chinese", hf_subset="modern_chinese"),
    CustomCMMLUEvaluationTask(name="cmmlu:nutrition", hf_subset="nutrition"),
    CustomCMMLUEvaluationTask(name="cmmlu:philosophy", hf_subset="philosophy"),
    CustomCMMLUEvaluationTask(name="cmmlu:professional_accounting", hf_subset="professional_accounting"),
    CustomCMMLUEvaluationTask(name="cmmlu:professional_law", hf_subset="professional_law"),
    CustomCMMLUEvaluationTask(name="cmmlu:professional_medicine", hf_subset="professional_medicine"),
    CustomCMMLUEvaluationTask(name="cmmlu:professional_psychology", hf_subset="professional_psychology"),
    CustomCMMLUEvaluationTask(name="cmmlu:public_relations", hf_subset="public_relations"),
    CustomCMMLUEvaluationTask(name="cmmlu:security_study", hf_subset="security_study"),
    CustomCMMLUEvaluationTask(name="cmmlu:sociology", hf_subset="sociology"),
    CustomCMMLUEvaluationTask(name="cmmlu:sports_science", hf_subset="sports_science"),
    CustomCMMLUEvaluationTask(name="cmmlu:traditional_chinese_medicine", hf_subset="traditional_chinese_medicine"),
    CustomCMMLUEvaluationTask(name="cmmlu:virology", hf_subset="virology"),
    CustomCMMLUEvaluationTask(name="cmmlu:world_history", hf_subset="world_history"),
    CustomCMMLUEvaluationTask(name="cmmlu:world_religions", hf_subset="world_religions"),
]

cmmlu_subject_mapping = {
    'agronomy': '农学',
    'anatomy': '解剖学',
    'ancient_chinese': '古汉语',
    'arts': '艺术学',
    'astronomy': '天文学',
    'business_ethics': '商业伦理',
    'chinese_civil_service_exam': '中国公务员考试',
    'chinese_driving_rule': '中国驾驶规则',
    'chinese_food_culture': '中国饮食文化',
    'chinese_foreign_policy': '中国外交政策',
    'chinese_history': '中国历史',
    'chinese_literature': '中国文学',
    'chinese_teacher_qualification': '中国教师资格',
    'clinical_knowledge': '临床知识',
    'college_actuarial_science': '大学精算学',
    'college_education': '大学教育学',
    'college_engineering_hydrology': '大学工程水文学',
    'college_law': '大学法律',
    'college_mathematics': '大学数学',
    'college_medical_statistics': '大学医学统计',
    'college_medicine': '大学医学',
    'computer_science': '计算机科学',
    'computer_security': '计算机安全',
    'conceptual_physics': '概念物理学',
    'construction_project_management': '建设工程管理',
    'economics': '经济学',
    'education': '教育学',
    'electrical_engineering': '电气工程',
    'elementary_chinese': '小学语文',
    'elementary_commonsense': '小学常识',
    'elementary_information_and_technology': '小学信息技术',
    'elementary_mathematics': '初等数学',
    'ethnology': '民族学',
    'food_science': '食品科学',
    'genetics': '遗传学',
    'global_facts': '全球事实',
    'high_school_biology': '高中生物',
    'high_school_chemistry': '高中化学',
    'high_school_geography': '高中地理',
    'high_school_mathematics': '高中数学',
    'high_school_physics': '高中物理学',
    'high_school_politics': '高中政治',
    'human_sexuality': '人类性行为',
    'international_law': '国际法学',
    'journalism': '新闻学',
    'jurisprudence': '法理学',
    'legal_and_moral_basis': '法律与道德基础',
    'logical': '逻辑学',
    'machine_learning': '机器学习',
    'management': '管理学',
    'marketing': '市场营销',
    'marxist_theory': '马克思主义理论',
    'modern_chinese': '现代汉语',
    'nutrition': '营养学',
    'philosophy': '哲学',
    'professional_accounting': '专业会计',
    'professional_law': '专业法学',
    'professional_medicine': '专业医学',
    'professional_psychology': '专业心理学',
    'public_relations': '公共关系',
    'security_study': '安全研究',
    'sociology': '社会学',
    'sports_science': '体育学',
    'traditional_chinese_medicine': '中医中药',
    'virology': '病毒学',
    'world_history': '世界历史',
    'world_religions': '世界宗教'
}

def cmmlu_prompt(line, task_name: str = None):
    # 以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。\n题目:{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}
    # 答案是: {{{answer}}}
    """CMMLU prompt without letters"""
    topic = cmmlu_subject_mapping[line['subject']]
    prompt = f"以下是关于{topic.replace('_', ' ')}的单项选择题,请直接给出正确答案的选项。\n题目:"
    prompt += line["question"] + "\n答案是:"
    # print(f"cmmlu_prompt={prompt}")

    return Doc(
        task_name=task_name,
        query=prompt,
        choices=[f" {c}" for c in line["choices"]],
        gold_index=line["answer"],
        instruction=None,
    )

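# Rough English gloss of the query built above: "The following is a single-choice question about
# <subject>; give the option of the correct answer directly.\nQuestion: <question>\nThe answer is:"
# As with the English MMLU prompt, the scored choices are the full answer texts rather than the letters A-D.
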
CMMLU_STRING = [(t, f"custom|{t.name}|0|1") for t in CMMLU_TASKS]
_TASKS_STRINGS.extend(CMMLU_STRING)
_TASKS += CMMLU_TASKS
print(f'{",".join([t[1] for t in CMMLU_STRING])}')

472
+ ############################################################################################################################################################
473
+ ## CEVAL ##
474
+ class CustomCEVALEvaluationTask(LightevalTaskConfig):
475
+ def __init__(
476
+ self,
477
+ name,
478
+ prompt_function="ceval_prompt",
479
+ hf_repo="ldwang/lighteval-ceval-exam",
480
+ hf_subset=None,
481
+ # metric=[Metrics.loglikelihood_acc_single_token],
482
+ metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace],
483
+ hf_avail_splits=None,
484
+ evaluation_splits=["val"],
485
+ few_shots_split="dev",
486
+ few_shots_select=None,
487
+ suite=None,
488
+ generation_size=-1,
489
+ stop_sequence=None,
490
+ output_regex=None,
491
+ frozen=False,
492
+ ):
493
+ super().__init__(
494
+ name=name,
495
+ prompt_function=prompt_function,
496
+ hf_repo=hf_repo,
497
+ hf_subset=hf_subset,
498
+ metric=metric,
499
+ hf_avail_splits=hf_avail_splits,
500
+ evaluation_splits=evaluation_splits,
501
+ few_shots_split=few_shots_split,
502
+ few_shots_select=few_shots_select,
503
+ suite=suite,
504
+ generation_size=generation_size,
505
+ stop_sequence=stop_sequence,
506
+ output_regex=output_regex,
507
+ frozen=frozen,
508
+ trust_dataset=True,
509
+ )
510
+
511
+
512
+ CEVAL_TASKS = [
513
+ CustomCEVALEvaluationTask(name="ceval:computer_network", hf_subset="computer_network"),
514
+ CustomCEVALEvaluationTask(name="ceval:operating_system", hf_subset="operating_system"),
515
+ CustomCEVALEvaluationTask(name="ceval:computer_architecture", hf_subset="computer_architecture"),
516
+ CustomCEVALEvaluationTask(name="ceval:college_programming", hf_subset="college_programming"),
517
+ CustomCEVALEvaluationTask(name="ceval:college_physics", hf_subset="college_physics"),
518
+ CustomCEVALEvaluationTask(name="ceval:college_chemistry", hf_subset="college_chemistry"),
519
+ CustomCEVALEvaluationTask(name="ceval:advanced_mathematics", hf_subset="advanced_mathematics"),
520
+ CustomCEVALEvaluationTask(name="ceval:probability_and_statistics", hf_subset="probability_and_statistics"),
521
+ CustomCEVALEvaluationTask(name="ceval:discrete_mathematics", hf_subset="discrete_mathematics"),
522
+ CustomCEVALEvaluationTask(name="ceval:electrical_engineer", hf_subset="electrical_engineer"),
523
+ CustomCEVALEvaluationTask(name="ceval:metrology_engineer", hf_subset="metrology_engineer"),
524
+ CustomCEVALEvaluationTask(name="ceval:high_school_mathematics", hf_subset="high_school_mathematics"),
525
+ CustomCEVALEvaluationTask(name="ceval:high_school_physics", hf_subset="high_school_physics"),
526
+ CustomCEVALEvaluationTask(name="ceval:high_school_chemistry", hf_subset="high_school_chemistry"),
527
+ CustomCEVALEvaluationTask(name="ceval:high_school_biology", hf_subset="high_school_biology"),
528
+ CustomCEVALEvaluationTask(name="ceval:middle_school_mathematics", hf_subset="middle_school_mathematics"),
529
+ CustomCEVALEvaluationTask(name="ceval:middle_school_biology", hf_subset="middle_school_biology"),
530
+ CustomCEVALEvaluationTask(name="ceval:middle_school_physics", hf_subset="middle_school_physics"),
531
+ CustomCEVALEvaluationTask(name="ceval:middle_school_chemistry", hf_subset="middle_school_chemistry"),
532
+ CustomCEVALEvaluationTask(name="ceval:veterinary_medicine", hf_subset="veterinary_medicine"),
533
+ CustomCEVALEvaluationTask(name="ceval:college_economics", hf_subset="college_economics"),
534
+ CustomCEVALEvaluationTask(name="ceval:business_administration", hf_subset="business_administration"),
535
+ CustomCEVALEvaluationTask(name="ceval:marxism", hf_subset="marxism"),
536
+ CustomCEVALEvaluationTask(name="ceval:mao_zedong_thought", hf_subset="mao_zedong_thought"),
537
+ CustomCEVALEvaluationTask(name="ceval:education_science", hf_subset="education_science"),
538
+ CustomCEVALEvaluationTask(name="ceval:teacher_qualification", hf_subset="teacher_qualification"),
539
+ CustomCEVALEvaluationTask(name="ceval:high_school_politics", hf_subset="high_school_politics"),
540
+ CustomCEVALEvaluationTask(name="ceval:high_school_geography", hf_subset="high_school_geography"),
541
+ CustomCEVALEvaluationTask(name="ceval:middle_school_politics", hf_subset="middle_school_politics"),
542
+ CustomCEVALEvaluationTask(name="ceval:middle_school_geography", hf_subset="middle_school_geography"),
543
+ CustomCEVALEvaluationTask(name="ceval:modern_chinese_history", hf_subset="modern_chinese_history"),
544
+ CustomCEVALEvaluationTask(name="ceval:ideological_and_moral_cultivation", hf_subset="ideological_and_moral_cultivation"),
545
+ CustomCEVALEvaluationTask(name="ceval:logic", hf_subset="logic"),
546
+ CustomCEVALEvaluationTask(name="ceval:law", hf_subset="law"),
547
+ CustomCEVALEvaluationTask(name="ceval:chinese_language_and_literature", hf_subset="chinese_language_and_literature"),
548
+ CustomCEVALEvaluationTask(name="ceval:art_studies", hf_subset="art_studies"),
549
+ CustomCEVALEvaluationTask(name="ceval:professional_tour_guide", hf_subset="professional_tour_guide"),
550
+ CustomCEVALEvaluationTask(name="ceval:legal_professional", hf_subset="legal_professional"),
551
+ CustomCEVALEvaluationTask(name="ceval:high_school_chinese", hf_subset="high_school_chinese"),
552
+ CustomCEVALEvaluationTask(name="ceval:high_school_history", hf_subset="high_school_history"),
553
+ CustomCEVALEvaluationTask(name="ceval:middle_school_history", hf_subset="middle_school_history"),
554
+ CustomCEVALEvaluationTask(name="ceval:civil_servant", hf_subset="civil_servant"),
555
+ CustomCEVALEvaluationTask(name="ceval:sports_science", hf_subset="sports_science"),
556
+ CustomCEVALEvaluationTask(name="ceval:plant_protection", hf_subset="plant_protection"),
557
+ CustomCEVALEvaluationTask(name="ceval:basic_medicine", hf_subset="basic_medicine"),
558
+ CustomCEVALEvaluationTask(name="ceval:clinical_medicine", hf_subset="clinical_medicine"),
559
+ CustomCEVALEvaluationTask(name="ceval:urban_and_rural_planner", hf_subset="urban_and_rural_planner"),
560
+ CustomCEVALEvaluationTask(name="ceval:accountant", hf_subset="accountant"),
561
+ CustomCEVALEvaluationTask(name="ceval:fire_engineer", hf_subset="fire_engineer"),
562
+ CustomCEVALEvaluationTask(name="ceval:environmental_impact_assessment_engineer", hf_subset="environmental_impact_assessment_engineer"),
563
+ CustomCEVALEvaluationTask(name="ceval:tax_accountant", hf_subset="tax_accountant"),
564
+ CustomCEVALEvaluationTask(name="ceval:physician", hf_subset="physician"),
565
+ ]

ceval_subject_mapping = {
    'computer_network': ['Computer Network', '计算机网络', 'STEM'],
    'operating_system': ['Operating System', '操作系统', 'STEM'],
    'computer_architecture': ['Computer Architecture', '计算机组成', 'STEM'],
    'college_programming': ['College Programming', '大学编程', 'STEM'],
    'college_physics': ['College Physics', '大学物理', 'STEM'],
    'college_chemistry': ['College Chemistry', '大学化学', 'STEM'],
    'advanced_mathematics': ['Advanced Mathematics', '高等数学', 'STEM'],
    'probability_and_statistics': ['Probability and Statistics', '概率统计', 'STEM'],
    'discrete_mathematics': ['Discrete Mathematics', '离散数学', 'STEM'],
    'electrical_engineer': ['Electrical Engineer', '注册电气工程师', 'STEM'],
    'metrology_engineer': ['Metrology Engineer', '注册计量师', 'STEM'],
    'high_school_mathematics': ['High School Mathematics', '高中数学', 'STEM'],
    'high_school_physics': ['High School Physics', '高中物理', 'STEM'],
    'high_school_chemistry': ['High School Chemistry', '高中化学', 'STEM'],
    'high_school_biology': ['High School Biology', '高中生物', 'STEM'],
    'middle_school_mathematics': ['Middle School Mathematics', '初中数学', 'STEM'],
    'middle_school_biology': ['Middle School Biology', '初中生物', 'STEM'],
    'middle_school_physics': ['Middle School Physics', '初中物理', 'STEM'],
    'middle_school_chemistry': ['Middle School Chemistry', '初中化学', 'STEM'],
    'veterinary_medicine': ['Veterinary Medicine', '兽医学', 'STEM'],
    'college_economics': ['College Economics', '大学经济学', 'Social Science'],
    'business_administration': ['Business Administration', '工商管理', 'Social Science'],
    'marxism': ['Marxism', '马克思主义基本原理', 'Social Science'],
    'mao_zedong_thought': ['Mao Zedong Thought', '毛泽东思想和中国特色社会主义理论体系概论', 'Social Science'],
    'education_science': ['Education Science', '教育学', 'Social Science'],
    'teacher_qualification': ['Teacher Qualification', '教师资格', 'Social Science'],
    'high_school_politics': ['High School Politics', '高中政治', 'Social Science'],
    'high_school_geography': ['High School Geography', '高中地理', 'Social Science'],
    'middle_school_politics': ['Middle School Politics', '初中政治', 'Social Science'],
    'middle_school_geography': ['Middle School Geography', '初中地理', 'Social Science'],
    'modern_chinese_history': ['Modern Chinese History', '近代史纲要', 'Humanities'],
    'ideological_and_moral_cultivation': ['Ideological and Moral Cultivation', '思想道德修养与法律基础', 'Humanities'],
    'logic': ['Logic', '逻辑学', 'Humanities'],
    'law': ['Law', '法学', 'Humanities'],
    'chinese_language_and_literature': ['Chinese Language and Literature', '中国语言文学', 'Humanities'],
    'art_studies': ['Art Studies', '艺术学', 'Humanities'],
    'professional_tour_guide': ['Professional Tour Guide', '导游资格', 'Humanities'],
    'legal_professional': ['Legal Professional', '法律职业资格', 'Humanities'],
    'high_school_chinese': ['High School Chinese', '高中语文', 'Humanities'],
    'high_school_history': ['High School History', '高中历史', 'Humanities'],
    'middle_school_history': ['Middle School History', '初中历史', 'Humanities'],
    'civil_servant': ['Civil Servant', '公务员', 'Other'],
    'sports_science': ['Sports Science', '体育学', 'Other'],
    'plant_protection': ['Plant Protection', '植物保护', 'Other'],
    'basic_medicine': ['Basic Medicine', '基础医学', 'Other'],
    'clinical_medicine': ['Clinical Medicine', '临床医学', 'Other'],
    'urban_and_rural_planner': ['Urban and Rural Planner', '注册城乡规划师', 'Other'],
    'accountant': ['Accountant', '注册会计师', 'Other'],
    'fire_engineer': ['Fire Engineer', '注册消防工程师', 'Other'],
    'environmental_impact_assessment_engineer': ['Environmental Impact Assessment Engineer', '环境影响评价工程师', 'Other'],
    'tax_accountant': ['Tax Accountant', '税务师', 'Other'],
    'physician': ['Physician', '医师资格', 'Other'],
}

def ceval_prompt(line, task_name: str = None):
    # f"以下是中国关于{_ch_name}考试的单项选择题,请选出其中的正确答案。\n{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案: "
    """CEVAL prompt without letters"""
    topic = ceval_subject_mapping[line['subject']][1]
    prompt = f"以下是中国关于{topic.replace('_', ' ')}考试的单项选择题,请选出其中的正确答案。\n题目:"
    prompt += line["question"] + "\n答案:"
    # print(f"ceval_prompt={prompt}")

    return Doc(
        task_name=task_name,
        query=prompt,
        choices=[f" {c}" for c in line["choices"]],
        gold_index=line["answer"],
        instruction=None,
    )

CEVAL_STRING = [(t, f"custom|{t.name}|0|1") for t in CEVAL_TASKS]
_TASKS_STRINGS.extend(CEVAL_STRING)
_TASKS += CEVAL_TASKS
print(f'{",".join([t[1] for t in CEVAL_STRING])}')

############################################################################################################################################################

# common sense reasoning + mmlu + cmmlu
EARLY_SIGNAL_TASKS = ",".join([t[1] for t in COMMON_SENSE_REASONING_STRING] + [t[1] for t in MMLU_STRING] + [t[1] for t in CMMLU_STRING])

# Convert to dict for lighteval
TASKS_TABLE = [task.as_dict() for task in _TASKS]
# You can have a few pre-organised groups of tasks
TASKS_GROUPS = {
    "early-signal": EARLY_SIGNAL_TASKS,
}
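
# Minimal sketch for inspecting what this module registers when it is run directly
# (illustrative only; lighteval itself just imports TASKS_TABLE and TASKS_GROUPS from this file).
if __name__ == "__main__":
    print(f"{len(TASKS_TABLE)} task configs registered")
    print("early-signal task string:")
    print(TASKS_GROUPS["early-signal"])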