BryanW commited on
Commit
2ee4cd6
·
verified ·
1 Parent(s): 3182520

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/README.md +59 -0
  2. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py +26 -0
  3. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml +38 -0
  4. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml +4 -0
  5. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml +4 -0
  6. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml +4 -0
  7. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/utils.py +39 -0
  8. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py +26 -0
  9. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml +38 -0
  10. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml +4 -0
  11. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml +4 -0
  12. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml +4 -0
  13. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/utils.py +39 -0
  14. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/_generate_configs.py +26 -0
  15. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml +39 -0
  16. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml +4 -0
  17. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml +4 -0
  18. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml +4 -0
  19. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/utils.py +39 -0
  20. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/_generate_configs.py +26 -0
  21. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml +21 -0
  22. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml +4 -0
  23. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml +4 -0
  24. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml +4 -0
  25. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/utils.py +41 -0
  26. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py +26 -0
  27. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml +21 -0
  28. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml +4 -0
  29. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml +4 -0
  30. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml +4 -0
  31. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/utils.py +38 -0
  32. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/README.md +62 -0
  33. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot-llama.yaml +84 -0
  34. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml +34 -0
  35. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml +44 -0
  36. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot.yaml +83 -0
  37. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k.yaml +45 -0
  38. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/README.md +52 -0
  39. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval.yaml +26 -0
  40. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval_5_instruct.yaml +11 -0
  41. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval_64_instruct.yaml +11 -0
  42. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval_plus.yaml +3 -0
  43. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/README.md +45 -0
  44. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/ifeval.yaml +29 -0
  45. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/instructions.py +1612 -0
  46. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/instructions_registry.py +168 -0
  47. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/instructions_util.py +1701 -0
  48. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/utils.py +134 -0
  49. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/math500/math500.yaml +12 -0
  50. Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/math500/utils.py +14 -0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/README.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GPQA
2
+
3
+ ### Paper
4
+
5
+ Title: GPQA: A Graduate-Level Google-Proof Q&A Benchmark
6
+
7
+ Abstract: https://arxiv.org/abs/2311.12022
8
+
9
+ We present GPQA, a challenging dataset of 448 multiple-choice questions written by domain experts in biology, physics, and chemistry. We ensure that the questions are high-quality and extremely difficult: experts who have or are pursuing PhDs in the corresponding domains reach 65% accuracy (74% when discounting clear mistakes the experts identified in retrospect), while highly skilled non-expert validators only reach 34% accuracy, despite spending on average over 30 minutes with unrestricted access to the web (i.e., the questions are “Google-proof”). The questions are also difficult for state-of-the-art AI systems, with our strongest GPT-4–based baseline achieving 39% accuracy. If we are to use future AI systems to help us answer very hard questions—for example, when developing new scientific knowledge—we need to develop *scalable oversight* methods that enable humans to supervise their outputs, which may be difficult even if the supervisors are themselves skilled and knowledgeable. The difficulty of GPQA both for skilled non-experts and frontier AI systems should enable realistic scalable oversight experiments, which we hope can help devise ways for human experts to reliably get truthful information from AI systems that surpass human capabilities.
10
+
11
+ Homepage: `https://github.com/idavidrein/gpqa/tree/main`
12
+
13
+ ### Citation
14
+
15
+ ```
16
+ @misc{rein2023gpqa,
17
+ title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark},
18
+ author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman},
19
+ year={2023},
20
+ eprint={2311.12022},
21
+ archivePrefix={arXiv},
22
+ primaryClass={cs.AI}
23
+ }
24
+ ```
25
+
26
+ This dataset is gated, so you will have to accept the terms of use at https://huggingface.co/datasets/Idavidrein/gpqa and login via `huggingface-cli login` using your HF Hub token before running this task.
27
+
28
+ ### Groups, Tags, and Tasks
29
+
30
+ #### Groups
31
+
32
+ None
33
+
34
+ #### Tags
35
+
36
+ * `gpqa`: runs all GPQA variants.
37
+
38
+ #### Tasks
39
+
40
+ * `gpqa_{main, diamond, extended}_zeroshot`
41
+ * `gpqa_{main, diamond, extended}_n_shot`
42
+ * `gpqa_{main, diamond, extended}_generative_n_shot`
43
+ * `gpqa_{main, diamond, extended}_cot_zeroshot`
44
+ * `gpqa_{main, diamond, extended}_cot_n_shot`
45
+
46
+ ### Checklist
47
+
48
+ For adding novel benchmarks/datasets to the library:
49
+
50
+ * [x] Is the task an existing benchmark in the literature?
51
+ * [x] Have you referenced the original paper that introduced the task?
52
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
53
+
54
+
55
+ If other tasks on this dataset are already supported:
56
+
57
+ * [ ] Is the "Main" variant of this task clearly denoted?
58
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
59
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from tqdm import tqdm
3
+
4
+
5
+ def main() -> None:
6
+ subset = ["extended", "diamond", "main"]
7
+ setting = "cot_n_shot"
8
+ for task in tqdm(subset):
9
+ file_name = f"gpqa_{task}_{setting}.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": f"_gpqa_{setting}_yaml",
16
+ "task": f"gpqa_{task}_{setting}",
17
+ "dataset_name": f"gpqa_{task}",
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: Idavidrein/gpqa
2
+ tag: gpqa
3
+ output_type: generate_until
4
+ process_docs: !function utils.process_docs
5
+ training_split: train
6
+ # Because huggingface dataset only has train split
7
+ validation_split: train
8
+ test_split: null
9
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
10
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
11
+ doc_to_target: answer
12
+ filter_list:
13
+ - name: "strict-match"
14
+ filter:
15
+ - function: "regex"
16
+ regex_pattern: "(?<=The answer is )(.*)(?=.)"
17
+ - function: "take_first"
18
+ - name: "flexible-extract"
19
+ filter:
20
+ - function: "multi_choice_regex"
21
+ group_select: -1
22
+ ignore_case: true
23
+ ignore_punctuation: true
24
+ regex_pattern: "(\\([A-Z]\\))"
25
+ - function: "take_first"
26
+ generation_kwargs:
27
+ until:
28
+ - "</s>"
29
+ do_sample: false
30
+ temperature: 0.0
31
+ metric_list:
32
+ - metric: exact_match
33
+ aggregation: mean
34
+ higher_is_better: true
35
+ ignore_case: true
36
+ ignore_punctuation: true
37
+ metadata:
38
+ version: 2.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_diamond
3
+ include: _gpqa_cot_n_shot_yaml
4
+ task: gpqa_diamond_cot_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_extended
3
+ include: _gpqa_cot_n_shot_yaml
4
+ task: gpqa_extended_cot_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_main
3
+ include: _gpqa_cot_n_shot_yaml
4
+ task: gpqa_main_cot_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_n_shot/utils.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import re
3
+
4
+ import datasets
5
+
6
+
7
+ def preprocess(text):
8
+ if text is None:
9
+ return " "
10
+ text = text.strip()
11
+ text = text.replace(" [title]", ". ")
12
+ text = re.sub("\\[.*?\\]", "", text)
13
+ text = text.replace(" ", " ")
14
+ return text
15
+
16
+
17
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
18
+ def _process_doc(doc):
19
+ choices = [
20
+ preprocess(doc["Incorrect Answer 1"]),
21
+ preprocess(doc["Incorrect Answer 2"]),
22
+ preprocess(doc["Incorrect Answer 3"]),
23
+ preprocess(doc["Correct Answer"]),
24
+ ]
25
+
26
+ random.shuffle(choices)
27
+ correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
28
+
29
+ out_doc = {
30
+ "choice1": choices[0],
31
+ "choice2": choices[1],
32
+ "choice3": choices[2],
33
+ "choice4": choices[3],
34
+ "choices": [choices[0], choices[1], choices[2], choices[3]],
35
+ "answer": f"({chr(65 + correct_answer_index)})",
36
+ }
37
+ return out_doc
38
+
39
+ return dataset.map(_process_doc)
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from tqdm import tqdm
3
+
4
+
5
+ def main() -> None:
6
+ subset = ["extended", "diamond", "main"]
7
+ setting = "cot_zeroshot"
8
+ for task in tqdm(subset):
9
+ file_name = f"gpqa_{task}_{setting}.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": f"_gpqa_{setting}_yaml",
16
+ "task": f"gpqa_{task}_{setting}",
17
+ "dataset_name": f"gpqa_{task}",
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: Idavidrein/gpqa
2
+ tag: gpqa
3
+ output_type: generate_until
4
+ process_docs: !function utils.process_docs
5
+ training_split: train
6
+ # Because huggingface dataset only has train split
7
+ validation_split: train
8
+ test_split: null
9
+ doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
10
+ doc_to_target: answer
11
+ filter_list:
12
+ - name: "strict-match"
13
+ filter:
14
+ - function: "regex"
15
+ regex_pattern: "(?<=The answer is )(.*)(?=.)"
16
+ - function: "take_first"
17
+ - name: "flexible-extract"
18
+ filter:
19
+ - function: "multi_choice_regex"
20
+ group_select: -1
21
+ ignore_case: true
22
+ ignore_punctuation: true
23
+ regex_pattern: "(\\([A-Z]\\))"
24
+ - function: "take_first"
25
+ generation_kwargs:
26
+ until:
27
+ - "</s>"
28
+ do_sample: false
29
+ temperature: 0.0
30
+ num_fewshot: 0
31
+ metric_list:
32
+ - metric: exact_match
33
+ aggregation: mean
34
+ higher_is_better: true
35
+ ignore_case: true
36
+ ignore_punctuation: true
37
+ metadata:
38
+ version: 1.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_diamond
3
+ include: _gpqa_cot_zeroshot_yaml
4
+ task: gpqa_diamond_cot_zeroshot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_extended
3
+ include: _gpqa_cot_zeroshot_yaml
4
+ task: gpqa_extended_cot_zeroshot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_main
3
+ include: _gpqa_cot_zeroshot_yaml
4
+ task: gpqa_main_cot_zeroshot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/cot_zeroshot/utils.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import re
3
+
4
+ import datasets
5
+
6
+
7
+ def preprocess(text):
8
+ if text is None:
9
+ return " "
10
+ text = text.strip()
11
+ text = text.replace(" [title]", ". ")
12
+ text = re.sub("\\[.*?\\]", "", text)
13
+ text = text.replace(" ", " ")
14
+ return text
15
+
16
+
17
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
18
+ def _process_doc(doc):
19
+ choices = [
20
+ preprocess(doc["Incorrect Answer 1"]),
21
+ preprocess(doc["Incorrect Answer 2"]),
22
+ preprocess(doc["Incorrect Answer 3"]),
23
+ preprocess(doc["Correct Answer"]),
24
+ ]
25
+
26
+ random.shuffle(choices)
27
+ correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
28
+
29
+ out_doc = {
30
+ "choice1": choices[0],
31
+ "choice2": choices[1],
32
+ "choice3": choices[2],
33
+ "choice4": choices[3],
34
+ "choices": [choices[0], choices[1], choices[2], choices[3]],
35
+ "answer": f"({chr(65 + correct_answer_index)})",
36
+ }
37
+ return out_doc
38
+
39
+ return dataset.map(_process_doc)
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from tqdm import tqdm
3
+
4
+
5
+ def main() -> None:
6
+ subset = ["extended", "diamond", "main"]
7
+ setting = "generative_n_shot"
8
+ for task in tqdm(subset):
9
+ file_name = f"gpqa_{task}_{setting}.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": f"_gpqa_{setting}_yaml",
16
+ "task": f"gpqa_{task}_{setting}",
17
+ "dataset_name": f"gpqa_{task}",
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: Idavidrein/gpqa
2
+ tag: gpqa
3
+ output_type: generate_until
4
+ process_docs: !function utils.process_docs
5
+ training_split: train
6
+ # Because huggingface dataset only has train split
7
+ validation_split: train
8
+ test_split: null
9
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
10
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
11
+ doc_to_target: answer
12
+ filter_list:
13
+ - name: "strict-match"
14
+ filter:
15
+ - function: "regex"
16
+ regex_pattern: "(?<=The answer is )(.*)(?=.)"
17
+ - function: "take_first"
18
+ - name: "flexible-extract"
19
+ filter:
20
+ - function: "multi_choice_regex"
21
+ group_select: -1
22
+ ignore_case: true
23
+ ignore_punctuation: true
24
+ regex_pattern: "(\\([A-Z]\\))"
25
+ - function: "take_first"
26
+ generation_kwargs:
27
+ until:
28
+ - "</s>"
29
+ - "Question:"
30
+ - "<|im_end|>"
31
+ temperature: 0.0
32
+ metric_list:
33
+ - metric: exact_match
34
+ aggregation: mean
35
+ higher_is_better: true
36
+ ignore_case: true
37
+ ignore_punctuation: true
38
+ metadata:
39
+ version: 2.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_diamond
3
+ include: _gpqa_generative_n_shot_yaml
4
+ task: gpqa_diamond_generative_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_extended
3
+ include: _gpqa_generative_n_shot_yaml
4
+ task: gpqa_extended_generative_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_main
3
+ include: _gpqa_generative_n_shot_yaml
4
+ task: gpqa_main_generative_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/generative/utils.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import re
3
+
4
+ import datasets
5
+
6
+
7
+ def preprocess(text):
8
+ if text is None:
9
+ return " "
10
+ text = text.strip()
11
+ text = text.replace(" [title]", ". ")
12
+ text = re.sub("\\[.*?\\]", "", text)
13
+ text = text.replace(" ", " ")
14
+ return text
15
+
16
+
17
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
18
+ def _process_doc(doc):
19
+ choices = [
20
+ preprocess(doc["Incorrect Answer 1"]),
21
+ preprocess(doc["Incorrect Answer 2"]),
22
+ preprocess(doc["Incorrect Answer 3"]),
23
+ preprocess(doc["Correct Answer"]),
24
+ ]
25
+
26
+ random.shuffle(choices)
27
+ correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
28
+
29
+ out_doc = {
30
+ "choice1": choices[0],
31
+ "choice2": choices[1],
32
+ "choice3": choices[2],
33
+ "choice4": choices[3],
34
+ "choices": [choices[0], choices[1], choices[2], choices[3]],
35
+ "answer": f"({chr(65 + correct_answer_index)})",
36
+ }
37
+ return out_doc
38
+
39
+ return dataset.map(_process_doc)
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from tqdm import tqdm
3
+
4
+
5
+ def main() -> None:
6
+ subset = ["extended", "diamond", "main"]
7
+
8
+ for task in tqdm(subset):
9
+ file_name = f"gpqa_{task}_n_shot.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": "_gpqa_n_shot_yaml",
16
+ "task": f"gpqa_{task}_n_shot",
17
+ "dataset_name": f"gpqa_{task}",
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: Idavidrein/gpqa
2
+ tag: gpqa
3
+ output_type: multiple_choice
4
+ process_docs: !function utils.process_docs
5
+ training_split: train
6
+ # Because huggingface dataset only has train split
7
+ validation_split: train
8
+ test_split: null
9
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
10
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
11
+ doc_to_target: answer
12
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
13
+ metric_list:
14
+ - metric: acc
15
+ aggregation: mean
16
+ higher_is_better: true
17
+ - metric: acc_norm
18
+ aggregation: mean
19
+ higher_is_better: true
20
+ metadata:
21
+ version: 2.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_diamond
3
+ include: _gpqa_n_shot_yaml
4
+ task: gpqa_diamond_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_extended
3
+ include: _gpqa_n_shot_yaml
4
+ task: gpqa_extended_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_main
3
+ include: _gpqa_n_shot_yaml
4
+ task: gpqa_main_n_shot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/n_shot/utils.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import re
3
+
4
+ import datasets
5
+
6
+
7
+ def preprocess(text):
8
+ if text is None:
9
+ return " "
10
+ text = text.strip()
11
+ text = text.replace(" [title]", ". ")
12
+ text = re.sub("\\[.*?\\]", "", text)
13
+ text = text.replace(" ", " ")
14
+ return text
15
+
16
+
17
+ rng = random.Random(42)
18
+
19
+
20
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
21
+ def _process_doc(doc):
22
+ choices = [
23
+ preprocess(doc["Incorrect Answer 1"]),
24
+ preprocess(doc["Incorrect Answer 2"]),
25
+ preprocess(doc["Incorrect Answer 3"]),
26
+ preprocess(doc["Correct Answer"]),
27
+ ]
28
+
29
+ rng.shuffle(choices)
30
+ correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
31
+
32
+ out_doc = {
33
+ "choice1": choices[0],
34
+ "choice2": choices[1],
35
+ "choice3": choices[2],
36
+ "choice4": choices[3],
37
+ "answer": f"({chr(65 + correct_answer_index)})",
38
+ }
39
+ return out_doc
40
+
41
+ return dataset.map(_process_doc)
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from tqdm import tqdm
3
+
4
+
5
+ def main() -> None:
6
+ subset = ["extended", "diamond", "main"]
7
+ setting = "zeroshot"
8
+ for task in tqdm(subset):
9
+ file_name = f"gpqa_{task}_{setting}.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": f"_gpqa_{setting}_yaml",
16
+ "task": f"gpqa_{task}_{setting}",
17
+ "dataset_name": f"gpqa_{task}",
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: Idavidrein/gpqa
2
+ tag: gpqa
3
+ output_type: multiple_choice
4
+ process_docs: !function utils.process_docs
5
+ training_split: train
6
+ # Because huggingface dataset only has train split
7
+ validation_split: train
8
+ test_split: null
9
+ doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
10
+ doc_to_target: answer
11
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
12
+ num_fewshot: 0
13
+ metric_list:
14
+ - metric: acc
15
+ aggregation: mean
16
+ higher_is_better: true
17
+ - metric: acc_norm
18
+ aggregation: mean
19
+ higher_is_better: true
20
+ metadata:
21
+ version: 1.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_diamond
3
+ include: _gpqa_zeroshot_yaml
4
+ task: gpqa_diamond_zeroshot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_extended
3
+ include: _gpqa_zeroshot_yaml
4
+ task: gpqa_extended_zeroshot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: gpqa_main
3
+ include: _gpqa_zeroshot_yaml
4
+ task: gpqa_main_zeroshot
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gpqa/zeroshot/utils.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import re
3
+
4
+ import datasets
5
+
6
+
7
+ def preprocess(text):
8
+ if text is None:
9
+ return " "
10
+ text = text.strip()
11
+ text = text.replace(" [title]", ". ")
12
+ text = re.sub("\\[.*?\\]", "", text)
13
+ text = text.replace(" ", " ")
14
+ return text
15
+
16
+
17
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
18
+ def _process_doc(doc):
19
+ choices = [
20
+ preprocess(doc["Incorrect Answer 1"]),
21
+ preprocess(doc["Incorrect Answer 2"]),
22
+ preprocess(doc["Incorrect Answer 3"]),
23
+ preprocess(doc["Correct Answer"]),
24
+ ]
25
+
26
+ random.shuffle(choices)
27
+ correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
28
+
29
+ out_doc = {
30
+ "choice1": choices[0],
31
+ "choice2": choices[1],
32
+ "choice3": choices[2],
33
+ "choice4": choices[3],
34
+ "answer": f"({chr(65 + correct_answer_index)})",
35
+ }
36
+ return out_doc
37
+
38
+ return dataset.map(_process_doc)
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/README.md ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GSM8k
2
+
3
+ ## Paper
4
+ Training Verifiers to Solve Math Word Problems
5
+ https://arxiv.org/abs/2110.14168
6
+
7
+ State-of-the-art language models can match human performance on many tasks, but
8
+ they still struggle to robustly perform multi-step mathematical reasoning. To
9
+ diagnose the failures of current models and support research, we introduce GSM8K,
10
+ a dataset of 8.5K high quality linguistically diverse grade school math word problems.
11
+ We find that even the largest transformer models fail to achieve high test performance,
12
+ despite the conceptual simplicity of this problem distribution.
13
+
14
+ NOTE: See the official implementation of the task:
15
+ https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py
16
+ for how to make use of the dataset's calculator annotations in your language
17
+ model's sample/generation function.
18
+
19
+ Homepage: https://github.com/openai/grade-school-math
20
+
21
+
22
+ ## Citation
23
+ ```
24
+ @misc{cobbe2021training,
25
+ title={Training Verifiers to Solve Math Word Problems},
26
+ author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
27
+ year={2021},
28
+ eprint={2110.14168},
29
+ archivePrefix={arXiv},
30
+ primaryClass={cs.LG}
31
+ }
32
+ ```
33
+
34
+ ### Groups and Tasks
35
+
36
+ #### Groups
37
+
38
+ - `math_word_problems`
39
+ - `chain_of_thought`
40
+ - `self_consistency`
41
+
42
+ #### Tasks
43
+
44
+ - `gsm8k_yaml`
45
+ - `gsm8k_cot`: GSM8K with Chain-of-Thought
46
+ - `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency
47
+ - `gsm8k_cot_llama`: GSM8K with prompt formatting modified to conform to the evaluation settings described by Meta here: https://huggingface.co/datasets/meta-llama/Meta-Llama-3.1-8B-Instruct-evals/viewer/Meta-Llama-3.1-8B-Instruct-evals__gsm8k__details?row=0
48
+ - Use this task with --fewshot_as_multiturn and --apply_chat_template to replicate Meta's reported performance.
49
+
50
+
51
+ ### Checklist
52
+
53
+ - [x] Is in Eval-harness v1.0 ?
54
+ - [ ] Has been checked for regression from v1.0?
55
+ - [ ] Has been checked for equivalence with original paper methodology?
56
+ - [ ] "Main" checked variant clearly denoted?
57
+
58
+ ### Variant Wishlist
59
+
60
+ - [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
61
+ - [ ] Using Verifiers
62
+ - [ ] Majority voting "without CoT"
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot-llama.yaml ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: main
2
+ dataset_path: gsm8k
3
+ doc_to_target: '{{answer.split(''####'')[-1].strip() if answer is defined else target}}'
4
+ doc_to_text: "Given the following problem, reason and give a final answer to the problem.\nProblem: {{question}}\nYour response should end with \"The final answer is [answer]\" where [answer] is the response to the problem.\n"
5
+ fewshot_config:
6
+ sampler: first_n
7
+ samples:
8
+ - question: There are 15 trees in the grove. Grove workers will plant trees in the
9
+ grove today. After they are done, there will be 21 trees. How many trees did
10
+ the grove workers plant today?
11
+ target: There are 15 trees originally. Then there were 21 trees after some more
12
+ were planted. So there must have been 21 - 15 = 6. The final answer is 6
13
+ - question: If there are 3 cars in the parking lot and 2 more cars arrive, how many
14
+ cars are in the parking lot?
15
+ target: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The final answer
16
+ is 5
17
+ - question: Leah had 32 chocolates and her sister had 42. If they ate 35, how many
18
+ pieces do they have left in total?
19
+ target: Originally, Leah had 32 chocolates. Her sister had 42. So in total they
20
+ had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The final answer is 39
21
+ - question: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12
22
+ lollipops. How many lollipops did Jason give to Denny?
23
+ target: Jason started with 20 lollipops. Then he had 12 after giving some to Denny.
24
+ So he gave Denny 20 - 12 = 8. The final answer is 8
25
+ - question: Shawn has five toys. For Christmas, he got two toys each from his mom and
26
+ dad. How many toys does he have now?
27
+ target: Shawn started with 5 toys. If he got 2 toys each from his mom and dad,
28
+ then that is 4 more toys. 5 + 4 = 9. The final answer is 9
29
+ - question: There were nine computers in the server room. Five more computers were
30
+ installed each day, from monday to thursday. How many computers are now in the
31
+ server room?
32
+ target: There were originally 9 computers. For each of 4 days, 5 more computers
33
+ were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The final answer is
34
+ 29
35
+ - question: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday,
36
+ he lost 2 more. How many golf balls did he have at the end of wednesday?
37
+ target: Michael started with 58 golf balls. After losing 23 on tuesday, he had
38
+ 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The final answer
39
+ is 33
40
+ - question: Olivia has $23. She bought five bagels for $3 each. How much money does
41
+ she have left?
42
+ target: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15
43
+ dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The final answer is 8
44
+ filter_list:
45
+ - filter:
46
+ - function: regex
47
+ group_select: -1
48
+ regex_pattern: The final answer is ((-?[$0-9.,]{2,})|(-?[0-9]+))
49
+ - function: take_first
50
+ name: strict-match
51
+ - filter:
52
+ - function: regex
53
+ group_select: -1
54
+ regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+)
55
+ - function: take_first
56
+ name: flexible-extract
57
+ generation_kwargs:
58
+ do_sample: false
59
+ until:
60
+ - '<|eot_id|>'
61
+ - '<|start_header_id|>user<|end_header_id|>'
62
+ - 'Q:'
63
+ - </s>
64
+ - <|im_end|>
65
+ tag:
66
+ - chain_of_thought
67
+ metadata:
68
+ version: 3.0
69
+ metric_list:
70
+ - aggregation: mean
71
+ higher_is_better: true
72
+ ignore_case: true
73
+ ignore_punctuation: false
74
+ metric: exact_match
75
+ regexes_to_ignore:
76
+ - ','
77
+ - \$
78
+ - '(?s).*#### '
79
+ - \.$
80
+ num_fewshot: 8
81
+ output_type: generate_until
82
+ repeats: 1
83
+ task: gsm8k_cot_llama
84
+ test_split: test
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ include: gsm8k-cot.yaml
2
+ tag:
3
+ - chain_of_thought
4
+ - self_consistency
5
+ task: gsm8k_cot_self_consistency
6
+ generation_kwargs:
7
+ until:
8
+ - "Q:"
9
+ - "\n\n"
10
+ do_sample: true
11
+ temperature: 0.2
12
+ repeats: 64
13
+ filter_list:
14
+ - name: "score-first" # pick only the first response, and report metrics on that
15
+ filter:
16
+ - function: "regex"
17
+ regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
18
+ - function: "take_first"
19
+ - name: "maj@64"
20
+ filter:
21
+ - function: "regex"
22
+ regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
23
+ - function: "majority_vote"
24
+ - function: "take_first"
25
+ - name: "maj@8" # get Maj@8 , via selecting the first 8 responses. Using a better estimator would be optimal.
26
+ filter:
27
+ - function: "take_first_k"
28
+ k: 8
29
+ - function: "regex"
30
+ regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
31
+ - function: "majority_vote"
32
+ - function: "take_first"
33
+ metadata:
34
+ version: 2.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ tag:
2
+ - math_word_problems
3
+ task: gsm8k_cot_zeroshot
4
+ dataset_path: gsm8k
5
+ dataset_name: main
6
+ output_type: generate_until
7
+ training_split: train
8
+ fewshot_split: train
9
+ test_split: test
10
+ doc_to_text: "Q: {{question}}\nA: Let's think step by step."
11
+ doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
12
+ metric_list:
13
+ - metric: exact_match
14
+ aggregation: mean
15
+ higher_is_better: true
16
+ ignore_case: true
17
+ ignore_punctuation: false
18
+ regexes_to_ignore:
19
+ - ","
20
+ - "\\$"
21
+ - "(?s).*#### "
22
+ - "\\.$"
23
+ generation_kwargs:
24
+ until:
25
+ - "Q:"
26
+ - "</s>"
27
+ - "<|im_end|>"
28
+ do_sample: false
29
+ repeats: 1
30
+ num_fewshot: 0
31
+ filter_list:
32
+ - name: "strict-match"
33
+ filter:
34
+ - function: "regex"
35
+ regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
36
+ - function: "take_first"
37
+ - name: "flexible-extract"
38
+ filter:
39
+ - function: "regex"
40
+ group_select: -1
41
+ regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
42
+ - function: "take_first"
43
+ metadata:
44
+ version: 3.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k-cot.yaml ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: main
2
+ dataset_path: gsm8k
3
+ doc_to_target: '{{answer.split(''####'')[-1].strip() if answer is defined else target}}'
4
+ doc_to_text: 'Q: {{question}}
5
+
6
+ A:'
7
+ fewshot_config:
8
+ sampler: first_n
9
+ samples:
10
+ - question: There are 15 trees in the grove. Grove workers will plant trees in the
11
+ grove today. After they are done, there will be 21 trees. How many trees did
12
+ the grove workers plant today?
13
+ target: There are 15 trees originally. Then there were 21 trees after some more
14
+ were planted. So there must have been 21 - 15 = 6. The answer is 6.
15
+ - question: If there are 3 cars in the parking lot and 2 more cars arrive, how many
16
+ cars are in the parking lot?
17
+ target: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer
18
+ is 5.
19
+ - question: Leah had 32 chocolates and her sister had 42. If they ate 35, how many
20
+ pieces do they have left in total?
21
+ target: Originally, Leah had 32 chocolates. Her sister had 42. So in total they
22
+ had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.
23
+ - question: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12
24
+ lollipops. How many lollipops did Jason give to Denny?
25
+ target: Jason started with 20 lollipops. Then he had 12 after giving some to Denny.
26
+ So he gave Denny 20 - 12 = 8. The answer is 8.
27
+ - question: Shawn has five toys. For Christmas, he got two toys each from his mom and
28
+ dad. How many toys does he have now?
29
+ target: Shawn started with 5 toys. If he got 2 toys each from his mom and dad,
30
+ then that is 4 more toys. 5 + 4 = 9. The answer is 9.
31
+ - question: There were nine computers in the server room. Five more computers were
32
+ installed each day, from monday to thursday. How many computers are now in the
33
+ server room?
34
+ target: There were originally 9 computers. For each of 4 days, 5 more computers
35
+ were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is
36
+ 29.
37
+ - question: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday,
38
+ he lost 2 more. How many golf balls did he have at the end of wednesday?
39
+ target: Michael started with 58 golf balls. After losing 23 on tuesday, he had
40
+ 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer
41
+ is 33.
42
+ - question: Olivia has $23. She bought five bagels for $3 each. How much money does
43
+ she have left?
44
+ target: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15
45
+ dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.
46
+ filter_list:
47
+ - filter:
48
+ - function: regex
49
+ regex_pattern: The answer is (\-?[0-9\.\,]+).
50
+ - function: take_first
51
+ name: strict-match
52
+ - filter:
53
+ - function: regex
54
+ group_select: -1
55
+ regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+)
56
+ - function: take_first
57
+ name: flexible-extract
58
+ generation_kwargs:
59
+ do_sample: false
60
+ until:
61
+ - 'Q:'
62
+ - </s>
63
+ - <|im_end|>
64
+ tag:
65
+ - chain_of_thought
66
+ metadata:
67
+ version: 3.0
68
+ metric_list:
69
+ - aggregation: mean
70
+ higher_is_better: true
71
+ ignore_case: true
72
+ ignore_punctuation: false
73
+ metric: exact_match
74
+ regexes_to_ignore:
75
+ - ','
76
+ - \$
77
+ - '(?s).*#### '
78
+ - \.$
79
+ num_fewshot: 8
80
+ output_type: generate_until
81
+ repeats: 1
82
+ task: gsm8k_cot
83
+ test_split: test
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/gsm8k/gsm8k.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ tag:
2
+ - math_word_problems
3
+ task: gsm8k
4
+ dataset_path: gsm8k
5
+ dataset_name: main
6
+ output_type: generate_until
7
+ training_split: train
8
+ fewshot_split: train
9
+ test_split: test
10
+ doc_to_text: "Question: {{question}}\nAnswer:"
11
+ doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
12
+ metric_list:
13
+ - metric: exact_match
14
+ aggregation: mean
15
+ higher_is_better: true
16
+ ignore_case: true
17
+ ignore_punctuation: false
18
+ regexes_to_ignore:
19
+ - ","
20
+ - "\\$"
21
+ - "(?s).*#### "
22
+ - "\\.$"
23
+ generation_kwargs:
24
+ until:
25
+ - "Question:"
26
+ - "</s>"
27
+ - "<|im_end|>"
28
+ do_sample: false
29
+ temperature: 0.0
30
+ repeats: 1
31
+ num_fewshot: 5
32
+ filter_list:
33
+ - name: "strict-match"
34
+ filter:
35
+ - function: "regex"
36
+ regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
37
+ - function: "take_first"
38
+ - name: "flexible-extract"
39
+ filter:
40
+ - function: "regex"
41
+ group_select: -1
42
+ regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
43
+ - function: "take_first"
44
+ metadata:
45
+ version: 3.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/README.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # HumanEval
2
+
3
+ ## Paper
4
+ Evaluating Large Language Models Trained on Code
5
+ https://arxiv.org/abs/2107.03374
6
+
7
+ We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8% of the problems, while GPT-3 solves 0% and GPT-J solves 11.4%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2% of our problems with 100 samples per problem. Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.
8
+
9
+ Homepage: https://github.com/openai/human-eval
10
+
11
+ Note: For instruct tuned models, we recommend the instruct variant. That uses a gen_prefix to ensure the model completes the partial code snippet (might not work with all APIs)
12
+
13
+ ## Citation
14
+ ```
15
+ @article{chen2021codex,
16
+ title={Evaluating Large Language Models Trained on Code},
17
+ author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser and Mohammad Bavarian and Clemens Winter and Philippe Tillet and Felipe Petroski Such and Dave Cummings and Matthias Plappert and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain and William Saunders and Christopher Hesse and Andrew N. Carr and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
18
+ year={2021},
19
+ eprint={2107.03374},
20
+ archivePrefix={arXiv},
21
+ primaryClass={cs.LG}
22
+ }
23
+ ```
24
+
25
+ ### Groups and Tasks
26
+
27
+ #### Groups
28
+
29
+ * Not part of a group yet.
30
+
31
+ #### Tasks
32
+
33
+ - `humaneval` pass@1
34
+ - `humaneval_64` pass@64 variant
35
+ - `humaneval_instruct`: pass@1 with config more appropriate for instruct models. (implementation taken from llama [evals](https://huggingface.co/datasets/meta-llama/Llama-3.1-8B-Instruct-evals/viewer/Llama-3.1-8B-Instruct-evals__human_eval__details?row=0))
36
+ - `humaneval_instruct_64`: pass@64 variant
37
+
38
+ ### Checklist
39
+
40
+ For adding novel benchmarks/datasets to the library:
41
+ * [ ] Is the task an existing benchmark in the literature?
42
+ * [ ] Have you referenced the original paper that introduced the task?
43
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
44
+
45
+
46
+ If other tasks on this dataset are already supported:
47
+ * [ ] Is the "Main" variant of this task clearly denoted?
48
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
49
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
50
+
51
+ ### Changelog
52
+ v2 20-MAR-2025: `humaneval_instruct`, `humaneval_instruct_64`: fixed typo in gen_prefix
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval.yaml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ task: humaneval
2
+ dataset_path: openai/openai_humaneval
3
+ unsafe_code: true
4
+ output_type: generate_until
5
+ test_split: test
6
+ doc_to_text: "{{prompt}}"
7
+ doc_to_target: "{{test}}\ncheck({{entry_point}})"
8
+ metric_list:
9
+ - metric: !function utils.pass_at_k
10
+ aggregation: mean
11
+ higher_is_better: true
12
+ k: [1]
13
+ generation_kwargs:
14
+ until:
15
+ - "[DONE]"
16
+ max_gen_toks: 1024
17
+ do_sample: false
18
+ repeats: 1
19
+ num_fewshot: 0
20
+ filter_list:
21
+ - name: "create_test"
22
+ filter:
23
+ - function: "custom"
24
+ filter_fn: !function utils.build_predictions
25
+ metadata:
26
+ version: 1.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval_5_instruct.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ include: humaneval_5.yaml
2
+ task: humaneval_5_instruct
3
+ doc_to_text: "Write a solution to the following problem and make sure that it passes the tests:\n```{{prompt}}"
4
+ gen_prefix: "Here is the completed function:\n```python\n{{prompt}}\n"
5
+ filter_list:
6
+ - name: "create_test"
7
+ filter:
8
+ - function: "custom"
9
+ filter_fn: !function utils.build_predictions_instruct
10
+ metadata:
11
+ version: 2.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval_64_instruct.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ include: humaneval_64.yaml
2
+ task: humaneval_64_instruct
3
+ doc_to_text: "Write a solution to the following problem and make sure that it passes the tests:\n```{{prompt}}"
4
+ gen_prefix: "Here is the completed function:\n```python\n{{prompt}}\n"
5
+ filter_list:
6
+ - name: "create_test"
7
+ filter:
8
+ - function: "custom"
9
+ filter_fn: !function utils.build_predictions_instruct
10
+ metadata:
11
+ version: 2.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/humaneval/humaneval_plus.yaml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ include: humaneval.yaml
2
+ task: humaneval_plus
3
+ dataset_path: evalplus/humanevalplus
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/README.md ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # IFEval
2
+
3
+ ### Paper
4
+
5
+ Title: Instruction-Following Evaluation for Large Language Models
6
+ Abstract: https://arxiv.org/abs/2311.07911
7
+
8
+ One core capability of Large Language Models (LLMs) is to follow natural language instructions. However, the evaluation of such abilities is not standardized: Human evaluations are expensive, slow, and not objectively reproducible, while LLM-based auto-evaluation is potentially biased or limited by the ability of the evaluator LLM. To overcome these issues, we introduce Instruction-Following Eval (IFEval) for large language models. IFEval is a straightforward and easy-to-reproduce evaluation benchmark. It focuses on a set of "verifiable instructions" such as "write in more than 400 words" and "mention the keyword of AI at least 3 times". We identified 25 types of those verifiable instructions and constructed around 500 prompts, with each prompt containing one or more verifiable instructions. We show evaluation results of two widely available LLMs on the market. Our code and data can be found at https://github.com/google-research/google-research/tree/master/instruction_following_eval
9
+
10
+ Homepage: https://github.com/google-research/google-research/tree/master/instruction_following_eval
11
+
12
+
13
+ ### Citation
14
+
15
+ ```
16
+ @article{zhou2023instructionfollowing,
17
+ title={Instruction-Following Evaluation for Large Language Models},
18
+ author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou},
19
+ journal={arXiv preprint arXiv:2311.07911},
20
+ year={2023},
21
+ }
22
+ ```
23
+
24
+ ### Groups and Tasks
25
+
26
+ #### Groups
27
+
28
+ * Not part of a group yet
29
+
30
+ #### Tasks
31
+
32
+ * `ifeval`
33
+
34
+ ### Checklist
35
+
36
+ For adding novel benchmarks/datasets to the library:
37
+ * [x] Is the task an existing benchmark in the literature?
38
+ * [x] Have you referenced the original paper that introduced the task?
39
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
40
+
41
+
42
+ If other tasks on this dataset are already supported:
43
+ * [ ] Is the "Main" variant of this task clearly denoted?
44
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
45
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/ifeval.yaml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ task: ifeval
2
+ dataset_path: google/IFEval
3
+ dataset_name: null
4
+ output_type: generate_until
5
+ test_split: train
6
+ num_fewshot: 0
7
+ doc_to_text: prompt
8
+ doc_to_target: 0
9
+ generation_kwargs:
10
+ until: []
11
+ do_sample: false
12
+ temperature: 0.0
13
+ max_gen_toks: 1280
14
+ process_results: !function utils.process_results
15
+ metric_list:
16
+ - metric: prompt_level_strict_acc
17
+ aggregation: mean
18
+ higher_is_better: true
19
+ - metric: inst_level_strict_acc
20
+ aggregation: !function utils.agg_inst_level_acc
21
+ higher_is_better: true
22
+ - metric: prompt_level_loose_acc
23
+ aggregation: mean
24
+ higher_is_better: true
25
+ - metric: inst_level_loose_acc
26
+ aggregation: !function utils.agg_inst_level_acc
27
+ higher_is_better: true
28
+ metadata:
29
+ version: 4.0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/instructions.py ADDED
@@ -0,0 +1,1612 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The Google Research Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Library of instructions."""
16
+
17
+ import collections
18
+ import json
19
+ import logging
20
+ import random
21
+ import re
22
+ import string
23
+ from typing import Dict, Optional, Sequence, Union
24
+
25
+ import langdetect
26
+
27
+ from lm_eval.tasks.ifeval import instructions_util
28
+
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+ _InstructionArgsDtype = Optional[Dict[str, Union[int, str, Sequence[str]]]]
33
+
34
+ _LANGUAGES = instructions_util.LANGUAGE_CODES
35
+
36
+ # The relational operation for comparison.
37
+ _COMPARISON_RELATION = ("less than", "at least")
38
+
39
+ # The maximum number of sentences.
40
+ _MAX_NUM_SENTENCES = 20
41
+
42
+ # The number of placeholders.
43
+ _NUM_PLACEHOLDERS = 4
44
+
45
+ # The number of bullet lists.
46
+ _NUM_BULLETS = 5
47
+
48
+ # The options of constrained response.
49
+ _CONSTRAINED_RESPONSE_OPTIONS = (
50
+ "My answer is yes.",
51
+ "My answer is no.",
52
+ "My answer is maybe.",
53
+ )
54
+
55
+ # The options of starter keywords.
56
+ _STARTER_OPTIONS = (
57
+ "I would say",
58
+ "My answer is",
59
+ "I believe",
60
+ "In my opinion",
61
+ "I think",
62
+ "I reckon",
63
+ "I feel",
64
+ "From my perspective",
65
+ "As I see it",
66
+ "According to me",
67
+ "As far as I'm concerned",
68
+ "To my understanding",
69
+ "In my view",
70
+ "My take on it is",
71
+ "As per my perception",
72
+ )
73
+
74
+ # The options of ending keywords.
75
+ # TODO(jeffreyzhou) add more ending options
76
+ _ENDING_OPTIONS = ("Any other questions?", "Is there anything else I can help with?")
77
+
78
+ # The number of highlighted sections.
79
+ _NUM_HIGHLIGHTED_SECTIONS = 4
80
+
81
+ # The section splitter.
82
+ _SECTION_SPLITER = ("Section", "SECTION")
83
+
84
+ # The number of sections.
85
+ _NUM_SECTIONS = 5
86
+
87
+ # The number of paragraphs.
88
+ _NUM_PARAGRAPHS = 5
89
+
90
+ # The postscript marker.
91
+ _POSTSCRIPT_MARKER = ("P.S.", "P.P.S")
92
+
93
+ # The number of keywords.
94
+ _NUM_KEYWORDS = 2
95
+
96
+ # The occurrences of a single keyword.
97
+ _KEYWORD_FREQUENCY = 3
98
+
99
+ # The occurrences of a single letter.
100
+ _LETTER_FREQUENCY = 10
101
+
102
+ # The occurrences of words with all capital letters.
103
+ _ALL_CAPITAL_WORD_FREQUENCY = 20
104
+
105
+ # The number of words in the response.
106
+ _NUM_WORDS_LOWER_LIMIT = 100
107
+ _NUM_WORDS_UPPER_LIMIT = 500
108
+
109
+
110
+ class Instruction:
111
+ """An instruction template."""
112
+
113
+ def __init__(self, instruction_id):
114
+ self.id = instruction_id
115
+
116
+ def build_description(self, **kwargs):
117
+ raise NotImplementedError("`build_description` not implemented.")
118
+
119
+ def get_instruction_args(self):
120
+ raise NotImplementedError("`get_instruction_args` not implemented.")
121
+
122
+ def get_instruction_args_keys(self):
123
+ raise NotImplementedError("`get_instruction_args_keys` not implemented.")
124
+
125
+ def check_following(self, value):
126
+ raise NotImplementedError("`check_following` not implemented.")
127
+
128
+
129
+ class ResponseLanguageChecker(Instruction):
130
+ """Check the language of the entire response."""
131
+
132
+ def build_description(self, *, language=None):
133
+ """Build the instruction description.
134
+
135
+ Args:
136
+ language: A string representing the expected language of the response. The
137
+ language has to comply to the 97 types defined in
138
+ `langid.py` (https://pypi.org/project/langid/1.1.5/), which follows
139
+ ISO 639-1 codes (https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes);
140
+ for example, `en` for English, `zh` for Chinese, `fr` for French.
141
+
142
+ Returns:
143
+ A string representing the instruction description.
144
+ """
145
+ self._language = language
146
+ if self._language is None:
147
+ self._language = random.choice(list(_LANGUAGES.keys()))
148
+ # TODO(tianjianlu): opens the description generation to more choices.
149
+ self._description_pattern = (
150
+ "Your ENTIRE response should be in {language} language, no other "
151
+ + "language is allowed."
152
+ )
153
+ return self._description_pattern.format(language=_LANGUAGES[self._language])
154
+
155
+ def get_instruction_args(self):
156
+ """Returns the keyword args of `build_description`."""
157
+ return {"language": self._language}
158
+
159
+ def get_instruction_args_keys(self):
160
+ """Returns the args keys of `build_description`."""
161
+ return ["language"]
162
+
163
+ def check_following(self, value):
164
+ """Check if the language of the entire response follows the instruction.
165
+
166
+ Args:
167
+ value: A string representing the response.
168
+
169
+ Returns:
170
+ True if the language of `value` follows instruction; otherwise False.
171
+ """
172
+ assert isinstance(value, str)
173
+
174
+ try:
175
+ return langdetect.detect(value) == self._language
176
+ except langdetect.LangDetectException as e:
177
+ # Count as instruction is followed.
178
+ logging.error(
179
+ "Unable to detect language for text %s due to %s", value, e
180
+ ) # refex: disable=pytotw.037
181
+ return True
182
+
183
+
184
class NumberOfSentences(Instruction):
    """Check the number of sentences."""

    def build_description(self, *, num_sentences=None, relation=None):
        """Build the instruction description.

        Args:
          num_sentences: An integer specifying the sentence-count threshold.
          relation: A string in (`less than`, `at least`) selecting the
            comparison:
            'less than' means the actual number of sentences < the threshold;
            'at least' means the actual number of sentences >= the threshold.

        Returns:
          A string representing the instruction description.
        """
        # A missing or negative threshold is replaced by a random one.
        if num_sentences is None or num_sentences < 0:
            self._num_sentences_threshold = random.randint(1, _MAX_NUM_SENTENCES)
        else:
            self._num_sentences_threshold = num_sentences

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation in _COMPARISON_RELATION:
            self._comparison_relation = relation
        else:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )

        self._description_pattern = (
            "Your response should contain {relation} {num_sentences} sentences."
        )
        return self._description_pattern.format(
            relation=self._comparison_relation,
            num_sentences=self._num_sentences_threshold,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_sentences": self._num_sentences_threshold,
            "relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_sentences", "relation"]

    def check_following(self, value):
        """Check if the number of sentences follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the response follows the instruction.
        """
        num_sentences = instructions_util.count_sentences(value)
        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_sentences < self._num_sentences_threshold
        if self._comparison_relation == _COMPARISON_RELATION[1]:
            return num_sentences >= self._num_sentences_threshold
254
+
255
+
256
class PlaceholderChecker(Instruction):
    """Check the placeholders in template writing."""

    def build_description(self, *, num_placeholders=None):
        """Build the instruction description.

        Args:
          num_placeholders: An integer denoting the minimum number of
            placeholders required in the response.

        Returns:
          A string representing the instruction description.
        """
        # Substitute a random count when none (or a negative one) is supplied.
        if num_placeholders is None or num_placeholders < 0:
            num_placeholders = random.randint(1, _NUM_PLACEHOLDERS)
        self._num_placeholders = num_placeholders
        self._description_pattern = (
            "The response must contain at least {num_placeholders} placeholders "
            "represented by square brackets, such as [address]."
        )
        return self._description_pattern.format(
            num_placeholders=self._num_placeholders
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_placeholders": self._num_placeholders}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_placeholders"]

    def check_following(self, value):
        """Check if the number of placeholders follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the actual number of bracketed placeholders in the response
          is greater than or equal to `num_placeholders`; otherwise, False.
        """
        # A placeholder is any (non-greedy) bracketed span, e.g. [address].
        return len(re.findall(r"\[.*?\]", value)) >= self._num_placeholders
299
+
300
+
301
class BulletListChecker(Instruction):
    """Checks the bullet list in the prompt."""

    def build_description(self, *, num_bullets=None):
        """Build the instruction description.

        Args:
          num_bullets: An integer specifying the exact number of bullet points
            required to appear in the response.

        Returns:
          A string representing the instruction description.
        """
        if num_bullets is None or num_bullets < 0:
            num_bullets = random.randint(1, _NUM_BULLETS)
        self._num_bullets = num_bullets
        self._description_pattern = (
            "Your answer must contain exactly {num_bullets} bullet points. "
            "Use the markdown bullet points such as:\n"
            "* This is point 1. \n"
            "* This is point 2"
        )
        return self._description_pattern.format(num_bullets=self._num_bullets)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_bullets": self._num_bullets}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_bullets"]

    def check_following(self, value):
        r"""Check if the number of bullet lists meets the requirement.

        Args:
          value: A string representing the response. The response is expected
            to contain bullet lines starting with `*` or `-`.

        Returns:
          True if the actual number of bullet lines equals the requirement.
        """
        # `*` bullets: a star not followed by a second star (excludes `**bold**`).
        star_bullets = re.findall(r"^\s*\*[^\*].*$", value, flags=re.MULTILINE)
        # `-` bullets.
        dash_bullets = re.findall(r"^\s*-.*$", value, flags=re.MULTILINE)
        return len(star_bullets) + len(dash_bullets) == self._num_bullets
348
+
349
+
350
class ConstrainedResponseChecker(Instruction):
    """Checks the constrained response."""

    def build_description(self):
        """Build the instruction description."""
        # The sequence of allowed response options.
        self._constrained_responses = _CONSTRAINED_RESPONSE_OPTIONS
        self._description_pattern = (
            "Answer with one of the following options: {response_options}"
        )
        return self._description_pattern.format(
            response_options=self._constrained_responses
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response matches the constrained options.

        Args:
          value: A string representing the response.

        Returns:
          True if the response contains one of the constrained options as a
          substring; otherwise False.
        """
        stripped = value.strip()
        return any(option in stripped for option in self._constrained_responses)
387
+
388
+
389
class ConstrainedStartChecker(Instruction):
    """Checks the response start."""

    def build_description(self, *, starter=None):
        """Build the instruction description.

        Args:
          starter: A string representing the keyword that the response should
            start with.

        Returns:
          A string representing the instruction description.
        """
        self._starter = starter.strip() if isinstance(starter, str) else starter
        if self._starter is None:
            self._starter = random.choice(_STARTER_OPTIONS)
        self._description_pattern = (
            "During the conversation, when it is your turn, "
            + "please always start with {starter}"
        )
        return self._description_pattern.format(starter=self._starter)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"starter": self._starter}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["starter"]

    def check_following(self, value):
        """Checks if the response starts with the constrained keyword or phrase.

        Args:
          value: A string representing the response.

        Returns:
          True if some line of the response starts (after optional whitespace)
          with the configured starter phrase; otherwise, False.
        """
        # Bug fix: the starter is a literal phrase, so escape it before
        # embedding it in the pattern; otherwise regex metacharacters in the
        # starter (e.g. '.', '?') are interpreted as syntax.
        response_pattern = r"^\s*" + re.escape(self._starter) + r".*$"
        response_with_constrained_start = re.search(
            response_pattern, value, flags=re.MULTILINE
        )
        return bool(response_with_constrained_start)
434
+
435
+
436
class HighlightSectionChecker(Instruction):
    """Checks the highlighted section."""

    def build_description(self, *, num_highlights=None):
        """Build the instruction description.

        Args:
          num_highlights: An integer specifying the minimum number of
            highlighted sections.

        Returns:
          A string representing the instruction description.
        """
        if num_highlights is None or num_highlights < 0:
            num_highlights = random.randint(1, _NUM_HIGHLIGHTED_SECTIONS)
        self._num_highlights = num_highlights

        self._description_pattern = (
            "Highlight at least {num_highlights} sections in your answer with "
            "markdown, i.e. *highlighted section*."
        )
        return self._description_pattern.format(num_highlights=self._num_highlights)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_highlights": self._num_highlights}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_highlights"]

    def check_following(self, value):
        """Checks if the number of highlighted sections meets the requirement.

        Args:
          value: a string representing the response. The response is expected
            to contain highlighted sections in the format of *highlighted*.

        Returns:
          True if the number of non-empty `*...*` / `**...**` spans meets the
          minimum requirement; otherwise False.
        """
        # Count single-star spans whose content is non-empty once the stars
        # are stripped (an empty `**` pair contributes nothing here).
        single = sum(
            1 for span in re.findall(r"\*[^\n\*]*\*", value)
            if span.strip("*").strip()
        )
        # Count double-star spans with non-empty content.
        double = sum(
            1 for span in re.findall(r"\*\*[^\n\*]*\*\*", value)
            if span.removeprefix("**").removesuffix("**").strip()
        )
        return single + double >= self._num_highlights
490
+
491
+
492
class SectionChecker(Instruction):
    """Checks the sections."""

    def build_description(self, *, section_spliter=None, num_sections=None):
        """Build the instruction description.

        Args:
          section_spliter: A string representing the section-splitter keyword
            that marks a new section, i.e., `Section` or `SECTION`.
          num_sections: An integer specifying the number of sections.

        Returns:
          A string representing the instruction description.
        """
        self._section_spliter = (
            section_spliter.strip()
            if isinstance(section_spliter, str)
            else section_spliter
        )
        if self._section_spliter is None:
            self._section_spliter = random.choice(_SECTION_SPLITER)

        self._num_sections = num_sections
        if self._num_sections is None or self._num_sections < 0:
            self._num_sections = random.randint(1, _NUM_SECTIONS)

        self._description_pattern = (
            "Your response must have {num_sections} sections. Mark the beginning "
            + "of each section with {section_spliter} X, such as:\n"
            + "{section_spliter} 1\n"
            + "[content of section 1]\n"
            + "{section_spliter} 2\n"
            + "[content of section 2]"
        )

        return self._description_pattern.format(
            num_sections=self._num_sections, section_spliter=self._section_spliter
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "section_spliter": self._section_spliter,
            "num_sections": self._num_sections,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["section_spliter", "num_sections"]

    def check_following(self, value):
        """Checks the response contains multiple sections.

        Args:
          value: A string representing the response. A new section starts with
            e.g. `Section 1`, where the number denotes the section index.

        Returns:
          True if the number of sections in the response is greater than or
          equal to the required number of sections; otherwise, False.
        """
        # Bug fix: escape the splitter keyword — it is a literal marker, and
        # regex metacharacters in a custom splitter would corrupt the pattern.
        section_splitter_patten = (
            r"\s?" + re.escape(self._section_spliter) + r"\s?\d+\s?"
        )
        sections = re.split(section_splitter_patten, value)
        # Splitting on N markers yields N+1 pieces; the leading piece is the
        # pre-section preamble, so subtract one.
        num_sections = len(sections) - 1
        return num_sections >= self._num_sections
559
+
560
+
561
class ParagraphChecker(Instruction):
    """Checks the paragraphs."""

    def build_description(self, *, num_paragraphs=None):
        """Build the instruction description.

        Args:
          num_paragraphs: An integer specifying the number of paragraphs.

        Returns:
          A string representing the instruction description.
        """
        if num_paragraphs is None or num_paragraphs < 0:
            num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)
        self._num_paragraphs = num_paragraphs

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            "Paragraphs are separated with the markdown divider: ***"
        )
        return self._description_pattern.format(num_paragraphs=self._num_paragraphs)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_paragraphs": self._num_paragraphs}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs"]

    def check_following(self, value):
        """Checks the response contains required number of paragraphs.

        Args:
          value: A string representing the response. The response may contain
            paragraphs that are separated by the markdown divider: `***`.

        Returns:
          True if the actual number of paragraphs is the same as required;
          otherwise, False.
        """
        segments = re.split(r"\s?\*\*\*\s?", value)
        count = len(segments)
        last_index = len(segments) - 1

        for index, segment in enumerate(segments):
            if segment.strip():
                continue
            # Blank segments at either edge are tolerated (a leading or
            # trailing divider); a blank interior segment is a failure.
            if index == 0 or index == last_index:
                count -= 1
            else:
                return False

        return count == self._num_paragraphs
614
+
615
+
616
class PostscriptChecker(Instruction):
    """Checks the postscript."""

    def build_description(self, *, postscript_marker=None):
        """Build the instruction description.

        Args:
          postscript_marker: A string containing the keyword that marks the
            start of the postscript section.

        Returns:
          A string representing the instruction description.
        """
        self._postscript_marker = (
            postscript_marker.strip()
            if isinstance(postscript_marker, str)
            else postscript_marker
        )
        if self._postscript_marker is None:
            self._postscript_marker = random.choice(_POSTSCRIPT_MARKER)

        self._description_pattern = (
            "At the end of your response, please explicitly add a postscript "
            + "starting with {postscript}"
        )

        return self._description_pattern.format(postscript=self._postscript_marker)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"postscript_marker": self._postscript_marker}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["postscript_marker"]

    def check_following(self, value):
        """Checks if the response follows the postscript format.

        Args:
          value: a string representing the response. The response is expected
            to contain a postscript section.

        Returns:
          True if the response contains a postscript section starting with
          the configured marker; otherwise False.
        """
        value = value.lower()
        # The two common markers accept flexible spacing/punctuation variants.
        if self._postscript_marker == "P.P.S":
            postscript_pattern = r"\s*p\.\s?p\.\s?s.*$"
        elif self._postscript_marker == "P.S.":
            postscript_pattern = r"\s*p\.\s?s\..*$"
        else:
            # Bug fix: escape custom markers before embedding them in the
            # pattern — otherwise a '.' in the marker matches any character
            # and the check becomes overly permissive.
            postscript_pattern = (
                r"\s*" + re.escape(self._postscript_marker.lower()) + r".*$"
            )
        postscript = re.findall(postscript_pattern, value, flags=re.MULTILINE)
        return True if postscript else False
672
+
673
+
674
class RephraseChecker(Instruction):
    """Checks the rephrase."""

    def build_description(self, *, original_message):
        """Build the instruction description.

        Args:
          original_message: A string representing the original message. The
            rephrased response should only change the words/sentences between
            two asterisks, e.g. *change me*; both messages must contain such a
            marked span.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `original_message` has no *change me* span.
        """
        if not self.is_change(original_message):
            raise ValueError(
                f"Message {original_message} does not contain changes "
                "in the form of *change me*."
            )

        self._reference_without_change = original_message
        # NOTE: the concatenation below intentionally reproduces the original
        # text verbatim, including its missing inter-segment spaces.
        self._description = (
            "Rephrasing: Your rephrased response should only"
            + "change the words/sentences in between two asterisks"
            + "such as *change me*."
        )
        return self._description

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"original_message": self._reference_without_change}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_message"]

    def check_following(self, value):
        r"""Checks if the rephrasing follows the instruction.

        Args:
          value: A string representing the response, expected to rephrase the
            reference message.

        Returns:
          True if `value` and the reference only differ inside *...* spans;
          otherwise, False.

        Raises:
          ValueError: If `value` has no *change me* span.
        """
        if not self.is_change(value):
            raise ValueError(
                f"value {value} does not contain changes in the form of *change me*."
            )

        # Comparing the texts with every *...* span removed isolates the
        # portions that were not allowed to change.
        return self.strip_changes(value) == self.strip_changes(
            self._reference_without_change
        )

    def is_change(self, response):
        """Check if there is change in the response in the form of *change me*."""
        return re.search(r"\*.*\*", response)

    def strip_changes(self, response):
        """Strips off the changes."""
        return re.sub(r"\*.*\*", "", response)
740
+
741
+
742
class KeywordChecker(Instruction):
    """Check the existence of certain keywords."""

    def build_description(self, *, keywords=None):
        """Build the instruction description.

        Args:
          keywords: A sequence of strings representing the keywords that are
            expected in the response.

        Returns:
          A string representing the instruction description.
        """
        if not keywords:
            self._keywords = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            self._keywords = keywords
        # Sort for a deterministic description regardless of input order.
        self._keywords = sorted(self._keywords)

        self._description_pattern = "Include keywords {keywords} in the response."

        return self._description_pattern.format(keywords=self._keywords)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"keywords": self._keywords}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keywords"]

    def check_following(self, value):
        """Check if the response contains all expected keywords (case-insensitive)."""
        for keyword in self._keywords:
            # Bug fix: keywords are literal words, not regex patterns — escape
            # them so a keyword like "c++" cannot raise re.error or mis-match.
            if not re.search(re.escape(keyword), value, flags=re.IGNORECASE):
                return False
        return True
782
+
783
+
784
class KeywordFrequencyChecker(Instruction):
    """Check the keyword frequency."""

    def build_description(self, *, keyword=None, frequency=None, relation=None):
        """Build the instruction description.

        Args:
          keyword: A string representing a keyword expected in the response.
          frequency: An integer specifying how many times `keyword` is
            expected to appear in the response.
          relation: A string in (`less than`, `at least`), defining the
            relational operator for comparison:
            if 'less than', the actual number of occurrences < frequency;
            if 'at least', the actual number of occurrences >= frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported value.
        """
        if not keyword:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        else:
            self._keyword = keyword.strip()

        self._frequency = frequency
        if self._frequency is None or self._frequency < 0:
            self._frequency = random.randint(1, _KEYWORD_FREQUENCY)

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = (
            "In your response, the word {keyword} should appear {relation} "
            + "{frequency} times."
        )

        return self._description_pattern.format(
            keyword=self._keyword,
            relation=self._comparison_relation,
            frequency=self._frequency,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "keyword": self._keyword,
            "frequency": self._frequency,
            "relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keyword", "frequency", "relation"]

    def check_following(self, value):
        """Checks if the response contains the keyword with required frequency."""
        # Bug fix: the keyword is a literal word, not a regex pattern — escape
        # it so metacharacters cannot raise re.error or skew the count.
        actual_occurrences = len(
            re.findall(re.escape(self._keyword), value, flags=re.IGNORECASE)
        )

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return actual_occurrences < self._frequency
        elif self._comparison_relation == _COMPARISON_RELATION[1]:
            return actual_occurrences >= self._frequency
853
+
854
+
855
class NumberOfWords(Instruction):
    """Checks the number of words."""

    def build_description(self, *, num_words=None, relation=None):
        """Build the instruction description.

        Args:
          num_words: An integer specifying the word-count threshold for the
            response.
          relation: A string in (`less than`, `at least`) selecting the
            comparison:
            if 'less than', the actual number of words < num_words;
            if 'at least', the actual number of words >= num_words.

        Returns:
          A string representing the instruction description.
        """
        # Fall back to a random threshold when none (or a negative) is given.
        if num_words is None or num_words < 0:
            num_words = random.randint(_NUM_WORDS_LOWER_LIMIT, _NUM_WORDS_UPPER_LIMIT)
        self._num_words = num_words

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation in _COMPARISON_RELATION:
            self._comparison_relation = relation
        else:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )

        self._description_pattern = "Answer with {relation} {num_words} words."

        return self._description_pattern.format(
            relation=self._comparison_relation, num_words=self._num_words
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_words": self._num_words, "relation": self._comparison_relation}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_words", "relation"]

    def check_following(self, value):
        """Checks if the response contains the expected number of words."""
        num_words = instructions_util.count_words(value)

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_words < self._num_words
        if self._comparison_relation == _COMPARISON_RELATION[1]:
            return num_words >= self._num_words
912
+
913
+
914
class JsonFormat(Instruction):
    """Check the Json format."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Entire output should be wrapped in JSON format. You can use markdown"
            " ticks such as ```."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True when the response (minus markdown fences) parses as JSON."""
        stripped = value.strip()
        # Peel off any leading code-fence markers, longest variants first,
        # then a trailing fence.
        for marker in ("```json", "```Json", "```JSON", "```"):
            stripped = stripped.removeprefix(marker)
        stripped = stripped.removesuffix("```").strip()
        try:
            json.loads(stripped)
        except ValueError:
            return False
        return True
947
+
948
+
949
class ParagraphFirstWordCheck(Instruction):
    """Check the paragraph and the first word of the nth paragraph."""

    def build_description(
        self, num_paragraphs=None, nth_paragraph=None, first_word=None
    ):
        r"""Build the instruction description.

        Args:
          num_paragraphs: An integer indicating the number of paragraphs
            expected in the response. A paragraph is a chunk of text separated
            from its neighbors by '\n\n'.
          nth_paragraph: An integer indicating the 1-based index of the
            paragraph whose first word is constrained.
          first_word: A string that represents the required first word of the
            nth paragraph.

        Returns:
          A string representing the instruction description.
        """
        self._num_paragraphs = num_paragraphs
        if self._num_paragraphs is None or self._num_paragraphs < 0:
            self._num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)

        self._nth_paragraph = nth_paragraph
        if (
            self._nth_paragraph is None
            or self._nth_paragraph <= 0
            or self._nth_paragraph > self._num_paragraphs
        ):
            # Bug fix: random.randint is inclusive on both ends, so the upper
            # bound must be _num_paragraphs (not _num_paragraphs + 1) —
            # otherwise the sampled index can exceed the paragraph count and
            # check_following can never succeed.
            self._nth_paragraph = random.randint(1, self._num_paragraphs)

        self._first_word = first_word
        if self._first_word is None:
            self._first_word = instructions_util.generate_keywords(num_keywords=1)[0]
        self._first_word = self._first_word.lower()

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            + "Paragraphs and only paragraphs are separated with each other by two "
            + "new lines as if it was '\\n\\n' in python. "
            + "Paragraph {nth_paragraph} must start with word {first_word}."
        )

        return self._description_pattern.format(
            num_paragraphs=self._num_paragraphs,
            nth_paragraph=self._nth_paragraph,
            first_word=self._first_word,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_paragraphs": self._num_paragraphs,
            "nth_paragraph": self._nth_paragraph,
            "first_word": self._first_word,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs", "nth_paragraph", "first_word"]

    def check_following(self, value):
        """Checks for required number of paragraphs and correct first word.

        Args:
          value: a string representing the response. The response may contain
            paragraphs that are separated by two new lines; the first word of
            the nth paragraph must match the configured word.

        Returns:
          True if the number of paragraphs is the same as required and the
          first word of the specified paragraph is the same as required;
          otherwise, False.
        """
        paragraphs = re.split(r"\n\n", value)
        num_paragraphs = len(paragraphs)

        # Empty fragments (e.g. from leading/trailing blank lines) do not
        # count as paragraphs.
        for paragraph in paragraphs:
            if not paragraph.strip():
                num_paragraphs -= 1

        # Guard against an index past the end of the split result.
        if self._nth_paragraph <= num_paragraphs:
            paragraph = paragraphs[self._nth_paragraph - 1].strip()
            if not paragraph:
                return False
        else:
            return False

        first_word = ""
        punctuation = {".", ",", "?", "!", "'", '"'}

        # Take the first token and drop leading quote marks.
        word = paragraph.split()[0].strip()
        # TODO(jeffrey): make more complex?
        word = word.lstrip("'")
        word = word.lstrip('"')

        # Accumulate characters (lower-cased) up to the first punctuation mark.
        for letter in word:
            if letter in punctuation:
                break
            first_word += letter.lower()

        return num_paragraphs == self._num_paragraphs and first_word == self._first_word
1053
+
1054
+
1055
# TODO(jeffrey) add relation - at least/at most?
class KeySentenceChecker(Instruction):
    """Check the existence of certain key sentences."""

    def build_description(self, key_sentences=None, num_sentences=None):
        """Build the instruction description.

        Args:
          key_sentences: A sequence of strings representing the key sentences
            that are expected in the response.
          num_sentences: The number of key sentences that are expected to be
            seen in the response.

        Returns:
          A string representing the instruction description.
        """
        if not key_sentences:
            # TODO(jeffrey) make a generate sentences function? wonderwords package
            self._key_sentences = set(["For now, this is fine."])
        else:
            self._key_sentences = key_sentences

        if not num_sentences:
            self._num_sentences = random.randint(1, len(self._key_sentences))
        else:
            self._num_sentences = num_sentences

        self._description_pattern = (
            "Include {num_sentences} of the following sentences {key_sentences}"
        )

        return self._description_pattern.format(
            num_sentences=self._num_sentences, key_sentences=self._key_sentences
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_sentences": self._num_sentences,
            "key_sentences": list(self._key_sentences),
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_sentences", "key_sentences"]

    def check_following(self, value):
        """Checks if the response contains the expected key sentences."""
        sentences = instructions_util.split_into_sentences(value)
        # Exactly `num_sentences` of the key sentences must be present.
        matched = sum(1 for key in self._key_sentences if key in sentences)
        return matched == self._num_sentences
1111
+
1112
+
1113
class ForbiddenWords(Instruction):
    """Checks that specified words are not used in response."""

    def build_description(self, forbidden_words=None):
        """Build the instruction description.

        Args:
          forbidden_words: A sequence of strings representing words that are
            not allowed in the response.

        Returns:
          A string representing the instruction description.
        """
        if not forbidden_words:
            self._forbidden_words = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            # De-duplicate, then sort for a deterministic description.
            self._forbidden_words = list(set(forbidden_words))
        self._forbidden_words = sorted(self._forbidden_words)
        self._description_pattern = (
            "Do not include keywords {forbidden_words} in the response."
        )

        return self._description_pattern.format(forbidden_words=self._forbidden_words)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"forbidden_words": self._forbidden_words}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["forbidden_words"]

    def check_following(self, value):
        """Check if the response does not contain the forbidden keywords."""
        for word in self._forbidden_words:
            # Bug fix: the word is literal text, not a regex — escape it so
            # metacharacters (e.g. '+', '?') cannot raise re.error or mis-match.
            if re.search(r"\b" + re.escape(word) + r"\b", value, flags=re.IGNORECASE):
                return False
        return True
1154
+
1155
+
1156
class RephraseParagraph(Instruction):
    """Checks that the paragraph is rephrased."""

    def build_description(self, *, original_paragraph, low, high):
        """Builds the instruction description.

        Args:
          original_paragraph: A string presenting the original paragraph. The
            rephrased response should have between low and high words in common
            with it.
          low: An integer presenting the lower bound of similar words.
          high: An integer representing the upper bound of similar words.

        Returns:
          A string representing the instruction description.
        """
        # TODO(jeffrey) make more encompassing
        self._original_paragraph = original_paragraph
        self._low = low
        self._high = high

        self._description = (
            "Rephrase the following paragraph: "
            "{original_paragraph}\nYour response should have "
            "between {low} and {high} of the same words. "
            "Words are the same if and only if all of the "
            "letters, ignoring cases, are the same. For "
            "example, 'run' is the same as 'Run' but different "
            "to 'ran'."
        )

        return self._description.format(
            original_paragraph=original_paragraph, low=self._low, high=self._high
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return dict(
            original_paragraph=self._original_paragraph,
            low=self._low,
            high=self._high,
        )

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_paragraph", "low", "high"]

    def check_following(self, value):
        """Checks that the word overlap with the original is within [low, high]."""
        response_counts = collections.Counter(re.findall(r"\w+", value.lower()))
        original_counts = collections.Counter(
            re.findall(r"\w+", self._original_paragraph.lower())
        )
        # Multiset intersection: each word contributes min(count in response,
        # count in original) to the number of "same" words.
        shared = sum((response_counts & original_counts).values())
        return self._low <= shared <= self._high
1214
+
1215
+
1216
class TwoResponsesChecker(Instruction):
    """Check that two responses were given."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Give two different responses. Responses and only responses should"
            " be separated by 6 asterisk symbols: ******."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response has two different answers.

        Args:
          value: A string representing the response.

        Returns:
          True if two responses are detected and false otherwise.
        """
        segments = value.split("******")
        last_index = len(segments) - 1
        answers = []
        for index, segment in enumerate(segments):
            if segment.strip():
                answers.append(segment)
            elif index not in (0, last_index):
                # A blank segment between two separators means an empty
                # response was given; only leading/trailing blanks are allowed.
                return False
        return len(answers) == 2 and answers[0].strip() != answers[1].strip()
1256
+
1257
+
1258
class RepeatPromptThenAnswer(Instruction):
    """Checks that Prompt is first repeated then answered."""

    def build_description(self, *, prompt_to_repeat=None):
        """Build the instruction description.

        Args:
          prompt_to_repeat: The prompt that is meant to be repeated.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `prompt_to_repeat` is None or empty.
        """
        if not prompt_to_repeat:
            raise ValueError("prompt_to_repeat must be set.")
        self._prompt_to_repeat = prompt_to_repeat
        self._description_pattern = (
            "First repeat the request word for word without change,"
            " then give your answer (1. do not say any words or characters"
            " before repeating the request; 2. the request you need to repeat"
            " does not include this sentence)"
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"prompt_to_repeat": self._prompt_to_repeat}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["prompt_to_repeat"]

    def check_following(self, value):
        """Checks that the response starts with the prompt (case-insensitive)."""
        expected_prefix = self._prompt_to_repeat.strip().lower()
        return value.strip().lower().startswith(expected_prefix)
1293
+
1294
+
1295
class EndChecker(Instruction):
    """Checks that the prompt ends with a given phrase."""

    def build_description(self, *, end_phrase=None):
        """Build the instruction description.

        Args:
          end_phrase: A string representing the phrase the response should end
            with. If None, a random phrase from `_ENDING_OPTIONS` is used.

        Returns:
          A string representing the instruction description.
        """
        self._end_phrase = (
            end_phrase.strip() if isinstance(end_phrase, str) else end_phrase
        )
        if self._end_phrase is None:
            self._end_phrase = random.choice(_ENDING_OPTIONS)
        self._description_pattern = (
            "Finish your response with this exact phrase {ender}. "
            "No other words should follow this phrase."
        )
        return self._description_pattern.format(ender=self._end_phrase)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"end_phrase": self._end_phrase}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["end_phrase"]

    def check_following(self, value):
        """Checks if the response ends with the expected phrase (case-insensitive)."""
        value = value.strip().strip('"').lower()
        # Normalize into a local copy instead of mutating self._end_phrase,
        # so get_instruction_args keeps reporting the phrase exactly as it
        # was configured even after a check has run.
        end_phrase = self._end_phrase.strip().lower()
        return value.endswith(end_phrase)
1330
+
1331
+
1332
class TitleChecker(Instruction):
    """Checks the response for a title."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your answer must contain a title, wrapped in double angular brackets,"
            " such as <<poem of joy>>."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response contains a non-empty <<title>>."""
        candidates = re.findall(r"<<[^\n]+>>", value)
        # A candidate only counts as a title if something remains after
        # stripping the surrounding brackets and whitespace.
        return any(
            candidate.lstrip("<").rstrip(">").strip() for candidate in candidates
        )
1360
+
1361
+
1362
class LetterFrequencyChecker(Instruction):
    """Checks letter frequency."""

    def build_description(self, *, letter=None, let_frequency=None, let_relation=None):
        """Build the instruction description.

        Args:
          letter: A string representing a letter that is expected in the
            response. A random letter is picked unless this is a single ASCII
            letter.
          let_frequency: An integer specifying the number of times the letter
            is expected to appear in the response.
          let_relation: A string in (`less than`, `at least`), defining the
            relational operator for comparison. If 'less than', the actual
            number of occurrences must be < frequency; if 'at least', it must
            be >= frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `let_relation` is given but not a supported relation.
        """
        # Fall back to a random letter unless a single ASCII letter was given.
        if (
            not letter
            or len(letter) > 1
            or ord(letter.lower()) < 97
            or ord(letter.lower()) > 122
        ):
            chosen = random.choice(list(string.ascii_letters))
        else:
            chosen = letter.strip()
        self._letter = chosen.lower()

        if let_frequency is None or let_frequency < 0:
            self._frequency = random.randint(1, _LETTER_FREQUENCY)
        else:
            self._frequency = let_frequency

        if let_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        else:
            if let_relation not in _COMPARISON_RELATION:
                raise ValueError(
                    "The supported relation for comparison must be in "
                    f"{_COMPARISON_RELATION}, but {let_relation} is given."
                )
            self._comparison_relation = let_relation

        self._description_pattern = (
            "In your response, the letter {letter} should appear {let_relation}"
            " {let_frequency} times."
        )

        return self._description_pattern.format(
            letter=self._letter,
            let_frequency=self._frequency,
            let_relation=self._comparison_relation,
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return dict(
            letter=self._letter,
            let_frequency=self._frequency,
            let_relation=self._comparison_relation,
        )

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["letter", "let_frequency", "let_relation"]

    def check_following(self, value):
        """Checks that the response contains the letter at the right frequency."""
        occurrences = value.lower().count(self._letter)

        # _COMPARISON_RELATION[0] is the 'less than' relation.
        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return occurrences < self._frequency
        return occurrences >= self._frequency
1438
+
1439
+
1440
class CapitalLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all capital letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all capital letters."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response is in English and in all capital letters."""
        assert isinstance(value, str)

        try:
            if not value.isupper():
                return False
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as e:
            # Count as instruction is followed.
            logging.error(
                "Unable to detect language for text %s due to %s", value, e
            )  # refex: disable=pytotw.037
            return True
1469
+
1470
+
1471
class LowercaseLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all lowercase letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all lowercase"
            " letters. No capital letters are allowed."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response is in English and in all lowercase letters."""
        assert isinstance(value, str)

        try:
            if not value.islower():
                return False
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as e:
            # Count as instruction is followed.
            logging.error(
                "Unable to detect language for text %s due to %s", value, e
            )  # refex: disable=pytotw.037
            return True
1501
+
1502
+
1503
class CommaChecker(Instruction):
    """Checks the response for no commas."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "In your entire response, refrain from the use of any commas."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response does not contain commas."""
        # A plain substring test is equivalent to searching for the literal ','.
        return "," not in value
1523
+
1524
+
1525
class CapitalWordFrequencyChecker(Instruction):
    """Checks frequency of words with all capital letters."""

    def build_description(
        self,
        capital_frequency=None,
        capital_relation=None,
    ):
        """Build the instruction description.

        Args:
          capital_frequency: An integer that represents the number of words that
            should be in all capital letters.
          capital_relation: A string that is 'at least' or 'at most' that refers
            to the frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `capital_relation` is given but not a supported
            relation.
        """
        if capital_frequency is None:
            self._frequency = random.randint(1, _ALL_CAPITAL_WORD_FREQUENCY)
        else:
            self._frequency = capital_frequency

        if capital_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        else:
            if capital_relation not in _COMPARISON_RELATION:
                raise ValueError(
                    "The supported relation for comparison must be in "
                    f"{_COMPARISON_RELATION}, but {capital_relation} is given."
                )
            self._comparison_relation = capital_relation

        self._description_pattern = (
            "In your response, words with all capital letters should appear"
            " {relation} {frequency} times."
        )

        return self._description_pattern.format(
            frequency=self._frequency, relation=self._comparison_relation
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return dict(
            capital_frequency=self._frequency,
            capital_relation=self._comparison_relation,
        )

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["capital_frequency", "capital_relation"]

    def check_following(self, value):
        """Checks the frequency of words with all capital letters."""
        # Hyphenated words will count as one word
        tokens = instructions_util.nltk.word_tokenize(value)
        num_capital = sum(1 for token in tokens if token.isupper())

        # _COMPARISON_RELATION[0] is the 'less than' relation.
        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_capital < self._frequency
        return num_capital >= self._frequency
1589
+
1590
+
1591
class QuotationChecker(Instruction):
    """Checks response is wrapped with double quotation marks."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Wrap your entire response with double quotation marks."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response is wrapped with double quotation marks."""
        stripped = value.strip()
        # Require at least two characters so a lone '"' does not pass.
        return (
            len(stripped) > 1
            and stripped.startswith('"')
            and stripped.endswith('"')
        )
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/instructions_registry.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The Google Research Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Registry of all instructions."""
16
+
17
+ from lm_eval.tasks.ifeval import instructions
18
+
19
+
20
# Instruction-group prefixes. A full instruction id is "<group><name>",
# e.g. "keywords:existence".
_KEYWORD = "keywords:"

_LANGUAGE = "language:"

_LENGTH = "length_constraints:"

_CONTENT = "detectable_content:"

_FORMAT = "detectable_format:"

_MULTITURN = "multi-turn:"

_COMBINATION = "combination:"

_STARTEND = "startend:"

_CHANGE_CASES = "change_case:"

_PUNCTUATION = "punctuation:"

# Maps instruction id -> Instruction subclass implementing it.
INSTRUCTION_DICT = {
    _KEYWORD + "existence": instructions.KeywordChecker,
    _KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": instructions.ForbiddenWords,
    _KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
    _LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
    _LENGTH + "number_sentences": instructions.NumberOfSentences,
    _LENGTH + "number_paragraphs": instructions.ParagraphChecker,
    _LENGTH + "number_words": instructions.NumberOfWords,
    _LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
    _CONTENT + "number_placeholders": instructions.PlaceholderChecker,
    _CONTENT + "postscript": instructions.PostscriptChecker,
    _FORMAT + "number_bullet_lists": instructions.BulletListChecker,
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
    _FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
    _FORMAT + "multiple_sections": instructions.SectionChecker,
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": instructions.JsonFormat,
    _FORMAT + "title": instructions.TitleChecker,
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": instructions.TwoResponsesChecker,
    _COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
    _STARTEND + "end_checker": instructions.EndChecker,
    _CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
    _CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
    _CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
    _PUNCTUATION + "no_comma": instructions.CommaChecker,
    _STARTEND + "quotation": instructions.QuotationChecker,
}

# Maps instruction id -> set of instruction ids it conflicts with (i.e. cannot
# be jointly applied to one prompt). Only one direction needs to be listed
# here; `conflict_make` below symmetrizes the relation and adds self-conflicts.
INSTRUCTION_CONFLICTS = {
    _KEYWORD + "existence": {_KEYWORD + "existence"},
    _KEYWORD + "frequency": {_KEYWORD + "frequency"},
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
    _KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
    _LANGUAGE + "response_language": {
        _LANGUAGE + "response_language",
        _FORMAT + "multiple_sections",
        _KEYWORD + "existence",
        _KEYWORD + "frequency",
        _KEYWORD + "forbidden_words",
        _STARTEND + "end_checker",
        _CHANGE_CASES + "english_capital",
        _CHANGE_CASES + "english_lowercase",
    },
    _LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
    _LENGTH + "number_paragraphs": {
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_sentences",
        # NOTE: duplicate of the entry two lines up; harmless in a set literal.
        _LENGTH + "nth_paragraph_first_word",
    },
    _LENGTH + "number_words": {_LENGTH + "number_words"},
    _LENGTH + "nth_paragraph_first_word": {
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_paragraphs",
    },
    _CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
    _CONTENT + "postscript": {_CONTENT + "postscript"},
    _FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    # A fully constrained response conflicts with every other instruction.
    _FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
    _FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
    _FORMAT + "multiple_sections": {
        _FORMAT + "multiple_sections",
        _LANGUAGE + "response_language",
        _FORMAT + "number_highlighted_sections",
    },
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
    ),
    _FORMAT + "title": {_FORMAT + "title"},
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": set(INSTRUCTION_DICT.keys()).difference(
        {
            _KEYWORD + "forbidden_words",
            _KEYWORD + "existence",
            _LANGUAGE + "response_language",
            _FORMAT + "title",
            _PUNCTUATION + "no_comma",
        }
    ),
    _COMBINATION + "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
    ),
    _STARTEND + "end_checker": {_STARTEND + "end_checker"},
    _CHANGE_CASES + "capital_word_frequency": {
        _CHANGE_CASES + "capital_word_frequency",
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
    _CHANGE_CASES + "english_lowercase": {
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
    _STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
}
151
+
152
+
153
def conflict_make(conflicts):
    """Makes sure if A conflicts with B, B will conflict with A.

    Args:
      conflicts: Dictionary of potential conflicts where key is instruction id
        and value is set of instruction ids that it conflicts with.

    Returns:
      Revised version of the dictionary (mutated in place). All instructions
      conflict with themselves. If A conflicts with B, B will conflict with A.
    """
    for instruction_id, conflict_set in conflicts.items():
        # Mirror each listed conflict back onto the other instruction.
        for other_id in conflict_set:
            conflicts[other_id].add(instruction_id)
        # Every instruction conflicts with itself.
        conflict_set.add(instruction_id)
    return conflicts
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/instructions_util.py ADDED
@@ -0,0 +1,1701 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The Google Research Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Utility library of instructions."""
16
+
17
+ import functools
18
+ import os
19
+ import random
20
+ import re
21
+ from importlib.metadata import version
22
+
23
+ import immutabledict
24
+ import nltk
25
+ from packaging.version import parse as parse_version
26
+
27
+
28
# Downloading 'punkt' with nltk<3.9 has a remote code vuln.
# see https://github.com/EleutherAI/lm-evaluation-harness/issues/2210
# and https://github.com/nltk/nltk/issues/3266
# for more information.
NLTK_MIN_VERSION = "3.9.1"
# Distributed rank of this process; download_nltk_resources only downloads
# NLTK data on rank "0".
RANK = os.environ.get("LOCAL_RANK", "0")
34
+
35
+
36
def download_nltk_resources():
    """Download 'punkt_tab' if not already installed.

    Raises:
      AssertionError: If the installed `nltk` is older than NLTK_MIN_VERSION
        (older versions are vulnerable to remote code execution via the
        downloaded data).
    """
    nltk_version = parse_version(version("nltk"))
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently disable this security check.
    # AssertionError is kept so callers see the same exception type.
    if nltk_version < parse_version(NLTK_MIN_VERSION):
        raise AssertionError(
            f"`nltk` version {nltk_version} is not >= {NLTK_MIN_VERSION}. Please update `nltk` before proceeding--older versions are vulnerable to a remote code execution vulnerability."
        )

    try:
        nltk.data.find("tokenizers/punkt_tab")
    except LookupError:
        # NOTE(review): only rank 0 downloads; other ranks continue without
        # waiting for the download -- presumably callers synchronize before
        # the tokenizer is first used. TODO confirm.
        if RANK == "0":
            nltk.download("punkt_tab")
            print("Downloaded punkt_tab on rank 0")
50
+
51
+
52
+ download_nltk_resources()
53
+
54
+ WORD_LIST = [
55
+ "western",
56
+ "sentence",
57
+ "signal",
58
+ "dump",
59
+ "spot",
60
+ "opposite",
61
+ "bottom",
62
+ "potato",
63
+ "administration",
64
+ "working",
65
+ "welcome",
66
+ "morning",
67
+ "good",
68
+ "agency",
69
+ "primary",
70
+ "wish",
71
+ "responsibility",
72
+ "press",
73
+ "problem",
74
+ "president",
75
+ "steal",
76
+ "brush",
77
+ "read",
78
+ "type",
79
+ "beat",
80
+ "trainer",
81
+ "growth",
82
+ "lock",
83
+ "bone",
84
+ "case",
85
+ "equal",
86
+ "comfortable",
87
+ "region",
88
+ "replacement",
89
+ "performance",
90
+ "mate",
91
+ "walk",
92
+ "medicine",
93
+ "film",
94
+ "thing",
95
+ "rock",
96
+ "tap",
97
+ "total",
98
+ "competition",
99
+ "ease",
100
+ "south",
101
+ "establishment",
102
+ "gather",
103
+ "parking",
104
+ "world",
105
+ "plenty",
106
+ "breath",
107
+ "claim",
108
+ "alcohol",
109
+ "trade",
110
+ "dear",
111
+ "highlight",
112
+ "street",
113
+ "matter",
114
+ "decision",
115
+ "mess",
116
+ "agreement",
117
+ "studio",
118
+ "coach",
119
+ "assist",
120
+ "brain",
121
+ "wing",
122
+ "style",
123
+ "private",
124
+ "top",
125
+ "brown",
126
+ "leg",
127
+ "buy",
128
+ "procedure",
129
+ "method",
130
+ "speed",
131
+ "high",
132
+ "company",
133
+ "valuable",
134
+ "pie",
135
+ "analyst",
136
+ "session",
137
+ "pattern",
138
+ "district",
139
+ "pleasure",
140
+ "dinner",
141
+ "swimming",
142
+ "joke",
143
+ "order",
144
+ "plate",
145
+ "department",
146
+ "motor",
147
+ "cell",
148
+ "spend",
149
+ "cabinet",
150
+ "difference",
151
+ "power",
152
+ "examination",
153
+ "engine",
154
+ "horse",
155
+ "dimension",
156
+ "pay",
157
+ "toe",
158
+ "curve",
159
+ "literature",
160
+ "bother",
161
+ "fire",
162
+ "possibility",
163
+ "debate",
164
+ "activity",
165
+ "passage",
166
+ "hello",
167
+ "cycle",
168
+ "background",
169
+ "quiet",
170
+ "author",
171
+ "effect",
172
+ "actor",
173
+ "page",
174
+ "bicycle",
175
+ "error",
176
+ "throat",
177
+ "attack",
178
+ "character",
179
+ "phone",
180
+ "tea",
181
+ "increase",
182
+ "outcome",
183
+ "file",
184
+ "specific",
185
+ "inspector",
186
+ "internal",
187
+ "potential",
188
+ "staff",
189
+ "building",
190
+ "employer",
191
+ "shoe",
192
+ "hand",
193
+ "direction",
194
+ "garden",
195
+ "purchase",
196
+ "interview",
197
+ "study",
198
+ "recognition",
199
+ "member",
200
+ "spiritual",
201
+ "oven",
202
+ "sandwich",
203
+ "weird",
204
+ "passenger",
205
+ "particular",
206
+ "response",
207
+ "reaction",
208
+ "size",
209
+ "variation",
210
+ "a",
211
+ "cancel",
212
+ "candy",
213
+ "exit",
214
+ "guest",
215
+ "condition",
216
+ "fly",
217
+ "price",
218
+ "weakness",
219
+ "convert",
220
+ "hotel",
221
+ "great",
222
+ "mouth",
223
+ "mind",
224
+ "song",
225
+ "sugar",
226
+ "suspect",
227
+ "telephone",
228
+ "ear",
229
+ "roof",
230
+ "paint",
231
+ "refrigerator",
232
+ "organization",
233
+ "jury",
234
+ "reward",
235
+ "engineering",
236
+ "day",
237
+ "possession",
238
+ "crew",
239
+ "bar",
240
+ "road",
241
+ "description",
242
+ "celebration",
243
+ "score",
244
+ "mark",
245
+ "letter",
246
+ "shower",
247
+ "suggestion",
248
+ "sir",
249
+ "luck",
250
+ "national",
251
+ "progress",
252
+ "hall",
253
+ "stroke",
254
+ "theory",
255
+ "offer",
256
+ "story",
257
+ "tax",
258
+ "definition",
259
+ "history",
260
+ "ride",
261
+ "medium",
262
+ "opening",
263
+ "glass",
264
+ "elevator",
265
+ "stomach",
266
+ "question",
267
+ "ability",
268
+ "leading",
269
+ "village",
270
+ "computer",
271
+ "city",
272
+ "grand",
273
+ "confidence",
274
+ "candle",
275
+ "priest",
276
+ "recommendation",
277
+ "point",
278
+ "necessary",
279
+ "body",
280
+ "desk",
281
+ "secret",
282
+ "horror",
283
+ "noise",
284
+ "culture",
285
+ "warning",
286
+ "water",
287
+ "round",
288
+ "diet",
289
+ "flower",
290
+ "bus",
291
+ "tough",
292
+ "permission",
293
+ "week",
294
+ "prompt",
295
+ "connection",
296
+ "abuse",
297
+ "height",
298
+ "save",
299
+ "corner",
300
+ "border",
301
+ "stress",
302
+ "drive",
303
+ "stop",
304
+ "rip",
305
+ "meal",
306
+ "listen",
307
+ "confusion",
308
+ "girlfriend",
309
+ "living",
310
+ "relation",
311
+ "significance",
312
+ "plan",
313
+ "creative",
314
+ "atmosphere",
315
+ "blame",
316
+ "invite",
317
+ "housing",
318
+ "paper",
319
+ "drink",
320
+ "roll",
321
+ "silver",
322
+ "drunk",
323
+ "age",
324
+ "damage",
325
+ "smoke",
326
+ "environment",
327
+ "pack",
328
+ "savings",
329
+ "influence",
330
+ "tourist",
331
+ "rain",
332
+ "post",
333
+ "sign",
334
+ "grandmother",
335
+ "run",
336
+ "profit",
337
+ "push",
338
+ "clerk",
339
+ "final",
340
+ "wine",
341
+ "swim",
342
+ "pause",
343
+ "stuff",
344
+ "singer",
345
+ "funeral",
346
+ "average",
347
+ "source",
348
+ "scene",
349
+ "tradition",
350
+ "personal",
351
+ "snow",
352
+ "nobody",
353
+ "distance",
354
+ "sort",
355
+ "sensitive",
356
+ "animal",
357
+ "major",
358
+ "negotiation",
359
+ "click",
360
+ "mood",
361
+ "period",
362
+ "arrival",
363
+ "expression",
364
+ "holiday",
365
+ "repeat",
366
+ "dust",
367
+ "closet",
368
+ "gold",
369
+ "bad",
370
+ "sail",
371
+ "combination",
372
+ "clothes",
373
+ "emphasis",
374
+ "duty",
375
+ "black",
376
+ "step",
377
+ "school",
378
+ "jump",
379
+ "document",
380
+ "professional",
381
+ "lip",
382
+ "chemical",
383
+ "front",
384
+ "wake",
385
+ "while",
386
+ "inside",
387
+ "watch",
388
+ "row",
389
+ "subject",
390
+ "penalty",
391
+ "balance",
392
+ "possible",
393
+ "adult",
394
+ "aside",
395
+ "sample",
396
+ "appeal",
397
+ "wedding",
398
+ "depth",
399
+ "king",
400
+ "award",
401
+ "wife",
402
+ "blow",
403
+ "site",
404
+ "camp",
405
+ "music",
406
+ "safe",
407
+ "gift",
408
+ "fault",
409
+ "guess",
410
+ "act",
411
+ "shame",
412
+ "drama",
413
+ "capital",
414
+ "exam",
415
+ "stupid",
416
+ "record",
417
+ "sound",
418
+ "swing",
419
+ "novel",
420
+ "minimum",
421
+ "ratio",
422
+ "machine",
423
+ "shape",
424
+ "lead",
425
+ "operation",
426
+ "salary",
427
+ "cloud",
428
+ "affair",
429
+ "hit",
430
+ "chapter",
431
+ "stage",
432
+ "quantity",
433
+ "access",
434
+ "army",
435
+ "chain",
436
+ "traffic",
437
+ "kick",
438
+ "analysis",
439
+ "airport",
440
+ "time",
441
+ "vacation",
442
+ "philosophy",
443
+ "ball",
444
+ "chest",
445
+ "thanks",
446
+ "place",
447
+ "mountain",
448
+ "advertising",
449
+ "red",
450
+ "past",
451
+ "rent",
452
+ "return",
453
+ "tour",
454
+ "house",
455
+ "construction",
456
+ "net",
457
+ "native",
458
+ "war",
459
+ "figure",
460
+ "fee",
461
+ "spray",
462
+ "user",
463
+ "dirt",
464
+ "shot",
465
+ "task",
466
+ "stick",
467
+ "friend",
468
+ "software",
469
+ "promotion",
470
+ "interaction",
471
+ "surround",
472
+ "block",
473
+ "purpose",
474
+ "practice",
475
+ "conflict",
476
+ "routine",
477
+ "requirement",
478
+ "bonus",
479
+ "hole",
480
+ "state",
481
+ "junior",
482
+ "sweet",
483
+ "catch",
484
+ "tear",
485
+ "fold",
486
+ "wall",
487
+ "editor",
488
+ "life",
489
+ "position",
490
+ "pound",
491
+ "respect",
492
+ "bathroom",
493
+ "coat",
494
+ "script",
495
+ "job",
496
+ "teach",
497
+ "birth",
498
+ "view",
499
+ "resolve",
500
+ "theme",
501
+ "employee",
502
+ "doubt",
503
+ "market",
504
+ "education",
505
+ "serve",
506
+ "recover",
507
+ "tone",
508
+ "harm",
509
+ "miss",
510
+ "union",
511
+ "understanding",
512
+ "cow",
513
+ "river",
514
+ "association",
515
+ "concept",
516
+ "training",
517
+ "recipe",
518
+ "relationship",
519
+ "reserve",
520
+ "depression",
521
+ "proof",
522
+ "hair",
523
+ "revenue",
524
+ "independent",
525
+ "lift",
526
+ "assignment",
527
+ "temporary",
528
+ "amount",
529
+ "loss",
530
+ "edge",
531
+ "track",
532
+ "check",
533
+ "rope",
534
+ "estimate",
535
+ "pollution",
536
+ "stable",
537
+ "message",
538
+ "delivery",
539
+ "perspective",
540
+ "mirror",
541
+ "assistant",
542
+ "representative",
543
+ "witness",
544
+ "nature",
545
+ "judge",
546
+ "fruit",
547
+ "tip",
548
+ "devil",
549
+ "town",
550
+ "emergency",
551
+ "upper",
552
+ "drop",
553
+ "stay",
554
+ "human",
555
+ "neck",
556
+ "speaker",
557
+ "network",
558
+ "sing",
559
+ "resist",
560
+ "league",
561
+ "trip",
562
+ "signature",
563
+ "lawyer",
564
+ "importance",
565
+ "gas",
566
+ "choice",
567
+ "engineer",
568
+ "success",
569
+ "part",
570
+ "external",
571
+ "worker",
572
+ "simple",
573
+ "quarter",
574
+ "student",
575
+ "heart",
576
+ "pass",
577
+ "spite",
578
+ "shift",
579
+ "rough",
580
+ "lady",
581
+ "grass",
582
+ "community",
583
+ "garage",
584
+ "youth",
585
+ "standard",
586
+ "skirt",
587
+ "promise",
588
+ "blind",
589
+ "television",
590
+ "disease",
591
+ "commission",
592
+ "positive",
593
+ "energy",
594
+ "calm",
595
+ "presence",
596
+ "tune",
597
+ "basis",
598
+ "preference",
599
+ "head",
600
+ "common",
601
+ "cut",
602
+ "somewhere",
603
+ "presentation",
604
+ "current",
605
+ "thought",
606
+ "revolution",
607
+ "effort",
608
+ "master",
609
+ "implement",
610
+ "republic",
611
+ "floor",
612
+ "principle",
613
+ "stranger",
614
+ "shoulder",
615
+ "grade",
616
+ "button",
617
+ "tennis",
618
+ "police",
619
+ "collection",
620
+ "account",
621
+ "register",
622
+ "glove",
623
+ "divide",
624
+ "professor",
625
+ "chair",
626
+ "priority",
627
+ "combine",
628
+ "peace",
629
+ "extension",
630
+ "maybe",
631
+ "evening",
632
+ "frame",
633
+ "sister",
634
+ "wave",
635
+ "code",
636
+ "application",
637
+ "mouse",
638
+ "match",
639
+ "counter",
640
+ "bottle",
641
+ "half",
642
+ "cheek",
643
+ "resolution",
644
+ "back",
645
+ "knowledge",
646
+ "make",
647
+ "discussion",
648
+ "screw",
649
+ "length",
650
+ "accident",
651
+ "battle",
652
+ "dress",
653
+ "knee",
654
+ "log",
655
+ "package",
656
+ "it",
657
+ "turn",
658
+ "hearing",
659
+ "newspaper",
660
+ "layer",
661
+ "wealth",
662
+ "profile",
663
+ "imagination",
664
+ "answer",
665
+ "weekend",
666
+ "teacher",
667
+ "appearance",
668
+ "meet",
669
+ "bike",
670
+ "rise",
671
+ "belt",
672
+ "crash",
673
+ "bowl",
674
+ "equivalent",
675
+ "support",
676
+ "image",
677
+ "poem",
678
+ "risk",
679
+ "excitement",
680
+ "remote",
681
+ "secretary",
682
+ "public",
683
+ "produce",
684
+ "plane",
685
+ "display",
686
+ "money",
687
+ "sand",
688
+ "situation",
689
+ "punch",
690
+ "customer",
691
+ "title",
692
+ "shake",
693
+ "mortgage",
694
+ "option",
695
+ "number",
696
+ "pop",
697
+ "window",
698
+ "extent",
699
+ "nothing",
700
+ "experience",
701
+ "opinion",
702
+ "departure",
703
+ "dance",
704
+ "indication",
705
+ "boy",
706
+ "material",
707
+ "band",
708
+ "leader",
709
+ "sun",
710
+ "beautiful",
711
+ "muscle",
712
+ "farmer",
713
+ "variety",
714
+ "fat",
715
+ "handle",
716
+ "director",
717
+ "opportunity",
718
+ "calendar",
719
+ "outside",
720
+ "pace",
721
+ "bath",
722
+ "fish",
723
+ "consequence",
724
+ "put",
725
+ "owner",
726
+ "go",
727
+ "doctor",
728
+ "information",
729
+ "share",
730
+ "hurt",
731
+ "protection",
732
+ "career",
733
+ "finance",
734
+ "force",
735
+ "golf",
736
+ "garbage",
737
+ "aspect",
738
+ "kid",
739
+ "food",
740
+ "boot",
741
+ "milk",
742
+ "respond",
743
+ "objective",
744
+ "reality",
745
+ "raw",
746
+ "ring",
747
+ "mall",
748
+ "one",
749
+ "impact",
750
+ "area",
751
+ "news",
752
+ "international",
753
+ "series",
754
+ "impress",
755
+ "mother",
756
+ "shelter",
757
+ "strike",
758
+ "loan",
759
+ "month",
760
+ "seat",
761
+ "anything",
762
+ "entertainment",
763
+ "familiar",
764
+ "clue",
765
+ "year",
766
+ "glad",
767
+ "supermarket",
768
+ "natural",
769
+ "god",
770
+ "cost",
771
+ "conversation",
772
+ "tie",
773
+ "ruin",
774
+ "comfort",
775
+ "earth",
776
+ "storm",
777
+ "percentage",
778
+ "assistance",
779
+ "budget",
780
+ "strength",
781
+ "beginning",
782
+ "sleep",
783
+ "other",
784
+ "young",
785
+ "unit",
786
+ "fill",
787
+ "store",
788
+ "desire",
789
+ "hide",
790
+ "value",
791
+ "cup",
792
+ "maintenance",
793
+ "nurse",
794
+ "function",
795
+ "tower",
796
+ "role",
797
+ "class",
798
+ "camera",
799
+ "database",
800
+ "panic",
801
+ "nation",
802
+ "basket",
803
+ "ice",
804
+ "art",
805
+ "spirit",
806
+ "chart",
807
+ "exchange",
808
+ "feedback",
809
+ "statement",
810
+ "reputation",
811
+ "search",
812
+ "hunt",
813
+ "exercise",
814
+ "nasty",
815
+ "notice",
816
+ "male",
817
+ "yard",
818
+ "annual",
819
+ "collar",
820
+ "date",
821
+ "platform",
822
+ "plant",
823
+ "fortune",
824
+ "passion",
825
+ "friendship",
826
+ "spread",
827
+ "cancer",
828
+ "ticket",
829
+ "attitude",
830
+ "island",
831
+ "active",
832
+ "object",
833
+ "service",
834
+ "buyer",
835
+ "bite",
836
+ "card",
837
+ "face",
838
+ "steak",
839
+ "proposal",
840
+ "patient",
841
+ "heat",
842
+ "rule",
843
+ "resident",
844
+ "broad",
845
+ "politics",
846
+ "west",
847
+ "knife",
848
+ "expert",
849
+ "girl",
850
+ "design",
851
+ "salt",
852
+ "baseball",
853
+ "grab",
854
+ "inspection",
855
+ "cousin",
856
+ "couple",
857
+ "magazine",
858
+ "cook",
859
+ "dependent",
860
+ "security",
861
+ "chicken",
862
+ "version",
863
+ "currency",
864
+ "ladder",
865
+ "scheme",
866
+ "kitchen",
867
+ "employment",
868
+ "local",
869
+ "attention",
870
+ "manager",
871
+ "fact",
872
+ "cover",
873
+ "sad",
874
+ "guard",
875
+ "relative",
876
+ "county",
877
+ "rate",
878
+ "lunch",
879
+ "program",
880
+ "initiative",
881
+ "gear",
882
+ "bridge",
883
+ "breast",
884
+ "talk",
885
+ "dish",
886
+ "guarantee",
887
+ "beer",
888
+ "vehicle",
889
+ "reception",
890
+ "woman",
891
+ "substance",
892
+ "copy",
893
+ "lecture",
894
+ "advantage",
895
+ "park",
896
+ "cold",
897
+ "death",
898
+ "mix",
899
+ "hold",
900
+ "scale",
901
+ "tomorrow",
902
+ "blood",
903
+ "request",
904
+ "green",
905
+ "cookie",
906
+ "church",
907
+ "strip",
908
+ "forever",
909
+ "beyond",
910
+ "debt",
911
+ "tackle",
912
+ "wash",
913
+ "following",
914
+ "feel",
915
+ "maximum",
916
+ "sector",
917
+ "sea",
918
+ "property",
919
+ "economics",
920
+ "menu",
921
+ "bench",
922
+ "try",
923
+ "language",
924
+ "start",
925
+ "call",
926
+ "solid",
927
+ "address",
928
+ "income",
929
+ "foot",
930
+ "senior",
931
+ "honey",
932
+ "few",
933
+ "mixture",
934
+ "cash",
935
+ "grocery",
936
+ "link",
937
+ "map",
938
+ "form",
939
+ "factor",
940
+ "pot",
941
+ "model",
942
+ "writer",
943
+ "farm",
944
+ "winter",
945
+ "skill",
946
+ "anywhere",
947
+ "birthday",
948
+ "policy",
949
+ "release",
950
+ "husband",
951
+ "lab",
952
+ "hurry",
953
+ "mail",
954
+ "equipment",
955
+ "sink",
956
+ "pair",
957
+ "driver",
958
+ "consideration",
959
+ "leather",
960
+ "skin",
961
+ "blue",
962
+ "boat",
963
+ "sale",
964
+ "brick",
965
+ "two",
966
+ "feed",
967
+ "square",
968
+ "dot",
969
+ "rush",
970
+ "dream",
971
+ "location",
972
+ "afternoon",
973
+ "manufacturer",
974
+ "control",
975
+ "occasion",
976
+ "trouble",
977
+ "introduction",
978
+ "advice",
979
+ "bet",
980
+ "eat",
981
+ "kill",
982
+ "category",
983
+ "manner",
984
+ "office",
985
+ "estate",
986
+ "pride",
987
+ "awareness",
988
+ "slip",
989
+ "crack",
990
+ "client",
991
+ "nail",
992
+ "shoot",
993
+ "membership",
994
+ "soft",
995
+ "anybody",
996
+ "web",
997
+ "official",
998
+ "individual",
999
+ "pizza",
1000
+ "interest",
1001
+ "bag",
1002
+ "spell",
1003
+ "profession",
1004
+ "queen",
1005
+ "deal",
1006
+ "resource",
1007
+ "ship",
1008
+ "guy",
1009
+ "chocolate",
1010
+ "joint",
1011
+ "formal",
1012
+ "upstairs",
1013
+ "car",
1014
+ "resort",
1015
+ "abroad",
1016
+ "dealer",
1017
+ "associate",
1018
+ "finger",
1019
+ "surgery",
1020
+ "comment",
1021
+ "team",
1022
+ "detail",
1023
+ "crazy",
1024
+ "path",
1025
+ "tale",
1026
+ "initial",
1027
+ "arm",
1028
+ "radio",
1029
+ "demand",
1030
+ "single",
1031
+ "draw",
1032
+ "yellow",
1033
+ "contest",
1034
+ "piece",
1035
+ "quote",
1036
+ "pull",
1037
+ "commercial",
1038
+ "shirt",
1039
+ "contribution",
1040
+ "cream",
1041
+ "channel",
1042
+ "suit",
1043
+ "discipline",
1044
+ "instruction",
1045
+ "concert",
1046
+ "speech",
1047
+ "low",
1048
+ "effective",
1049
+ "hang",
1050
+ "scratch",
1051
+ "industry",
1052
+ "breakfast",
1053
+ "lay",
1054
+ "join",
1055
+ "metal",
1056
+ "bedroom",
1057
+ "minute",
1058
+ "product",
1059
+ "rest",
1060
+ "temperature",
1061
+ "many",
1062
+ "give",
1063
+ "argument",
1064
+ "print",
1065
+ "purple",
1066
+ "laugh",
1067
+ "health",
1068
+ "credit",
1069
+ "investment",
1070
+ "sell",
1071
+ "setting",
1072
+ "lesson",
1073
+ "egg",
1074
+ "middle",
1075
+ "marriage",
1076
+ "level",
1077
+ "evidence",
1078
+ "phrase",
1079
+ "love",
1080
+ "self",
1081
+ "benefit",
1082
+ "guidance",
1083
+ "affect",
1084
+ "you",
1085
+ "dad",
1086
+ "anxiety",
1087
+ "special",
1088
+ "boyfriend",
1089
+ "test",
1090
+ "blank",
1091
+ "payment",
1092
+ "soup",
1093
+ "obligation",
1094
+ "reply",
1095
+ "smile",
1096
+ "deep",
1097
+ "complaint",
1098
+ "addition",
1099
+ "review",
1100
+ "box",
1101
+ "towel",
1102
+ "minor",
1103
+ "fun",
1104
+ "soil",
1105
+ "issue",
1106
+ "cigarette",
1107
+ "internet",
1108
+ "gain",
1109
+ "tell",
1110
+ "entry",
1111
+ "spare",
1112
+ "incident",
1113
+ "family",
1114
+ "refuse",
1115
+ "branch",
1116
+ "can",
1117
+ "pen",
1118
+ "grandfather",
1119
+ "constant",
1120
+ "tank",
1121
+ "uncle",
1122
+ "climate",
1123
+ "ground",
1124
+ "volume",
1125
+ "communication",
1126
+ "kind",
1127
+ "poet",
1128
+ "child",
1129
+ "screen",
1130
+ "mine",
1131
+ "quit",
1132
+ "gene",
1133
+ "lack",
1134
+ "charity",
1135
+ "memory",
1136
+ "tooth",
1137
+ "fear",
1138
+ "mention",
1139
+ "marketing",
1140
+ "reveal",
1141
+ "reason",
1142
+ "court",
1143
+ "season",
1144
+ "freedom",
1145
+ "land",
1146
+ "sport",
1147
+ "audience",
1148
+ "classroom",
1149
+ "law",
1150
+ "hook",
1151
+ "win",
1152
+ "carry",
1153
+ "eye",
1154
+ "smell",
1155
+ "distribution",
1156
+ "research",
1157
+ "country",
1158
+ "dare",
1159
+ "hope",
1160
+ "whereas",
1161
+ "stretch",
1162
+ "library",
1163
+ "if",
1164
+ "delay",
1165
+ "college",
1166
+ "plastic",
1167
+ "book",
1168
+ "present",
1169
+ "use",
1170
+ "worry",
1171
+ "champion",
1172
+ "goal",
1173
+ "economy",
1174
+ "march",
1175
+ "election",
1176
+ "reflection",
1177
+ "midnight",
1178
+ "slide",
1179
+ "inflation",
1180
+ "action",
1181
+ "challenge",
1182
+ "guitar",
1183
+ "coast",
1184
+ "apple",
1185
+ "campaign",
1186
+ "field",
1187
+ "jacket",
1188
+ "sense",
1189
+ "way",
1190
+ "visual",
1191
+ "remove",
1192
+ "weather",
1193
+ "trash",
1194
+ "cable",
1195
+ "regret",
1196
+ "buddy",
1197
+ "beach",
1198
+ "historian",
1199
+ "courage",
1200
+ "sympathy",
1201
+ "truck",
1202
+ "tension",
1203
+ "permit",
1204
+ "nose",
1205
+ "bed",
1206
+ "son",
1207
+ "person",
1208
+ "base",
1209
+ "meat",
1210
+ "usual",
1211
+ "air",
1212
+ "meeting",
1213
+ "worth",
1214
+ "game",
1215
+ "independence",
1216
+ "physical",
1217
+ "brief",
1218
+ "play",
1219
+ "raise",
1220
+ "board",
1221
+ "she",
1222
+ "key",
1223
+ "writing",
1224
+ "pick",
1225
+ "command",
1226
+ "party",
1227
+ "yesterday",
1228
+ "spring",
1229
+ "candidate",
1230
+ "physics",
1231
+ "university",
1232
+ "concern",
1233
+ "development",
1234
+ "change",
1235
+ "string",
1236
+ "target",
1237
+ "instance",
1238
+ "room",
1239
+ "bitter",
1240
+ "bird",
1241
+ "football",
1242
+ "normal",
1243
+ "split",
1244
+ "impression",
1245
+ "wood",
1246
+ "long",
1247
+ "meaning",
1248
+ "stock",
1249
+ "cap",
1250
+ "leadership",
1251
+ "media",
1252
+ "ambition",
1253
+ "fishing",
1254
+ "essay",
1255
+ "salad",
1256
+ "repair",
1257
+ "today",
1258
+ "designer",
1259
+ "night",
1260
+ "bank",
1261
+ "drawing",
1262
+ "inevitable",
1263
+ "phase",
1264
+ "vast",
1265
+ "chip",
1266
+ "anger",
1267
+ "switch",
1268
+ "cry",
1269
+ "twist",
1270
+ "personality",
1271
+ "attempt",
1272
+ "storage",
1273
+ "being",
1274
+ "preparation",
1275
+ "bat",
1276
+ "selection",
1277
+ "white",
1278
+ "technology",
1279
+ "contract",
1280
+ "side",
1281
+ "section",
1282
+ "station",
1283
+ "till",
1284
+ "structure",
1285
+ "tongue",
1286
+ "taste",
1287
+ "truth",
1288
+ "difficulty",
1289
+ "group",
1290
+ "limit",
1291
+ "main",
1292
+ "move",
1293
+ "feeling",
1294
+ "light",
1295
+ "example",
1296
+ "mission",
1297
+ "might",
1298
+ "wait",
1299
+ "wheel",
1300
+ "shop",
1301
+ "host",
1302
+ "classic",
1303
+ "alternative",
1304
+ "cause",
1305
+ "agent",
1306
+ "consist",
1307
+ "table",
1308
+ "airline",
1309
+ "text",
1310
+ "pool",
1311
+ "craft",
1312
+ "range",
1313
+ "fuel",
1314
+ "tool",
1315
+ "partner",
1316
+ "load",
1317
+ "entrance",
1318
+ "deposit",
1319
+ "hate",
1320
+ "article",
1321
+ "video",
1322
+ "summer",
1323
+ "feature",
1324
+ "extreme",
1325
+ "mobile",
1326
+ "hospital",
1327
+ "flight",
1328
+ "fall",
1329
+ "pension",
1330
+ "piano",
1331
+ "fail",
1332
+ "result",
1333
+ "rub",
1334
+ "gap",
1335
+ "system",
1336
+ "report",
1337
+ "suck",
1338
+ "ordinary",
1339
+ "wind",
1340
+ "nerve",
1341
+ "ask",
1342
+ "shine",
1343
+ "note",
1344
+ "line",
1345
+ "mom",
1346
+ "perception",
1347
+ "brother",
1348
+ "reference",
1349
+ "bend",
1350
+ "charge",
1351
+ "treat",
1352
+ "trick",
1353
+ "term",
1354
+ "homework",
1355
+ "bake",
1356
+ "bid",
1357
+ "status",
1358
+ "project",
1359
+ "strategy",
1360
+ "orange",
1361
+ "let",
1362
+ "enthusiasm",
1363
+ "parent",
1364
+ "concentrate",
1365
+ "device",
1366
+ "travel",
1367
+ "poetry",
1368
+ "business",
1369
+ "society",
1370
+ "kiss",
1371
+ "end",
1372
+ "vegetable",
1373
+ "employ",
1374
+ "schedule",
1375
+ "hour",
1376
+ "brave",
1377
+ "focus",
1378
+ "process",
1379
+ "movie",
1380
+ "illegal",
1381
+ "general",
1382
+ "coffee",
1383
+ "ad",
1384
+ "highway",
1385
+ "chemistry",
1386
+ "psychology",
1387
+ "hire",
1388
+ "bell",
1389
+ "conference",
1390
+ "relief",
1391
+ "show",
1392
+ "neat",
1393
+ "funny",
1394
+ "weight",
1395
+ "quality",
1396
+ "club",
1397
+ "daughter",
1398
+ "zone",
1399
+ "touch",
1400
+ "tonight",
1401
+ "shock",
1402
+ "burn",
1403
+ "excuse",
1404
+ "name",
1405
+ "survey",
1406
+ "landscape",
1407
+ "advance",
1408
+ "satisfaction",
1409
+ "bread",
1410
+ "disaster",
1411
+ "item",
1412
+ "hat",
1413
+ "prior",
1414
+ "shopping",
1415
+ "visit",
1416
+ "east",
1417
+ "photo",
1418
+ "home",
1419
+ "idea",
1420
+ "father",
1421
+ "comparison",
1422
+ "cat",
1423
+ "pipe",
1424
+ "winner",
1425
+ "count",
1426
+ "lake",
1427
+ "fight",
1428
+ "prize",
1429
+ "foundation",
1430
+ "dog",
1431
+ "keep",
1432
+ "ideal",
1433
+ "fan",
1434
+ "struggle",
1435
+ "peak",
1436
+ "safety",
1437
+ "solution",
1438
+ "hell",
1439
+ "conclusion",
1440
+ "population",
1441
+ "strain",
1442
+ "alarm",
1443
+ "measurement",
1444
+ "second",
1445
+ "train",
1446
+ "race",
1447
+ "due",
1448
+ "insurance",
1449
+ "boss",
1450
+ "tree",
1451
+ "monitor",
1452
+ "sick",
1453
+ "course",
1454
+ "drag",
1455
+ "appointment",
1456
+ "slice",
1457
+ "still",
1458
+ "care",
1459
+ "patience",
1460
+ "rich",
1461
+ "escape",
1462
+ "emotion",
1463
+ "royal",
1464
+ "female",
1465
+ "childhood",
1466
+ "government",
1467
+ "picture",
1468
+ "will",
1469
+ "sock",
1470
+ "big",
1471
+ "gate",
1472
+ "oil",
1473
+ "cross",
1474
+ "pin",
1475
+ "improvement",
1476
+ "championship",
1477
+ "silly",
1478
+ "help",
1479
+ "sky",
1480
+ "pitch",
1481
+ "man",
1482
+ "diamond",
1483
+ "most",
1484
+ "transition",
1485
+ "work",
1486
+ "science",
1487
+ "committee",
1488
+ "moment",
1489
+ "fix",
1490
+ "teaching",
1491
+ "dig",
1492
+ "specialist",
1493
+ "complex",
1494
+ "guide",
1495
+ "people",
1496
+ "dead",
1497
+ "voice",
1498
+ "original",
1499
+ "break",
1500
+ "topic",
1501
+ "data",
1502
+ "degree",
1503
+ "reading",
1504
+ "recording",
1505
+ "bunch",
1506
+ "reach",
1507
+ "judgment",
1508
+ "lie",
1509
+ "regular",
1510
+ "set",
1511
+ "painting",
1512
+ "mode",
1513
+ "list",
1514
+ "player",
1515
+ "bear",
1516
+ "north",
1517
+ "wonder",
1518
+ "carpet",
1519
+ "heavy",
1520
+ "officer",
1521
+ "negative",
1522
+ "clock",
1523
+ "unique",
1524
+ "baby",
1525
+ "pain",
1526
+ "assumption",
1527
+ "disk",
1528
+ "iron",
1529
+ "bill",
1530
+ "drawer",
1531
+ "look",
1532
+ "double",
1533
+ "mistake",
1534
+ "finish",
1535
+ "future",
1536
+ "brilliant",
1537
+ "contact",
1538
+ "math",
1539
+ "rice",
1540
+ "leave",
1541
+ "restaurant",
1542
+ "discount",
1543
+ "sex",
1544
+ "virus",
1545
+ "bit",
1546
+ "trust",
1547
+ "event",
1548
+ "wear",
1549
+ "juice",
1550
+ "failure",
1551
+ "bug",
1552
+ "context",
1553
+ "mud",
1554
+ "whole",
1555
+ "wrap",
1556
+ "intention",
1557
+ "draft",
1558
+ "pressure",
1559
+ "cake",
1560
+ "dark",
1561
+ "explanation",
1562
+ "space",
1563
+ "angle",
1564
+ "word",
1565
+ "efficiency",
1566
+ "management",
1567
+ "habit",
1568
+ "star",
1569
+ "chance",
1570
+ "finding",
1571
+ "transportation",
1572
+ "stand",
1573
+ "criticism",
1574
+ "flow",
1575
+ "door",
1576
+ "injury",
1577
+ "insect",
1578
+ "surprise",
1579
+ "apartment",
1580
+ ] # pylint: disable=line-too-long
1581
+
1582
# ISO 639-1 codes to language names.
# Wrapped in an immutabledict so the mapping cannot be mutated at runtime.
LANGUAGE_CODES = immutabledict.immutabledict(
    {
        "en": "English",
        "es": "Spanish",
        "pt": "Portuguese",
        "ar": "Arabic",
        "hi": "Hindi",
        "fr": "French",
        "ru": "Russian",
        "de": "German",
        "ja": "Japanese",
        "it": "Italian",
        "bn": "Bengali",
        "uk": "Ukrainian",
        "th": "Thai",
        "ur": "Urdu",
        "ta": "Tamil",
        "te": "Telugu",
        "bg": "Bulgarian",
        "ko": "Korean",
        "pl": "Polish",
        "he": "Hebrew",
        "fa": "Persian",
        "vi": "Vietnamese",
        "ne": "Nepali",
        "sw": "Swahili",
        "kn": "Kannada",
        "mr": "Marathi",
        "gu": "Gujarati",
        "pa": "Punjabi",
        "ml": "Malayalam",
        "fi": "Finnish",
    }
)
1617
+
1618
# Regex building blocks for the rule-based sentence splitter below.
_ALPHABETS = "([A-Za-z])"  # a single ASCII letter
_PREFIXES = "(Mr|St|Mrs|Ms|Dr)[.]"  # honorifics whose period is not a stop
_SUFFIXES = "(Inc|Ltd|Jr|Sr|Co)"  # name/company suffixes
# Words that typically begin a new sentence (used to disambiguate periods).
_STARTERS = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
_ACRONYMS = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"  # e.g. "U.S." or "U.S.A."
_WEBSITES = "[.](com|net|org|io|gov|edu|me)"  # common top-level domains
_DIGITS = "([0-9])"  # a single digit (for decimals like 3.14)
_MULTIPLE_DOTS = r"\.{2,}"  # ellipses / runs of two or more periods
1626
+
1627
+
1628
def split_into_sentences(text):
    """Split the text into sentences.

    Works as an ordered pipeline of regex substitutions: periods that do
    NOT end a sentence (abbreviations, decimals, acronyms, web domains)
    are first masked with the placeholder ``<prd>``, genuine sentence ends
    are tagged with ``<stop>``, then the placeholders are restored and the
    text is split on ``<stop>``. The substitution order is load-bearing.

    Args:
        text: A string that consists of more than or equal to one sentences.

    Returns:
        A list of strings where each string is a sentence.
    """
    # Pad so the space-anchored patterns (" X.") can match at the edges.
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Honorifics: "Dr." -> "Dr<prd>"
    text = re.sub(_PREFIXES, "\\1<prd>", text)
    # Web domains: ".com" -> "<prd>com"
    text = re.sub(_WEBSITES, "<prd>\\1", text)
    # Decimals: "3.14" -> "3<prd>14"
    text = re.sub(_DIGITS + "[.]" + _DIGITS, "\\1<prd>\\2", text)
    # Ellipses: mask every dot, then mark a sentence stop after the run.
    text = re.sub(
        _MULTIPLE_DOTS,
        lambda match: "<prd>" * len(match.group(0)) + "<stop>",
        text,
    )
    if "Ph.D" in text:
        text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    # Single-letter abbreviations: " J. " -> " J<prd> "
    text = re.sub(r"\s" + _ALPHABETS + "[.] ", " \\1<prd> ", text)
    # An acronym followed by a sentence starter really does end a sentence.
    text = re.sub(_ACRONYMS + " " + _STARTERS, "\\1<stop> \\2", text)
    # Three- then two-letter acronyms: "U.S.A." -> "U<prd>S<prd>A<prd>"
    text = re.sub(
        _ALPHABETS + "[.]" + _ALPHABETS + "[.]" + _ALPHABETS + "[.]",
        "\\1<prd>\\2<prd>\\3<prd>",
        text,
    )
    text = re.sub(_ALPHABETS + "[.]" + _ALPHABETS + "[.]", "\\1<prd>\\2<prd>", text)
    # A suffix ("Inc.") followed by a starter ends a sentence; otherwise mask.
    text = re.sub(" " + _SUFFIXES + "[.] " + _STARTERS, " \\1<stop> \\2", text)
    text = re.sub(" " + _SUFFIXES + "[.]", " \\1<prd>", text)
    text = re.sub(" " + _ALPHABETS + "[.]", " \\1<prd>", text)
    # Move terminal punctuation outside closing quotes so the <stop> marker
    # ends up after the quote character.
    if "”" in text:
        text = text.replace(".”", "”.")
    if '"' in text:
        text = text.replace('."', '".')
    if "!" in text:
        text = text.replace('!"', '"!')
    if "?" in text:
        text = text.replace('?"', '"?')
    # Any remaining . ? ! are genuine sentence terminators.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    # Restore the masked (non-terminal) periods.
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = [s.strip() for s in sentences]
    # Drop the empty trailing fragment produced by a final <stop>.
    if sentences and not sentences[-1]:
        sentences = sentences[:-1]
    return sentences
1677
+
1678
+
1679
def count_words(text):
    """Count the word tokens in ``text``.

    A "word" is a maximal run of word characters (``\\w+``), matching the
    tokenization previously done via ``nltk.tokenize.RegexpTokenizer``.
    """
    return len(re.findall(r"\w+", text))
1685
+
1686
+
1687
@functools.lru_cache(maxsize=None)
def _get_sentence_tokenizer():
    """Load the NLTK punkt English sentence tokenizer (cached after first call)."""
    return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
1690
+
1691
+
1692
def count_sentences(text):
    """Count the number of sentences in ``text`` using the punkt tokenizer."""
    tokenizer = _get_sentence_tokenizer()
    tokenized_sentences = tokenizer.tokenize(text)
    return len(tokenized_sentences)
1697
+
1698
+
1699
def generate_keywords(num_keywords):
    """Randomly generates a few keywords.

    Returns ``num_keywords`` distinct words sampled from ``WORD_LIST``
    (``random.sample`` raises ValueError if more are requested than exist).
    """
    return random.sample(WORD_LIST, k=num_keywords)
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/ifeval/utils.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from typing import Dict, Optional, Union
3
+
4
+ from lm_eval.tasks.ifeval import instructions_registry
5
+
6
+
7
@dataclasses.dataclass
class InputExample:
    """One IFEval prompt together with its verifiable instructions."""

    key: int  # unique id of the example
    instruction_id_list: list[str]  # registry ids of the instructions to check
    prompt: str  # the prompt shown to the model
    # Per-instruction keyword arguments, parallel to instruction_id_list;
    # fields unused by a given instruction are None.
    kwargs: list[Dict[str, Optional[Union[str, int]]]]
13
+
14
+
15
@dataclasses.dataclass
class OutputExample:
    """Evaluation outcome for one model response."""

    instruction_id_list: list[str]  # instructions that were checked
    prompt: str  # the original prompt
    response: str  # the response that was scored
    follow_all_instructions: bool  # True iff every instruction was followed
    follow_instruction_list: list[bool]  # per-instruction pass/fail flags
22
+
23
+
24
def test_instruction_following_strict(
    inp,
    response,
):
    """Tests response to see if instructions are followed.

    Strict variant: the verbatim response must satisfy each instruction.

    Args:
        inp: An InputExample with the prompt, instruction ids, and kwargs.
        response: The model's raw response string.

    Returns:
        An OutputExample with per-instruction pass flags and their conjunction.
    """
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword argument
        # errors in build_description. Test `is not None` explicitly: the old
        # `if v` filter also dropped valid falsy values such as 0 or "".
        # NOTE(review): assumes the dataset marks unused kwargs with None.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v is not None}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        # An empty response follows nothing.
        if response.strip() and instruction.check_following(response):
            is_following_list.append(True)
        else:
            is_following_list.append(False)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )
55
+
56
+
57
def test_instruction_following_loose(
    inp,
    response,
):
    """Tests response for an upper bound for following instructions.

    Loose variant: builds several relaxed transformations of the response
    (dropping the first/last line, stripping markdown asterisks, and their
    combinations) and counts an instruction as followed if ANY variant
    passes.

    Args:
        inp: An InputExample with the prompt, instruction ids, and kwargs.
        response: The model's raw response string.

    Returns:
        An OutputExample with per-instruction pass flags and their conjunction.
    """
    r = response.split("\n")
    response_remove_first = "\n".join(r[1:]).strip()
    response_remove_last = "\n".join(r[:-1]).strip()
    response_remove_both = "\n".join(r[1:-1]).strip()
    revised_response = response.replace("*", "")
    revised_response_remove_first = response_remove_first.replace("*", "")
    revised_response_remove_last = response_remove_last.replace("*", "")
    revised_response_remove_both = response_remove_both.replace("*", "")
    all_responses = [
        response,
        revised_response,
        response_remove_first,
        response_remove_last,
        response_remove_both,
        revised_response_remove_first,
        revised_response_remove_last,
        revised_response_remove_both,
    ]
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword argument
        # errors in build_description. Test `is not None` explicitly: the old
        # `if v` filter also dropped valid falsy values such as 0 or "".
        # NOTE(review): assumes the dataset marks unused kwargs with None.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v is not None}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        is_following = False
        for variant in all_responses:
            # Any non-empty variant that passes is enough.
            if variant.strip() and instruction.check_following(variant):
                is_following = True
                break

        is_following_list.append(is_following)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )
109
+
110
+
111
def process_results(doc, results):
    """Score one IFEval document against the model's first generation.

    Returns the four harness metrics: prompt-level and instruction-level
    accuracy, each under the strict and the loose evaluation regime.
    """
    example = InputExample(
        key=doc["key"],
        instruction_id_list=doc["instruction_id_list"],
        prompt=doc["prompt"],
        kwargs=doc["kwargs"],
    )
    generation = results[0]

    strict = test_instruction_following_strict(example, generation)
    loose = test_instruction_following_loose(example, generation)

    return {
        "prompt_level_strict_acc": strict.follow_all_instructions,
        "inst_level_strict_acc": strict.follow_instruction_list,
        "prompt_level_loose_acc": loose.follow_all_instructions,
        "inst_level_loose_acc": loose.follow_instruction_list,
    }
129
+
130
+
131
def agg_inst_level_acc(items):
    """Mean pass rate over all instructions, pooled across prompts.

    `items` is a list of per-prompt bool lists; every instruction counts
    equally regardless of which prompt it belongs to.
    """
    total = 0
    correct = 0
    for prompt_flags in items:
        for flag in prompt_flags:
            total += 1
            correct += flag
    return correct / total
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/math500/math500.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Zero-shot generative evaluation on the MATH-500 benchmark.
task: math500
dataset_path: HuggingFaceH4/MATH-500
output_type: generate_until
test_split: test
# Prompt is assembled in utils.math500_prompt; the gold label is the
# dataset's `answer` column.
doc_to_text: !function utils.math500_prompt
doc_to_target: "{{answer}}"
generation_kwargs:
  until:
    # Sentinel that never occurs in output, so generation only stops at
    # the model's own length limit.
    - "[NO_UNTIL_PLACEHOLDER]"
  # Greedy decoding for reproducible scores.
  do_sample: false
repeats: 1
num_fewshot: 0
Prism/Dream/Dream_Baseline/eval_instruct/lm_eval/tasks/math500/utils.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def math500_prompt(doc):
    """Render the zero-shot MATH-500 prompt for one dataset row.

    Prepends the fixed system instructions (step-by-step reasoning with the
    final answer in \\boxed{}, wrapped in <reasoning>/<answer> tags) to the
    row's `problem` text.
    """
    instructions = (
        "You are a math expert. You will be given a question to solve. Solve it step by step. Wrap the final answer in a \\boxed{}. \n"
        "Respond in the following format:\n"
        "<reasoning>\n"
        "Your reasoning here\n"
        "</reasoning>\n"
        "<answer>\n"
        "\\boxed{...}\n"
        "</answer>"
    )

    return "".join([instructions, "\n\n", doc["problem"], "\n\n"])