koichi12 committed
Commit 49ebc99 · verified · 1 Parent(s): b27c938

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. scripts/yans/lm-evaluation-harness/lm_eval/tasks/babi/README.md +49 -0
  2. scripts/yans/lm-evaluation-harness/lm_eval/tasks/babi/babi.yaml +20 -0
  3. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/README.md +53 -0
  4. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/_generate_configs.py +81 -0
  5. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/_bbh.yaml +36 -0
  6. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/_bbh_cot_fewshot.yaml +36 -0
  7. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/_cot_fewshot_template_yaml +27 -0
  8. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/boolean_expressions.yaml +21 -0
  9. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/causal_judgement.yaml +92 -0
  10. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/date_understanding.yaml +73 -0
  11. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/disambiguation_qa.yaml +104 -0
  12. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/dyck_languages.yaml +149 -0
  13. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/formal_fallacies.yaml +97 -0
  14. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml +184 -0
  15. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/hyperbaton.yaml +84 -0
  16. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_five_objects.yaml +93 -0
  17. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_seven_objects.yaml +93 -0
  18. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_three_objects.yaml +93 -0
  19. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/movie_recommendation.yaml +97 -0
  20. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/multistep_arithmetic_two.yaml +48 -0
  21. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/navigate.yaml +93 -0
  22. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/object_counting.yaml +82 -0
  23. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/penguins_in_a_table.yaml +99 -0
  24. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/reasoning_about_colored_objects.yaml +144 -0
  25. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/ruin_names.yaml +115 -0
  26. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/salient_translation_error_detection.yaml +115 -0
  27. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/snarks.yaml +86 -0
  28. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/sports_understanding.yaml +28 -0
  29. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/temporal_sequences.yaml +161 -0
  30. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_five_objects.yaml +94 -0
  31. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_seven_objects.yaml +94 -0
  32. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_three_objects.yaml +94 -0
  33. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/web_of_lies.yaml +79 -0
  34. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/word_sorting.yaml +55 -0
  35. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/_bbh_cot_zeroshot.yaml +36 -0
  36. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml +18 -0
  37. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml +20 -0
  38. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/dyck_languages.yaml +17 -0
  39. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml +18 -0
  40. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml +20 -0
  41. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml +20 -0
  42. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml +18 -0
  43. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml +19 -0
  44. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml +19 -0
  45. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml +19 -0
  46. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/_bbh_zeroshot.yaml +36 -0
  47. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/_zeroshot_template_yaml +26 -0
  48. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/boolean_expressions.yaml +16 -0
  49. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/causal_judgement.yaml +16 -0
  50. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/date_understanding.yaml +18 -0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/babi/README.md ADDED
@@ -0,0 +1,49 @@
+ # bAbI
+
+ ### Paper
+
+ Title: Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks
+ Abstract: https://arxiv.org/abs/1502.05698
+
+ One long-term goal of machine learning research is to produce methods that are applicable to reasoning and natural language, in particular building an intelligent dialogue agent. To measure progress towards that goal, we argue for the usefulness of a set of proxy tasks that evaluate reading comprehension via question answering. Our tasks measure understanding in several ways: whether a system is able to answer questions via chaining facts, simple induction, deduction and many more. The tasks are designed to be prerequisites for any system that aims to be capable of conversing with a human. We believe many existing learning systems can currently not solve them, and hence our aim is to classify these tasks into skill sets, so that researchers can identify (and then rectify) the failings of their systems. We also extend and improve the recently introduced Memory Networks model, and show it is able to solve some, but not all, of the tasks.
+
+ Homepage: https://github.com/facebookarchive/bAbI-tasks
+
+
+ ### Citation
+
+ ```
+ @article{weston2015towards,
+   title={Towards AI-complete question answering: A set of prerequisite toy tasks},
+   author={Weston, Jason and Bordes, Antoine and Chopra, Sumit and Rush, Alexander M and Van Merri{\"e}nboer, Bart and Joulin, Armand and Mikolov, Tomas},
+   journal={arXiv preprint arXiv:1502.05698},
+   year={2015}
+ }
+ ```
+
+ ### Groups, Tags, and Tasks
+
+ #### Groups
+
+ * Not part of a group yet
+
+ #### Tags
+
+ * No tags applied.
+
+ #### Tasks
+
+ * `babi`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+   * [ ] Have you referenced the original paper that introduced the task?
+   * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
scripts/yans/lm-evaluation-harness/lm_eval/tasks/babi/babi.yaml ADDED
@@ -0,0 +1,20 @@
+ task: babi
+ dataset_path: Muennighoff/babi
+ dataset_name: null
+ output_type: generate_until
+ training_split: train
+ validation_split: valid
+ test_split: test
+ doc_to_text: "Passage: {{passage}}Question: {{question}}\nAnswer:"
+ doc_to_target: " {{answer}}"
+ target_delimiter: ""
+ generation_kwargs:
+   until:
+     - "\n"
+     - "Passage:"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
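
The YAML above fully specifies the task. A minimal sketch of running it through the harness's Python API (assuming lm-evaluation-harness v0.4+ is installed; the model below is only a placeholder):

```python
import lm_eval

# Evaluate any HF causal LM on the task defined by babi.yaml above.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder model
    tasks=["babi"],
)
print(results["results"]["babi"])  # includes the exact_match score
```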
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/README.md ADDED
@@ -0,0 +1,53 @@
+ # BigBenchHard
+
+ ## Paper
+ Title: `Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them`
+ Abstract: https://arxiv.org/abs/2210.09261
+
+ A suite of 23 challenging BIG-Bench tasks which we call BIG-Bench Hard (BBH).
+ These are the tasks for which prior language model evaluations did not outperform
+ the average human-rater.
+
+ Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard
+
+
+ ## Citation
+ ```
+ @article{suzgun2022challenging,
+   title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them},
+   author={Suzgun, Mirac and Scales, Nathan and Sch{\"a}rli, Nathanael and Gehrmann, Sebastian and Tay, Yi and Chung, Hyung Won and Chowdhery, Aakanksha and Le, Quoc V and Chi, Ed H and Zhou, Denny and Wei, Jason},
+   journal={arXiv preprint arXiv:2210.09261},
+   year={2022}
+ }
+ ```
+
+ ### Groups, Tags, and Tasks
+
+ #### Groups
+
+ - `bbh`: the same as `bbh_cot_fewshot`.
+ - `bbh_zeroshot`
+ - `bbh_fewshot`
+ - `bbh_cot_fewshot`
+ - `bbh_cot_zeroshot`
+
+ #### Tags
+
+ None.
+
+ #### Tasks
+
+ - ...
+
+ ### Checklist
+
+ - [x] Is in Eval-harness v1.0?
+ - [ ] Has been checked for regression from v1.0?
+ - [ ] Has been checked for equivalence with original paper methodology?
+ - [ ] "Main" checked variant clearly denoted?
+
+ ### Variant Wishlist
+
+ - [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
+ - [ ] Using Verifiers
+ - [ ] Majority voting "without CoT"
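
The groups listed above are addressed by name just like individual tasks; a sketch under the same assumptions as the babi example earlier:

```python
import lm_eval

# Any of the group names above ("bbh", "bbh_zeroshot", ...) works here.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder model
    tasks=["bbh_cot_fewshot"],
)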
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/_generate_configs.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Take in a YAML, and output all other splits with this YAML
+ """
+
+ import argparse
+ import os
+ import re
+
+ import datasets
+ import requests
+ import yaml
+ from tqdm import tqdm
+
+ from lm_eval import utils
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="zeroshot")
+     parser.add_argument("--cot", default=False)
+     parser.add_argument("--fewshot", default=False)
+     parser.add_argument("--task_prefix", default="")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+
+     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path, encoding="utf-8") as f:
+         base_yaml = yaml.full_load(f)
+
+     base_doc_to_text = "Q: {{input}}\nA:"
+     answer_regex = re.compile("(?<=answer is )(.*)(?=.)")
+
+     dataset_path = "lukaemon/bbh"
+     for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
+         resp = requests.get(
+             f"https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt"
+         ).content.decode("utf-8")
+         prompt = resp.split("\n-----\n")[-1]
+         description, *few_shot = prompt.split("\n\n")
+
+         prefix_doc_to_text = ""
+         if args.fewshot:
+             if args.cot:
+                 prefix_doc_to_text = "\n\n".join(few_shot) + "\n\n"
+             else:
+                 for shot in few_shot:
+                     try:
+                         answer = answer_regex.search(shot)[0]
+                     except Exception:
+                         # no parsable answer in this shot; log it and skip so a
+                         # stale `answer` from a previous iteration is not reused
+                         print("task", task)
+                         print(shot)
+                         continue
+                     example = shot.split("Let's think step by step.")[0]
+                     prefix_doc_to_text += f"{example}{answer}\n\n"
+
+         doc_to_text = prefix_doc_to_text + base_doc_to_text
+         if args.cot:
+             doc_to_text = doc_to_text + " Let's think step by step.\n"
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "task": f"bbh_{args.task_prefix}_{task}",
+             "dataset_name": task,
+             "description": description + "\n\n",
+             "doc_to_text": doc_to_text,
+         }
+
+         file_save_path = args.save_prefix_path + f"/{task}.yaml"
+         utils.eval_logger.info(f"Saving yaml for subset {task} to {file_save_path}")
+         with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
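
A quick illustration of what the script's `answer_regex` extracts from a CoT shot, plus a hypothetical invocation inferred from `parse_args()` above (the shot text below is made up):

```python
# Hypothetical invocation, flag names taken from parse_args() above:
#   python _generate_configs.py --base_yaml_path cot_fewshot/_cot_fewshot_template_yaml \
#       --cot true --fewshot true --task_prefix cot_fewshot --save_prefix_path cot_fewshot
import re

answer_regex = re.compile("(?<=answer is )(.*)(?=.)")  # same pattern as above

shot = "Q: not ( True ) is\nA: Let's think step by step. not True = False. So the answer is False."
print(answer_regex.search(shot)[0])  # -> 'False'; the lookahead drops the trailing period
```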
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/_bbh.yaml ADDED
@@ -0,0 +1,36 @@
+ group: bbh
+ task:
+   - bbh_cot_fewshot_boolean_expressions
+   - bbh_cot_fewshot_causal_judgement
+   - bbh_cot_fewshot_date_understanding
+   - bbh_cot_fewshot_disambiguation_qa
+   - bbh_cot_fewshot_dyck_languages
+   - bbh_cot_fewshot_formal_fallacies
+   - bbh_cot_fewshot_geometric_shapes
+   - bbh_cot_fewshot_hyperbaton
+   - bbh_cot_fewshot_logical_deduction_five_objects
+   - bbh_cot_fewshot_logical_deduction_seven_objects
+   - bbh_cot_fewshot_logical_deduction_three_objects
+   - bbh_cot_fewshot_movie_recommendation
+   - bbh_cot_fewshot_multistep_arithmetic_two
+   - bbh_cot_fewshot_navigate
+   - bbh_cot_fewshot_object_counting
+   - bbh_cot_fewshot_penguins_in_a_table
+   - bbh_cot_fewshot_reasoning_about_colored_objects
+   - bbh_cot_fewshot_ruin_names
+   - bbh_cot_fewshot_salient_translation_error_detection
+   - bbh_cot_fewshot_snarks
+   - bbh_cot_fewshot_sports_understanding
+   - bbh_cot_fewshot_temporal_sequences
+   - bbh_cot_fewshot_tracking_shuffled_objects_five_objects
+   - bbh_cot_fewshot_tracking_shuffled_objects_seven_objects
+   - bbh_cot_fewshot_tracking_shuffled_objects_three_objects
+   - bbh_cot_fewshot_web_of_lies
+   - bbh_cot_fewshot_word_sorting
+ aggregate_metric_list:
+   - metric: exact_match
+     aggregation: mean
+     weight_by_size: true
+     filter_list: get-answer
+ metadata:
+   version: 3.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/_bbh_cot_fewshot.yaml ADDED
@@ -0,0 +1,36 @@
+ group: bbh_cot_fewshot
+ task:
+   - bbh_cot_fewshot_boolean_expressions
+   - bbh_cot_fewshot_causal_judgement
+   - bbh_cot_fewshot_date_understanding
+   - bbh_cot_fewshot_disambiguation_qa
+   - bbh_cot_fewshot_dyck_languages
+   - bbh_cot_fewshot_formal_fallacies
+   - bbh_cot_fewshot_geometric_shapes
+   - bbh_cot_fewshot_hyperbaton
+   - bbh_cot_fewshot_logical_deduction_five_objects
+   - bbh_cot_fewshot_logical_deduction_seven_objects
+   - bbh_cot_fewshot_logical_deduction_three_objects
+   - bbh_cot_fewshot_movie_recommendation
+   - bbh_cot_fewshot_multistep_arithmetic_two
+   - bbh_cot_fewshot_navigate
+   - bbh_cot_fewshot_object_counting
+   - bbh_cot_fewshot_penguins_in_a_table
+   - bbh_cot_fewshot_reasoning_about_colored_objects
+   - bbh_cot_fewshot_ruin_names
+   - bbh_cot_fewshot_salient_translation_error_detection
+   - bbh_cot_fewshot_snarks
+   - bbh_cot_fewshot_sports_understanding
+   - bbh_cot_fewshot_temporal_sequences
+   - bbh_cot_fewshot_tracking_shuffled_objects_five_objects
+   - bbh_cot_fewshot_tracking_shuffled_objects_seven_objects
+   - bbh_cot_fewshot_tracking_shuffled_objects_three_objects
+   - bbh_cot_fewshot_web_of_lies
+   - bbh_cot_fewshot_word_sorting
+ aggregate_metric_list:
+   - metric: exact_match
+     aggregation: mean
+     weight_by_size: true
+     filter_list: get-answer
+ metadata:
+   version: 3.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/_cot_fewshot_template_yaml ADDED
@@ -0,0 +1,27 @@
+ dataset_path: lukaemon/bbh
+ output_type: generate_until
+ test_split: test
+ doc_to_target: "{{target}}"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     # ignore_case: true
+     # ignore_punctuation: true
+ generation_kwargs:
+   max_gen_toks: 1024
+   until:
+     - "</s>"
+     - "Q"
+     - "\n\n"
+   do_sample: false
+   temperature: 0.0
+ filter_list:
+   - name: "get-answer"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=the answer is )(.*)(?=.)"
+       - function: "take_first"
+ num_fewshot: 3
+ metadata:
+   version: 3.0
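
The "get-answer" filter above is what turns a free-form CoT generation into a comparable answer string. A minimal sketch of its effect (illustration only; the real pipeline applies this through the harness's filter registry):

```python
import re

# A generation that a CoT-prompted model might produce.
generation = (
    "Let's think step by step.\n"
    "True and not False = True and True = True. So the answer is True."
)

# Same pattern as regex_pattern above; take_first then keeps the first match.
match = re.search(r"(?<=the answer is )(.*)(?=.)", generation)
print(match[0])  # -> 'True'
```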
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/boolean_expressions.yaml ADDED
@@ -0,0 +1,21 @@
+ dataset_name: "boolean_expressions"
+ description: "Evaluate the result of a random Boolean expression.\n\n"
+ doc_to_text: "Q: {{input}}\nA: Let's think step by step.\n"
+ include: "_cot_fewshot_template_yaml"
+ task: "bbh_cot_fewshot_boolean_expressions"
+ fewshot_config:
+   sampler: first_n
+   samples: [
+     {
+       "input": "not ( ( not not True ) ) is",
+       "target": "Remember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = not ( ( not not True ) ) = not ( ( A ) )\" where \"A = not not True\".\nLet's evaluate A: A = not not True = not (not True) = not False = True.\nPlugging in A, we get: Z = not ( ( A ) ) = not ( ( True ) ) = not True = False. So the answer is False."
+     },
+     {
+       "input": "True and False and not True and True is",
+       "target": "Remember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = True and False and not True and True = A and B\" where \"A = True and False\" and \"B = not True and True\".\nLet's evaluate A: A = True and False = False.\nLet's evaluate B: B = not True and True = not (True and True) = not (True) = False.\nPlugging in A and B, we get: Z = A and B = False and False = False. So the answer is False."
+     },
+     {
+       "input": "not not ( not ( False ) ) is",
+       "target": "Remember that (i) expressions inside brackets are always evaluated first and that (ii) the order of operations from highest priority to lowest priority is \"not\", \"and\", \"or\", respectively.\nWe first simplify this expression \"Z\" as follows: \"Z = not not ( not ( False ) ) = not not ( A )\" where \"A = not ( False )\".\nLet's evaluate A: A = not ( False ) = not False = True.\nPlugging in A, we get: Z = not not ( A ) = not not (True) = not not False = True. So the answer is True."
+     }
+   ]
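
Since the inputs happen to be valid Python expressions, the three exemplar answers can be sanity-checked directly (illustration only, not part of the task config):

```python
# Each line mirrors one exemplar input above.
print(not ( ( not not True ) ))              # False
print(True and False and not True and True)  # False
print(not not ( not ( False ) ))             # True
```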
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/causal_judgement.yaml ADDED
@@ -0,0 +1,92 @@
+ dataset_name: causal_judgement
+ description: 'Answer questions about causal attribution.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'How would a typical person answer each of the following questions about
+       causation?
+
+       Frank T., had an ongoing dispute with his neighbor over a stretch of land and
+       one day decided to shoot his neighbor in the body. Frank T. had no experience
+       with guns, his hand slipped on the barrel of the gun, and the shot went wild.
+       Nonetheless, the bullet bounced off a large boulder several feet away and hit
+       the neighbor''s body, causing significant injury. Did Frank T. intentionally
+       shoot his neighbor in the body?
+
+       Options:
+
+       - Yes
+
+       - No'
+     target: 'Let''s think step by step.
+
+       Here in this question, we are told that "Frank T. had no experience with guns,
+       his hand slipped on the barrel of the gun, and the shot went wild." A typical
+       person would assume that this passage suggests that Frank T. had no intention
+       of shooting and injuring someone and that the bullet accidentally hit the neighbor''s
+       body; therefore, we conclude that Frank T. did not intentionally hit his neighbor.
+       So the answer is No.'
+   - input: 'How would a typical person answer each of the following questions about
+       causation?
+
+       Suzy and Billy are working on a project that is very important for our nation''s
+       security. The boss tells them both: "Be sure that you are here at exactly 9
+       am. It is absolutely essential that you arrive at that time." Both Billy and
+       Suzy arrive at 9 am. As it happens, there was a motion detector installed in
+       the room where they arrived. The motion detector was set up to be triggered
+       if at least one person appeared in the room at the same time. So the motion
+       detector went off. Did Billy cause the motion detector to go off?
+
+       Options:
+
+       - Yes
+
+       - No'
+     target: 'Let''s think step by step.
+
+       Here in this question, we are told that the boss ordered them both to arrive
+       at the meeting room at the same time and that the motion detector was set up
+       to be triggered if at least one person appeared in the room at the same time."
+       A typical person would assume that the person probably meant to say the detector
+       was set up to be triggered if "both persons" appeared in the room at the same
+       time, not at least one person, since otherwise the phrase "at the same time"
+       would not make much sense in that sentence. Because the motion detector went
+       off, a typical person would therefore come to the conclusion that both Suzy
+       and Billy triggered the motion detector to go off; hence, Billy did indeed cause
+       the motion detector to go off. So the answer is Yes.'
+   - input: 'How would a typical person answer each of the following questions about
+       causation?
+
+       George and his sister Lena reunite at their parents'' house for Thanksgiving.
+       Whereas George just got into medical school, Lena is unhappy in her marriage
+       and recently lost her job. Over the course of the day, George and Lena get into
+       a number of heated arguments. Later in the afternoon they play a game of darts.
+       They split the first two games, and the third game is close until the end. Who
+       will win comes down to George''s last shot. If he hits a high point region,
+       he wins; if he hits a low point region, Lena wins. George thinks of the difficult
+       time Lena is having, and he really wants to let her win. He aims the dart at
+       the low point region. He sets up his shot and the dart lands in the low point
+       region. After his shot, Lena wins the game and is very happy. Did George hit
+       the low point region intentionally?
+
+       Options:
+
+       - Yes
+
+       - No'
+     target: 'Let''s think step by step.
+
+       Here in this question, we are told that "He aims the dart at the low point region."
+       A typical person might therefore think George did intentionally hit the low
+       point region, because he wanted to lift up the spirit of his sister Lena. So
+       the answer is Yes.'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_causal_judgement
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/date_understanding.yaml ADDED
@@ -0,0 +1,73 @@
+ dataset_name: date_understanding
+ description: 'Infer the date from context.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'Today is Christmas Eve of 1937. What is the date 10 days ago in MM/DD/YYYY?
+
+       Options:
+
+       (A) 12/14/2026
+
+       (B) 12/14/1950
+
+       (C) 12/14/2007
+
+       (D) 12/14/1937
+
+       (E) 07/14/1938
+
+       (F) 12/14/1988'
+     target: 'Let''s think step by step.
+
+       If today is Christmas Eve of 1937, then today''s date is December 24, 1937.
+       10 days before today is December 14, 1937, that is 12/14/1937. So the answer
+       is (D).'
+   - input: 'Tomorrow is 11/12/2019. What is the date one year ago from today in MM/DD/YYYY?
+
+       Options:
+
+       (A) 09/04/2018
+
+       (B) 11/11/2018
+
+       (C) 08/25/2018
+
+       (D) 11/02/2018
+
+       (E) 11/04/2018'
+     target: 'Let''s think step by step.
+
+       If tomorrow is 11/12/2019, then today is 11/11/2019. The date one year ago from
+       today is 11/11/2018. So the answer is (B).'
+   - input: 'Jane and John married on Jan 2, 1958. It is their 5-year anniversary today.
+       What is the date tomorrow in MM/DD/YYYY?
+
+       Options:
+
+       (A) 01/11/1961
+
+       (B) 01/03/1963
+
+       (C) 01/18/1961
+
+       (D) 10/14/1960
+
+       (E) 01/03/1982
+
+       (F) 12/03/1960'
+     target: 'Let''s think step by step.
+
+       If Jane and John married on Jan 2, 1958, then and if it is their 5-year anniversary
+       today, then today''s date is Jan 2, 1963. The date tomorrow is Jan 3, 1963,
+       that is 01/03/1963. So the answer is (B).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_date_understanding
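
The date arithmetic in these exemplars is easy to verify with the standard library; a quick check (illustration only):

```python
from datetime import date, timedelta

print(date(1937, 12, 24) - timedelta(days=10))  # 1937-12-14 -> option (D)
print(date(2019, 11, 12) - timedelta(days=1))   # today is 2019-11-11, so one year ago is 11/11/2018 -> (B)
print(date(1958, 1, 2).replace(year=1963) + timedelta(days=1))  # 1963-01-03 -> (B)
```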
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/disambiguation_qa.yaml ADDED
@@ -0,0 +1,104 @@
+ dataset_name: disambiguation_qa
+ description: 'Clarify the meaning of sentences with ambiguous pronouns.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'In the following sentences, explain the antecedent of the pronoun (which
+       thing the pronoun refers to), or state that it is ambiguous.
+
+       Sentence: The chief told the counselor that they took the day off.
+
+       Options:
+
+       (A) The chief took the day off
+
+       (B) The counselor took the day off
+
+       (C) Ambiguous'
+     target: 'Let''s think step by step.
+
+       Here we need to determine who the pronoun "they" might be referring to. There
+       are two possible referents for "they", namely the chief and the counselor. The
+       verb "told" might be able to help us determine which one is more likely (if
+       either). Let X be the chief and Y the counselor. The sentence is then of the
+       form "X told Y that (X or Y) did something."
+
+       Let''s consider Y first: "X told Y that Y did something." This case does not
+       make much sense, as Y would already have the information that Y did something,
+       because it is information about themself.
+
+       Now, consider X: "X told Y that X did something." This makes sense, because
+       X would be sharing some information about themself that Y might not have known
+       before.
+
+       Because in this context, X is the chief and Y is the counselor, the answer should
+       be the chief. So the answer is (A).'
+   - input: 'In the following sentences, explain the antecedent of the pronoun (which
+       thing the pronoun refers to), or state that it is ambiguous.
+
+       Sentence: The manager sent a message to the secretary, but he didn''t reply
+       yet.
+
+       Options:
+
+       (A) The secretary didn''t reply yet
+
+       (B) The manager didn''t reply yet
+
+       (C) Ambiguous'
+     target: 'Let''s think step by step.
+
+       Here we need to determine who the pronoun "he" might be referring to. There
+       are two possible referents for "he", namely the manager and the secretary. The
+       verbs "sent" and "reply" might be able to help us determine which one is more
+       likely (if either). Let X be the manager and Y the secretary. The sentence is
+       then of the form "X sent a message to Y, but (X or Y) didn''t reply yet."
+
+       Let''s consider Y first: "X sent a message to Y, but Y didn''t reply yet." This
+       case makes sense, because of the implicit causality of the sentence. Y was the
+       receiver of the message, but Y didn''t get back to X yet.
+
+       Now, consider X: "X sent a message to Y, but X didn''t reply yet." This case
+       doesn''t make sense, because X was the initial sender of the message, so it
+       is now Y''s turn to write back to X.
+
+       Because in this context, X is the manager and Y is the secretary, the answer
+       should be the secretary. So the answer is (A).'
+   - input: 'In the following sentences, explain the antecedent of the pronoun (which
+       thing the pronoun refers to), or state that it is ambiguous.
+
+       Sentence: Bailey will plan to meet the director at his office
+
+       Options:
+
+       (A) It will be Bailey''s office
+
+       (B) It will be the director''s office
+
+       (C) Ambiguous'
+     target: 'Let''s think step by step.
+
+       Here we need to determine who the pronoun "his" might be referring to. There
+       are two possible referents for "his", namely Bailey''s and the director''s.
+       The verb phrase "plan to meet" might be able to help us determine which one
+       is more likely (if either). Let X be Bailey and Y the director. The sentence
+       is then of the form "X will plan to meet Y at (X or Y)''s office."
+
+       Let''s consider Y first: "X will plan to meet Y at Y''s office." This case makes
+       sense, because X might want to meet up with Y at Y''s office.
+
+       Now, consider X: "X will plan to meet Y at X''s office." This case also makes
+       sense, because X might want to meet up with Y at X''s own office.
+
+       Because both X and Y are possible at the same time, we conclude that the antecedent
+       of the pronoun is ambiguous. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_disambiguation_qa
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/dyck_languages.yaml ADDED
@@ -0,0 +1,149 @@
+ dataset_name: dyck_languages
+ description: 'Correctly close a Dyck-n word.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'Complete the rest of the sequence, making sure that the parentheses are
+       closed properly. Input: [ { ['
+     target: 'Let''s think step by step.
+
+       We should process each input one by one and keep track of the stack configuration.
+
+       0: empty stack
+
+       1: [ ; stack: [
+
+       2: { ; stack: [ {
+
+       3: [ ; stack: [ { [
+
+       Now, we have reached the end. The final stack is "[ { [".
+
+       We will need to pop out "[", "{", "[" one by one in that order.
+
+       So, we need "]", "}", "]". So the answer is ] } ].'
+   - input: 'Complete the rest of the sequence, making sure that the parentheses are
+       closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]'
+     target: 'Let''s think step by step.
+
+       We should process each input one by one and keep track of the stack configuration.
+
+       0: empty stack
+
+       1: < ; stack: <
+
+       2: > ; stack: empty
+
+       3: ( ; stack: (
+
+       4: ( ; stack: ( (
+
+       5: [ ; stack: ( ( [
+
+       6: [ ; stack: ( ( [ [
+
+       7: ( ; stack: ( ( [ [ (
+
+       8: { ; stack: ( ( [ [ ( {
+
+       9: } ; stack: ( ( [ [ (
+
+       10: ) ; stack: ( ( [ [
+
+       11: [ ; stack: ( ( [ [ [
+
+       12: < ; stack: ( ( [ [ [ <
+
+       13: > ; stack: ( ( [ [ [
+
+       14: ] ; stack: ( ( [ [
+
+       15: ] ; stack: ( ( [
+
+       Now, we have reached the end. The final stack is "( ( [".
+
+       We will need to pop out "[", "(", "(" one by one in that order.
+
+       So, we need "]", ")", ")". So the answer is ] ) ).'
+   - input: 'Complete the rest of the sequence, making sure that the parentheses are
+       closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < >
+       ] > }'
+     target: 'Let''s think step by step.
+
+       We should process each input one by one and keep track of the stack configuration.
+
+       0: empty stack
+
+       1: < ; stack: <
+
+       2: [ ; stack: < [
+
+       3: < ; stack: < [ <
+
+       4: [ ; stack: < [ < [
+
+       5: { ; stack: < [ < [ {
+
+       6: < ; stack: < [ < [ { <
+
+       7: [ ; stack: < [ < [ { < [
+
+       8: ] ; stack: < [ < [ { <
+
+       9: < ; stack: < [ < [ { < <
+
+       10: { ; stack: < [ < [ { < < {
+
+       11: } ; stack: < [ < [ { < <
+
+       12: > ; stack: < [ < [ { <
+
+       13: > ; stack: < [ < [ {
+
+       14: } ; stack: < [ < [
+
+       15: ] ; stack: < [ <
+
+       16: > ; stack: < [
+
+       17: { ; stack: < [ {
+
+       18: { ; stack: < [ { {
+
+       19: ( ; stack: < [ { { (
+
+       20: ) ; stack: < [ { {
+
+       21: } ; stack: < [ {
+
+       22: { ; stack: < [ { {
+
+       23: < ; stack: < [ { { <
+
+       24: [ ; stack: < [ { { < [
+
+       25: < ; stack: < [ { { < [ <
+
+       26: > ; stack: < [ { { < [
+
+       27: ] ; stack: < [ { { <
+
+       28: > ; stack: < [ { {
+
+       29: } ; stack: < [ {
+
+       Now, we have reached the end. The final stack is "< [ {".
+
+       We will need to pop out "{", "[", "<" one by one in that order.
+
+       So, we need "}", "]", ">". So the answer is } ] >.'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_dyck_languages
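
The procedure the exemplars walk through is exactly a stack scan; a compact sketch (illustration only, assuming space-separated input as in the task):

```python
def close_dyck(prefix: str) -> str:
    """Return the closing brackets needed to complete a Dyck-n prefix."""
    pairs = {"(": ")", "[": "]", "{": "}", "<": ">"}
    stack = []
    for tok in prefix.split():
        if tok in pairs:
            stack.append(tok)   # opener: push
        else:
            stack.pop()         # closer: matches the top of the stack
    return " ".join(pairs[t] for t in reversed(stack))

print(close_dyck("[ { ["))                          # ] } ]
print(close_dyck("< > ( ( [ [ ( { } ) [ < > ] ]"))  # ] ) )
```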
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/formal_fallacies.yaml ADDED
@@ -0,0 +1,97 @@
+ dataset_name: formal_fallacies
+ description: 'Distinguish deductively valid arguments from formal fallacies.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: '"It is not always easy to see who is related to whom -- and in which ways.
+       The following argument pertains to this question: To begin with, Lesley is a
+       close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate
+       of Lowell is sufficient for being a great-grandfather of Leroy. It follows that
+       Lesley is a great-grandfather of Leroy."
+
+       Is the argument, given the explicitly stated premises, deductively valid or
+       invalid?
+
+       Options:
+
+       - valid
+
+       - invalid'
+     target: "Let's think step by step.\n(1) Lesley is a close friend of Fernando:\
+       \ Lesley = friend(Fernando).\n(2) Being a close friend of Fernando or a schoolmate\
+       \ of Lowell is sufficient for being a great-grandfather of Leroy: If X = friend(Fernando)\
+       \ OR SCHOOLMATE(Lowell), then X = great-grandfather(Leroy).\nHypothesis: Does\
+       \ it follow that Lesley is a great-grandfather of Leroy: Lesley = great-grandfather(Leroy)?\n\
+       Let\u2019s see whether the Hypothesis can be deduced from the arguments (1)\
+       \ and (2) by logical reasoning?\nBy (1), we have Lesley = friend(Fernando).\
+       \ By (2), we have if Lesley = friend(Fernando), then Lesley = great-grandfather(Leroy).\n\
+       So, it is true that Lesley is a great-grandfather of Leroy. So the answer is\
+       \ valid."
+   - input: '"It is not always easy to see who is related to whom -- and in which ways.
+       The following argument pertains to this question: Whoever is not a great-grandfather
+       of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient
+       for not being a great-grandfather of Clyde. We may conclude: Everyone who is
+       an ancestor of Dana is a stepbrother of Brian, too."
+
+       Is the argument, given the explicitly stated premises, deductively valid or
+       invalid?
+
+       Options:
+
+       - valid
+
+       - invalid'
+     target: "Let's think step by step.\n(1) Whoever is not a great-grandfather of\
+       \ Clyde is a stepbrother of Brian: If X = NOT (great-grandfather(Clyde)), then\
+       \ X = stepbrother(Brian).\n(2): Being an ancestor of Dana is sufficient for\
+       \ not being a great-grandfather of Clyde: If X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\n\
+       Hypothesis: Does it follow that everyone who is an ancestor of Dana is a stepbrother\
+       \ of Brian, too: If X = ancestor(Dana), then X = stepbrother(Brian)?\nLet\u2019\
+       s see whether the Hypothesis can be deduced from the arguments (1) and (2) by\
+       \ logical reasoning?\nBy (2), we have if X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\n\
+       Furthermore, by (1), we have if X = NOT (great-grandfather(Clyde)), then X =\
+       \ stepbrother(Brian).\nBy the transitive relation rule in first-order logic,\
+       \ we then have: if X = ancestor(Dana), then X = stepbrother(Brian).\nSo, it\
+       \ is true that everyone who is an ancestor of Dana is a stepbrother of Brian.\
+       \ So the answer is valid."
+   - input: '"It is not always easy to grasp who is consuming which products. The following
+       argument pertains to this question: Every infrequent user of Paul Mitchell shampoo
+       is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap,
+       or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo
+       and, in the same time, a loyal buyer of Caress soap. It follows that whoever
+       is an infrequent user of Paul Mitchell shampoo is not a regular consumer of
+       Lush soap."
+
+       Is the argument, given the explicitly stated premises, deductively valid or
+       invalid?
+
+       Options:
+
+       - valid
+
+       - invalid'
+     target: "Let's think step by step.\n(1) Every infrequent user of Paul Mitchell\
+       \ shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress\
+       \ soap, or both: If X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin)\
+       \ OR X = loyal-buyer(Caress).\n(2): No regular consumer of Lush soap is a rare\
+       \ consumer of Nioxin shampoo and a loyal buyer of Caress soap at the same time.\
+       \ If X = regular-consumer(Lush), then X = NOT (rare-consumer(Nioxin) AND loyal-buyer(Caress)).\n\
+       Hypothesis: Does it follow that whoever is an infrequent user of Paul Mitchell\
+       \ shampoo is not a regular consumer of Lush soap: If X = infrequent-user(Paul\
+       \ Mitchell), then X = NOT (regular-consumer(Lush))?\nLet\u2019s see whether\
+       \ the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\n\
+       By (1), we have if X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin)\
+       \ OR X = loyal-buyer(Caress). We need to consider both cases separately:\nThe\
+       \ case X = rare-consumer(Nioxin) does not appear in (2).\nThe case X = loyal-buyer(Caress)\
+       \ does not appear in (2), either.\nSo, from (1) and (2), we cannot necessarily\
+       \ deduce the Hypothesis. So the answer is invalid."
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_formal_fallacies
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml ADDED
@@ -0,0 +1,184 @@
+ dataset_name: geometric_shapes
+ description: 'Name geometric shapes from their SVG paths.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'This SVG path element <path d="M 31.00,73.00 L 32.00,59.00 L 44.00,50.00
+       L 49.00,41.00 L 64.00,37.00 L 71.00,55.00 L 64.00,76.00 L 52.00,61.00 L 31.00,73.00"/>
+       draws a
+
+       Options:
+
+       (A) circle
+
+       (B) heptagon
+
+       (C) hexagon
+
+       (D) kite
+
+       (E) line
+
+       (F) octagon
+
+       (G) pentagon
+
+       (H) rectangle
+
+       (I) sector
+
+       (J) triangle'
+     target: 'Let''s think step by step.
+
+       This SVG path element contains "M" and "L" commands. M takes two parameters
+       (x,y) and moves the current point to the coordinates (x,y). L takes two parameters
+       (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).
+
+       This path can be decomposed into 9 separate commands.
+
+       (1) M 31.00,73.00: Move the current point to 31.00,73.00.
+
+       (2) L 32.00,59.00: Create a line from 31.00,73.00 to 32.00,59.00.
+
+       (3) L 44.00,50.00: Create a line from 32.00,59.00 to 44.00,50.00.
+
+       (4) L 49.00,41.00: Create a line from 44.00,50.00 to 49.00,41.00.
+
+       (5) L 64.00,37.00: Create a line from 49.00,41.00 to 64.00,37.00.
+
+       (6) L 71.00,55.00: Create a line from 64.00,37.00 to 71.00,55.00.
+
+       (7) L 64.00,76.00: Create a line from 71.00,55.00 to 64.00,76.00.
+
+       (8) L 52.00,61.00: Create a line from 64.00,76.00 to 52.00,61.00.
+
+       (9) L 31.00,73.00: Create a line from 52.00,61.00 to 31.00,73.00.
+
+       This SVG path starts at point 31.00,73.00, creates eight consecutive and touching
+       lines, and then returns back its starting point, thereby creating an eight-sided
+       shape. It does not have any curves or arches. "octagon" is the only eight-sided
+       object on the list. So the answer is (F).'
+   - input: 'This SVG path element <path d="M 14.19,26.04 L 51.43,39.21 L 58.44,36.69
+       L 56.63,30.17 L 48.53,26.66 L 14.19,26.04"/> draws a
+
+       Options:
+
+       (A) circle
+
+       (B) heptagon
+
+       (C) hexagon
+
+       (D) kite
+
+       (E) line
+
+       (F) octagon
+
+       (G) pentagon
+
+       (H) rectangle
+
+       (I) sector
+
+       (J) triangle'
+     target: 'Let''s think step by step.
+
+       This SVG path element contains "M" and "L" commands. M takes two parameters
+       (x,y) and moves the current point to the coordinates (x,y). L takes two parameters
+       (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).
+
+       This path can be decomposed into 6 separate commands.
+
+       (1) M 14.19,26.04: Move the current point to 14.19,26.04.
+
+       (2) L 51.43,39.21: Create a line from 14.19,26.04 to 51.43,39.21.
+
+       (3) L 58.44,36.69: Create a line from 51.43,39.21 to 58.44,36.69.
+
+       (4) L 56.63,30.17: Create a line from 58.44,36.69 to 56.63,30.17.
+
+       (5) L 48.53,26.66: Create a line from 56.63,30.17 to 48.53,26.66.
+
+       (6) L 14.19,26.04: Create a line from 48.53,26.66 to 14.19,26.04.
+
+       This SVG path starts at point 14.19,26.04, creates five consecutive and touching
+       lines, and then returns back its starting point, thereby creating a five-sided
+       shape. It does not have any curves or arches. "pentagon" is the only five-sided
+       polygon on the list. So the answer is (G).'
+   - input: 'This SVG path element <path d="M 41.00,43.00 L 37.00,34.00 L 41.00,33.00
+       L 45.00,34.00 L 41.00,43.00"/> draws a
+
+       Options:
+
+       (A) circle
+
+       (B) heptagon
+
+       (C) hexagon
+
+       (D) kite
+
+       (E) line
+
+       (F) octagon
+
+       (G) pentagon
+
+       (H) rectangle
+
+       (I) sector
+
+       (J) triangle'
+     target: 'Let''s think step by step.
+
+       This SVG path element contains "M" and "L" commands. M takes two parameters
+       (x,y) and moves the current point to the coordinates (x,y). L takes two parameters
+       (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).
+
+       This path can be decomposed into 5 separate commands.
+
+       (1) M 41.00,43.00: Move the current point to 41.00,43.00.
+
+       (2) L 37.00,34.00: Create a line from 41.00,43.00 to 37.00,34.00.
+
+       (3) L 41.00,33.00: Create a line from 37.00,34.00 to 41.00,33.00.
+
+       (4) L 45.00,34.00: Create a line from 41.00,33.00 to 45.00,34.00.
+
+       (5) L 41.00,43.00: Create a line from 45.00,34.00 to 41.00,43.00.
+
+       This SVG path starts at point 41.00,43.00, creates four consecutive and touching
+       lines, and then returns back its starting point, thereby creating a four-sided
+       shape. "kite" and "rectangle" are the only two four-sided polygons on the list.
+       So, we need to determine which one is the correct answer.
+
+       A kite has two pairs of equal-length adjacent sides, whereas a rectangle has
+       two pairs of equal-length alternate (opposite) sides. Now, let''s check whether
+       the two adjacent sides of this shape are equal.
+
+       Length of side A: |A| = sqrt((41.00-37.00)^2 + (43.00-34.00)^2) = sqrt((4)^2
+       + (9)^2) = sqrt(16 + 81) = sqrt(97).
+
+       Length of side B: |B| = sqrt((37.00-41.00)^2 + (34.00-33.00)^2)) = sqrt((4)^2
+       + (1)^2) = sqrt(16 + 1) = sqrt(17).
+
+       Length of side C: |C| = sqrt((41.00-45.00)^2 + (33.00-34.00)^2)) = sqrt((-4)^2
+       + (-1)^2) = sqrt(16 + 1) = sqrt(17).
+
+       Length of side D: |D| = sqrt((45.00-41.00)^2 + (34.00-43.00)^2)) = sqrt((4)^2
+       + (-9)^2) = sqrt(16 + 81) = sqrt(97).
+
+       Note that |A| = |D| and |B| = |C|. Furthermore, A and D are adjacent and B and
+       C are adjacent. Thus, this polygon has two pairs of equal-length adjacent sides
+       and is "kite". So the answer is (D).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_geometric_shapes
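
The side-length argument in the third exemplar can be checked numerically (illustration only):

```python
import math

# Vertices of the third exemplar's path, in drawing order.
pts = [(41.0, 43.0), (37.0, 34.0), (41.0, 33.0), (45.0, 34.0)]
sides = [math.dist(pts[i], pts[(i + 1) % 4]) for i in range(4)]
print([round(s * s) for s in sides])  # [97, 17, 17, 97] -> two pairs of equal adjacent sides: a kite
```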
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/hyperbaton.yaml ADDED
@@ -0,0 +1,84 @@
+ dataset_name: hyperbaton
+ description: 'Order adjectives correctly in English sentences.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'Which sentence has the correct adjective order:
+
+       Options:
+
+       (A) rubber terrible ship
+
+       (B) terrible rubber ship'
+     target: 'Let''s think step by step.
+
+       When there is more than one adjective before a noun, the adjectives need to
+       respect the following order before a noun: "[1. opinion] [2. size] [3. age]
+       [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun".
+
+       Option (A): "rubber terrible ship". (1) rubber" falls into the material category.
+       (2) "terrible" falls into the opinion category. Option (A) has the following
+       adjective order: [7. material] [1. opinion] (or, in numeric terms, 7 1). Because
+       7 < 1 is not correct, (A) does not have the correct ordering.
+
+       Option (B): "terrible rubber ship". Option (B) has the following adjective order:
+       [1. opinion] [7. material] (or, in numeric terms, 1 7). Because 1 < 7 is correct,
+       (B) has the correct ordering. So the answer is (B).'
+   - input: 'Which sentence has the correct adjective order:
+
+       Options:
+
+       (A) repulsive small Brazilian exercise ship
+
+       (B) Brazilian repulsive exercise small ship'
+     target: 'Let''s think step by step.
+
+       When there is more than one adjective before a noun, the adjectives need to
+       respect the following order before a noun: "[1. opinion] [2. size] [3. age]
+       [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun".
+
+       Option (A): "repulsive small Brazilian exercise ship". (1) "repulsive" falls
+       into the opinion category. (2) "small" falls into the size category. (3) "Brazilian"
+       falls into the origin category. (4) "exercise" falls into the purpose category.
+       Option (A) has the following adjective order: [1. opinion] [2. size] [6. origin]
+       [8. purpose] (or, in numeric terms, 1 2 6 8). Because 1 < 2 < 6 < 8 is correct,
+       (A) has the correct ordering.
+
+       Option (B): "Brazilian repulsive exercise small ship". Option (B) has the following
+       adjective order: [6. origin] [1. opinion] [8. purpose] [2. size] (or, in numeric
+       terms, 6 1 8 2). Because 6 < 1 < 8 < 2 is not correct, (B) does not have the
+       correct ordering. So the answer is (A).'
+   - input: 'Which sentence has the correct adjective order:
+
+       Options:
+
+       (A) blue gold wonderful square shoe
+
+       (B) wonderful square blue gold shoe'
+     target: 'Let''s think step by step.
+
+       When there is more than one adjective before a noun, the adjectives need to
+       respect the following order before a noun: "[1. opinion] [2. size] [3. age]
+       [4. shape] [5. color] [6. origin] [7. material] [8. purpose] noun".
+
+       Option (A): "blue gold wonderful square shoe". (1) "blue" falls into the color
+       category. (2) "gold" falls into the material category. (3) "wonderful" falls
+       into the opinion category. (4) "square" falls into the shape category. The adjective
+       order that Option (A) has is [5. color] [7. material] [1. opinion] [4. shape]
+       (or, in numeric terms, 5 7 1 4). Because 5 < 7 < 1 < 4 is not correct, (A) does
+       not have the correct ordering.
+
+       Option (B): "wonderful square blue gold shoe". Option (B) has the following
+       adjective order: [1. opinion] [4. shape] [5. color] [7. material] (or, in numeric
+       terms, 1 4 5 7 ). Because 1 < 4 < 5 < 7 is correct, (B) has the correct ordering.
+       So the answer is (B).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_hyperbaton
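
The ordering rule the exemplars apply reduces to checking that category ranks are non-decreasing; a sketch (illustration only; the category assignments below are hand-labelled for these examples):

```python
RANK = {"opinion": 1, "size": 2, "age": 3, "shape": 4,
        "color": 5, "origin": 6, "material": 7, "purpose": 8}

def correct_order(categories):
    ranks = [RANK[c] for c in categories]
    return ranks == sorted(ranks)

print(correct_order(["material", "opinion"]))                   # False: "rubber terrible ship"
print(correct_order(["opinion", "size", "origin", "purpose"]))  # True: option (A) in the second exemplar
```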
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_five_objects.yaml ADDED
@@ -0,0 +1,93 @@
+ dataset_name: logical_deduction_five_objects
+ description: 'A logical deduction task which requires deducing the order of a sequence
+   of objects.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'The following paragraphs each describe a set of three objects arranged
+       in a fixed order. The statements are logically consistent within each paragraph.
+       In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished
+       above Amy. Eli finished below Amy.
+
+       Options:
+
+       (A) Amy finished last
+
+       (B) Eli finished last
+
+       (C) Eve finished last'
+     target: 'Let''s think step by step.
+
+       (1) Eve finished above Amy: "(above) ? Eve ? Amy ? (below)".
+
+       (2) Eli finished below Amy: "(above) ? Amy ? Eli ? (below)".
+
+       (3) Combining (1) and (2) we get the following ordering: "(above) Eve Amy Eli
+       (below)".
+
+       According to this ordering, the person who finished last (the one at the bottom
+       of this list) is Eli.
+
+       Eli finished last. So the answer is (B).'
+   - input: 'The following paragraphs each describe a set of three objects arranged
+       in a fixed order. The statements are logically consistent within each paragraph.
+       On a shelf, there are three books: a white book, a green book, and an orange
+       book. The green book is to the right of the white book. The orange book is the
+       rightmost.
+
+       Options:
+
+       (A) The white book is the leftmost
+
+       (B) The green book is the leftmost
+
+       (C) The orange book is the leftmost'
+     target: 'Let''s think step by step.
+
+       (1) The green book is to the right of the white book: "(left) ? white ? green
+       ? (right)".
+
+       (2) The orange book is the rightmost: "(left) ? white ? green orange (right)".
+
+       (3) Combining (1) and (2) we get the following ordering: "(left) white green
+       orange (right)".
+
+       According to this ordering, the leftmost book is the white book.
+
+       The white book is the leftmost. So the answer is (A).'
+   - input: 'The following paragraphs each describe a set of three objects arranged
+       in a fixed order. The statements are logically consistent within each paragraph.
+       On a shelf, there are three books: a red book, a gray book, and a white book.
+       The white book is to the left of the gray book. The red book is the second from
+       the left.
+
+       Options:
+
+       (A) The red book is the leftmost
+
+       (B) The gray book is the leftmost
+
+       (C) The white book is the leftmost'
+     target: 'Let''s think step by step.
+
+       (1) The white book is to the left of the gray book: "(left) ? white ? gray ?
+       (right)".
+
+       (2) The red book is the second from the left: "(left) ? white red gray ? (right)".
+
+       (3) Combining (1) and (2) we get the following ordering: "(left) white red gray
+       (right)".
+
+       According to this ordering, the leftmost book is the white book.
+
+       The white book is the leftmost. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_logical_deduction_five_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_seven_objects.yaml ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ dataset_name: logical_deduction_seven_objects
+ description: 'A logical deduction task which requires deducing the order of a sequence
+ of objects.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'The following paragraphs each describe a set of three objects arranged
+ in a fixed order. The statements are logically consistent within each paragraph.
+ In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished
+ above Amy. Eli finished below Amy.
+
+ Options:
+
+ (A) Amy finished last
+
+ (B) Eli finished last
+
+ (C) Eve finished last'
+ target: 'Let''s think step by step.
+
+ (1) Eve finished above Amy: "(above) ? Eve ? Amy ? (below)".
+
+ (2) Eli finished below Amy: "(above) ? Amy ? Eli ? (below)".
+
+ (3) Combining (1) and (2) we get the following ordering: "(above) Eve Amy Eli
+ (below)".
+
+ According to this ordering, the person who finished last (the one at the bottom
+ of this list) is Eli.
+
+ Eli finished last. So the answer is (B).'
+ - input: 'The following paragraphs each describe a set of three objects arranged
+ in a fixed order. The statements are logically consistent within each paragraph.
+ On a shelf, there are three books: a white book, a green book, and an orange
+ book. The green book is to the right of the white book. The orange book is the
+ rightmost.
+
+ Options:
+
+ (A) The white book is the leftmost
+
+ (B) The green book is the leftmost
+
+ (C) The orange book is the leftmost'
+ target: 'Let''s think step by step.
+
+ (1) The green book is to the right of the white book: "(left) ? white ? green
+ ? (right)".
+
+ (2) The orange book is the rightmost: "(left) ? white ? green orange (right)".
+
+ (3) Combining (1) and (2) we get the following ordering: "(left) white green
+ orange (right)".
+
+ According to this ordering, the leftmost book is the white book.
+
+ The white book is the leftmost. So the answer is (A).'
+ - input: 'The following paragraphs each describe a set of three objects arranged
+ in a fixed order. The statements are logically consistent within each paragraph.
+ On a shelf, there are three books: a red book, a gray book, and a white book.
+ The white book is to the left of the gray book. The red book is the second from
+ the left.
+
+ Options:
+
+ (A) The red book is the leftmost
+
+ (B) The gray book is the leftmost
+
+ (C) The white book is the leftmost'
+ target: 'Let''s think step by step.
+
+ (1) The white book is to the left of the gray book: "(left) ? white ? gray ?
+ (right)".
+
+ (2) The red book is the second from the left: "(left) ? white red gray ? (right)".
+
+ (3) Combining (1) and (2) we get the following ordering: "(left) white red gray
+ (right)".
+
+ According to this ordering, the leftmost book is the white book.
+
+ The white book is the leftmost. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_logical_deduction_seven_objects
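Each target above is, in effect, a search for the unique ordering consistent with the stated constraints. For three objects this can be brute-forced; the stand-alone checker below (my own, not part of the task) verifies the golfer exemplar.

```python
from itertools import permutations

# Brute-force check of the golfer exemplar: Eve above Amy, Eli below Amy.
# Index 0 is the top of the standings; the last index finished last.
valid = [p for p in permutations(["Amy", "Eli", "Eve"])
         if p.index("Eve") < p.index("Amy") < p.index("Eli")]
assert valid == [("Eve", "Amy", "Eli")]
print("finished last:", valid[0][-1])  # Eli -> option (B)
```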
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,93 @@
+ dataset_name: logical_deduction_three_objects
+ description: 'A logical deduction task which requires deducing the order of a sequence
+ of objects.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'The following paragraphs each describe a set of three objects arranged
+ in a fixed order. The statements are logically consistent within each paragraph.
+ In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished
+ above Amy. Eli finished below Amy.
+
+ Options:
+
+ (A) Amy finished last
+
+ (B) Eli finished last
+
+ (C) Eve finished last'
+ target: 'Let''s think step by step.
+
+ (1) Eve finished above Amy: "(above) ? Eve ? Amy ? (below)".
+
+ (2) Eli finished below Amy: "(above) ? Amy ? Eli ? (below)".
+
+ (3) Combining (1) and (2) we get the following ordering: "(above) Eve Amy Eli
+ (below)".
+
+ According to this ordering, the person who finished last (the one at the bottom
+ of this list) is Eli.
+
+ Eli finished last. So the answer is (B).'
+ - input: 'The following paragraphs each describe a set of three objects arranged
+ in a fixed order. The statements are logically consistent within each paragraph.
+ On a shelf, there are three books: a white book, a green book, and an orange
+ book. The green book is to the right of the white book. The orange book is the
+ rightmost.
+
+ Options:
+
+ (A) The white book is the leftmost
+
+ (B) The green book is the leftmost
+
+ (C) The orange book is the leftmost'
+ target: 'Let''s think step by step.
+
+ (1) The green book is to the right of the white book: "(left) ? white ? green
+ ? (right)".
+
+ (2) The orange book is the rightmost: "(left) ? white ? green orange (right)".
+
+ (3) Combining (1) and (2) we get the following ordering: "(left) white green
+ orange (right)".
+
+ According to this ordering, the leftmost book is the white book.
+
+ The white book is the leftmost. So the answer is (A).'
+ - input: 'The following paragraphs each describe a set of three objects arranged
+ in a fixed order. The statements are logically consistent within each paragraph.
+ On a shelf, there are three books: a red book, a gray book, and a white book.
+ The white book is to the left of the gray book. The red book is the second from
+ the left.
+
+ Options:
+
+ (A) The red book is the leftmost
+
+ (B) The gray book is the leftmost
+
+ (C) The white book is the leftmost'
+ target: 'Let''s think step by step.
+
+ (1) The white book is to the left of the gray book: "(left) ? white ? gray ?
+ (right)".
+
+ (2) The red book is the second from the left: "(left) ? white red gray ? (right)".
+
+ (3) Combining (1) and (2) we get the following ordering: "(left) white red gray
+ (right)".
+
+ According to this ordering, the leftmost book is the white book.
+
+ The white book is the leftmost. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_logical_deduction_three_objects
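All three logical_deduction variants register under the names in their `task:` fields, so any of them can be selected by name at evaluation time. Assuming a recent lm-evaluation-harness install, a run could look like the sketch below; the `simple_evaluate` call and its argument format are assumptions about the installed version, so check your local API before relying on it.

```python
# Illustrative invocation by registered task name; API details may vary by
# lm-evaluation-harness version (assumed here: a recent 0.4.x-style release).
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["bbh_cot_fewshot_logical_deduction_three_objects"],
)
print(results["results"])
```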
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/movie_recommendation.yaml ADDED
@@ -0,0 +1,97 @@
+ dataset_name: movie_recommendation
+ description: 'Recommend movies similar to the given list of movies.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones
+ and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big
+ Lebowski:
+
+ Options:
+
+ (A) Tetsuo
+
+ (B) the Ironman
+
+ (C) The Princess Bride
+
+ (D) The Barkley Marathons The Race That Eats Its Young
+
+ (E) Bug'
+ target: 'Let''s think step by step.
+
+ - Star Wars Episode IV - A New Hope (action, adventure, fantasy; 1977)
+
+ - Indiana Jones and the Last Crusade (action, adventure; 1989)
+
+ - Star Wars Episode V - The Empire Strikes Back (action, adventure, fantasy;
+ 1980)
+
+ - The Big Lebowski (action, drama, comedy; 1998)
+
+ These are all famous classic American movies produced before 2000. Amongst all
+ the options, the only movie similar to these ones seems to be The Princess Bride
+ (1987). So the answer is (C).'
+ - input: 'Find a movie similar to Twister, The Silence of the Lambs, Independence
+ Day, Braveheart:
+
+ Options:
+
+ (A) They Shoot Horses
+
+ (B) Don''t They
+
+ (C) Forrest Gump
+
+ (D) The Salton Sea
+
+ (E) Extreme Days'
+ target: 'Let''s think step by step.
+
+ - Twister (action, adventure, thriller; 1996)
+
+ - The Silence of the Lambs (crime, drama, thriller; 1991)
+
+ - Independence Day (action, science-fiction, drama; 1996)
+
+ - Braveheart (biography, drama, epic; 1995)
+
+ These are all famous Hollywood movies produced around the 1990s. Amongst all
+ the options, the only movie similar to these ones seems to be Forrest Gump (comedy,
+ drama, romance; 1994). So the answer is (C).'
+ - input: 'Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest
+ Gump:
+
+ Options:
+
+ (A) Phenomena
+
+ (B) Lilting
+
+ (C) Catwoman
+
+ (D) Edge of Tomorrow'
+ target: 'Let''s think step by step.
+
+ - Minority Report (action, crime, mystery; 2002)
+
+ - Total Recall (action, adventure, science-fiction; 2012)
+
+ - Inside Out (animation, family, comedy; 2015)
+
+ - Forrest Gump (comedy, drama, romance; 1994)
+
+ These are all famous movies produced in the past few decades. Amongst all the
+ options, the only movie similar to these ones seems to be Edge of Tomorrow (action,
+ adventure, crime, mystery; 2014), as it is also a science-fiction movie and
+ features Tom Cruise. So the answer is (D).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_movie_recommendation
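The rationale pattern above is "tag each seed movie with genres and era, then pick the option with the most overlap". A toy version of that scoring, with hand-entered genre sets (illustrative data only, not drawn from the task):

```python
# Toy genre-overlap scorer mirroring the exemplar reasoning (made-up genre data).
seed_genres = {"action", "adventure", "fantasy", "drama", "comedy"}
options = {
    "Tetsuo": {"horror", "science-fiction"},
    "The Princess Bride": {"adventure", "comedy", "fantasy", "romance"},
    "Bug": {"drama", "horror"},
}
best = max(options, key=lambda t: len(options[t] & seed_genres))
print(best)  # -> The Princess Bride, option (C)
```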
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/multistep_arithmetic_two.yaml ADDED
@@ -0,0 +1,48 @@
+ dataset_name: multistep_arithmetic_two
+ description: 'Solve multi-step arithmetic problems.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: ((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5)) =
+ target: "Let's think step by step.\nLet\u2019s recall that the order of operations\
+ \ in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication\
+ \ and division (from left to right), (4) addition and subtraction (from left\
+ \ to right). So, remember to always compute the expressions inside parentheses\
+ \ or brackets first.\nThis equation can be written as \"A * B\", where A = (-5\
+ \ + 9 * -4 - 0) and B = (4 + -7 + 0 * -5).\nLet's calculate A = (-5 + 9 * -4\
+ \ - 0) = (-5 + (9 * -4) - 0) = (-5 + (-36) - 0) = (-5 + -36 - 0) = -5 - 36 =\
+ \ -41.\nLet's calculate B = (4 + -7 + 0 * -5) = (4 + -7 + (0 * -5)) = (4 + -7\
+ \ + 0) = (4 + -7) = (4 - 7) = -3.\nThen, the final equation is A * B = -41 *\
+ \ -3 = (-41) * (-3) = 123. So the answer is 123."
+ - input: ((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4)) =
+ target: "Let's think step by step.\nLet\u2019s recall that the order of operations\
+ \ in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication\
+ \ and division (from left to right), (4) addition and subtraction (from left\
+ \ to right). So, remember to always compute the expressions inside parentheses\
+ \ or brackets first.\nThis equation can be written as \"A + B\", where A = (-9\
+ \ * 7 * 7 * -9) and B = (4 * -9 - 8 - -4).\nLet's calculate A = (-9 * 7 * 7\
+ \ * -9) = ((-9 * 7) * (7 * -9)) = ((-63) * (-63)) = 3969.\nLet's calculate\
+ \ B = (4 * -9 - 8 - (-4)) = ((4 * -9) - 8 - (-4)) = ((-36) - 8 - (-4)) = ((-36\
+ \ - 8) - (-4)) = (-44 - (-4)) = -40.\nThen, the final equation is A + B = 3969\
+ \ + -40 = 3969 - 40 = 3929. So the answer is 3929."
+ - input: ((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9)) =
+ target: "Let's think step by step.\nLet\u2019s recall that the order of operations\
+ \ in mathematics is as follows: (1) Parentheses, (2) exponents, (3) multiplication\
+ \ and division (from left to right), (4) addition and subtraction (from left\
+ \ to right). So, remember to always compute the expressions inside parentheses\
+ \ or brackets first.\nThis equation can be written as \"A - B\", where A = (-3\
+ \ + 5 * 8 * -4) and B = (9 - 8 * -7 + -9).\nLet's calculate A = (-3 + 5 * 8\
+ \ * -4) = (-3 + (5 * 8) * -4) = (-3 + (40) * -4) = (-3 + (40 * -4)) = (-3 +\
+ \ -160) = -163.\nLet's calculate B = (9 - 8 * -7 + -9) = (9 - (8 * -7) + -9)\
+ \ = (9 - (-56) + -9) = ((9 - (-56)) + -9) = ((65) + -9) = (65 - 9) = 56.\nThen,\
+ \ the final equation is A - B = -163 - 56 = -219. So the answer is -219."
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_multistep_arithmetic_two
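Since every `input` in this file is a plain arithmetic expression, the gold answers can be re-checked mechanically; the snippet below does so with Python's evaluator (safe here because the strings are fixed numeric expressions copied from the file).

```python
# Re-check the three exemplar answers above.
cases = {
    "((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5))": 123,
    "((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4))": 3929,
    "((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9))": -219,
}
for expr, expected in cases.items():
    assert eval(expr) == expected, expr  # trusted, numeric-only literals
print("all exemplar answers check out")
```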
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/navigate.yaml ADDED
@@ -0,0 +1,93 @@
+ dataset_name: navigate
+ description: 'Given a series of navigation instructions, determine whether one would
+ end up back at the starting point.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'If you follow these instructions, do you return to the starting point?
+ Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps.
+ Take 8 steps.
+
+ Options:
+
+ - Yes
+
+ - No'
+ target: 'Let''s think step by step.
+
+ We start at the origin (0, 0), facing the positive y-axis.
+
+ (1) Turn left: (0, 0), facing the negative x-axis.
+
+ (2) Turn around: (0, 0), facing the positive x-axis.
+
+ (3) Turn left: (0, 0), facing the positive y-axis.
+
+ (4) Take 7 steps: (0, 7), facing the positive y-axis.
+
+ (5) Take 2 steps: (0, 9), facing the positive y-axis.
+
+ (6) Take 4 steps: (0, 13), facing the positive y-axis.
+
+ (7) Take 8 steps: (0, 21), facing the positive y-axis.
+
+ Since (0, 21) is not (0, 0), we are not where we started. So the answer is No.'
+ - input: 'If you follow these instructions, do you return to the starting point?
+ Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps.
+ Take 1 step.
+
+ Options:
+
+ - Yes
+
+ - No'
+ target: 'Let''s think step by step.
+
+ We start at the origin (0, 0), facing the positive y-axis.
+
+ (1) Turn around: (0, 0), facing the negative y-axis.
+
+ (2) Take 1 step: (0, -1), facing the negative y-axis.
+
+ (3) Take 6 steps: (0, -7), facing the negative y-axis.
+
+ (4) Turn around: (0, -7), facing the positive y-axis.
+
+ (5) Take 6 steps: (0, -1), facing the positive y-axis.
+
+ (6) Take 9 steps: (0, 8), facing the positive y-axis.
+
+ (7) Take 1 step: (0, 9), facing the positive y-axis.
+
+ Since (0, 9) is not (0, 0), we are not where we started. So the answer is No.'
+ - input: 'If you follow these instructions, do you return to the starting point?
+ Always face forward. Take 2 steps right. Take 9 steps left. Take 7 steps right.
+
+ Options:
+
+ - Yes
+
+ - No'
+ target: 'Let''s think step by step.
+
+ We start at the origin (0, 0), facing the positive y-axis.
+
+ (1) Always face forward: (0, 0), facing the positive y-axis.
+
+ (2) Take 2 steps right: (0, 2), facing the positive y-axis.
+
+ (3) Take 9 steps left: (0, -7), facing the positive y-axis.
+
+ (4) Take 7 steps right: (0, 0), facing the positive y-axis.
+
+ Since (0, 0) is (0, 0), we are indeed where we started. So the answer is Yes.'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_navigate
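The targets above walk a position/heading state machine. A minimal simulator for the first exemplar (handling only the instruction kinds that exemplar uses) reproduces its trace:

```python
# Minimal simulator for the first navigate exemplar: turns plus forward steps.
pos, heading = [0, 0], (0, 1)          # origin, facing the positive y-axis

for op in ["left", "around", "left", 7, 2, 4, 8]:
    if op == "left":
        heading = (-heading[1], heading[0])        # 90 deg counter-clockwise
    elif op == "around":
        heading = (-heading[0], -heading[1])
    else:                                          # take `op` forward steps
        pos = [pos[0] + heading[0] * op, pos[1] + heading[1] * op]

print(tuple(pos), "returns to start?", tuple(pos) == (0, 0))  # (0, 21) -> No
```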
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/object_counting.yaml ADDED
@@ -0,0 +1,82 @@
+ dataset_name: object_counting
+ description: 'Questions that involve enumerating objects and asking the model to count
+ them.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana,
+ a flute, an orange, and a violin. How many fruits do I have?
+ target: 'Let''s think step by step.
+
+ We first identify the fruits on the list and include their quantity in parentheses:
+
+ - blackberry (1)
+
+ - nectarine (1)
+
+ - plum (1)
+
+ - strawberry (1)
+
+ - banana (1)
+
+ - orange (1)
+
+ Now, let''s add the numbers in parentheses: 1 + 1 + 1 + 1 + 1 + 1 = 6. So the
+ answer is 6.'
+ - input: I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape,
+ a nectarine, and three plums. How many fruits do I have?
+ target: 'Let''s think step by step.
+
+ We first identify the fruits on the list and include their quantity in parentheses:
+
+ - orange (1)
+
+ - raspberry (1)
+
+ - peaches (2)
+
+ - blackberry (1)
+
+ - apple (1)
+
+ - grape (1)
+
+ - nectarine (1)
+
+ - plums (3)
+
+ Now, let''s add the numbers in parentheses: 1 + 1 + 2 + 1 + 1 + 1 + 1 + 3 =
+ 11. So the answer is 11.'
+ - input: I have a lettuce head, a head of broccoli, an onion, a stalk of celery,
+ two carrots, a garlic, and a yam. How many vegetables do I have?
+ target: 'Let''s think step by step.
+
+ We first identify the vegetables on the list and include their quantity in parentheses:
+
+ - lettuce (1)
+
+ - broccoli (1)
+
+ - onion (1)
+
+ - celery (1)
+
+ - carrots (2)
+
+ - garlic (1)
+
+ - yam (1)
+
+ Now, let''s add the numbers in parentheses: 1 + 1 + 1 + 1 + 2 + 1 + 1 = 8. So
+ the answer is 8.'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_object_counting
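The counting schema in these targets (filter to the asked-for category, then sum the parenthesized quantities) is easy to mechanize; a stand-alone re-check of the second exemplar:

```python
# Re-check the second object_counting exemplar: sum the fruit quantities.
fruits = {"orange": 1, "raspberry": 1, "peaches": 2, "blackberry": 1,
          "apple": 1, "grape": 1, "nectarine": 1, "plums": 3}
print(sum(fruits.values()))  # -> 11, matching the exemplar
```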
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,99 @@
+ dataset_name: penguins_in_a_table
+ description: 'Answer questions about a table of penguins and their attributes.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Here is a table where the first line is a header and each subsequent line
+ is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard,
+ 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis
+ is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add
+ a penguin to the table:
+
+ James, 12, 90, 12
+
+ How many penguins are less than 8 years old?
+
+ Options:
+
+ (A) 1
+
+ (B) 2
+
+ (C) 3
+
+ (D) 4
+
+ (E) 5'
+ target: 'Let''s think step by step.
+
+ This question focuses on age. We know the following: Louis is 7 years old, Bernard
+ is 5 years old, Vincent is 9 years old, and Gwen is 8 years old.
+
+ Now, we add James to this table: James is 12 years old.
+
+ The penguins that are less than 8 years old are Louis and Bernard.
+
+ There are 2 penguins less than 8 years old. So the answer is (B).'
+ - input: 'Here is a table where the first line is a header and each subsequent line
+ is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard,
+ 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis
+ is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is
+ the youngest penguin?
+
+ Options:
+
+ (A) Louis
+
+ (B) Bernard
+
+ (C) Vincent
+
+ (D) Gwen
+
+ (E) James'
+ target: 'Let''s think step by step.
+
+ This question focuses on age. We know the following: Louis is 7 years old, Bernard
+ is 5 years old, Vincent is 9 years old, and Gwen is 8 years old.
+
+ According to the table, Bernard (5) is the youngest amongst them.
+
+ The youngest penguin is Bernard. So the answer is (B).'
+ - input: 'Here is a table where the first line is a header and each subsequent line
+ is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard,
+ 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis
+ is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. What is
+ the name of the second penguin sorted by alphabetic order?
+
+ Options:
+
+ (A) Louis
+
+ (B) Bernard
+
+ (C) Vincent
+
+ (D) Gwen
+
+ (E) James'
+ target: 'Let''s think step by step.
+
+ This question focuses on the name. We know the following: The names of the penguin
+ in the table are Louis, Bernard, Vincent, and Gwen.
+
+ When we sort their names alphabetically, we get Bernard, Gwen, Louis, Vincent.
+
+ The name of the second penguin sorted by alphabetical order is Gwen.
+
+ The name of the second penguin sorted by alphabetic order is Gwen. So the answer
+ is (D).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_penguins_in_a_table
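Each input embeds a small comma-separated table, so the questions reduce to parse-then-filter. A sketch for the first question (row layout hand-copied from the exemplar; the parsing helper is mine):

```python
# Parse the penguin rows from the first exemplar and count penguins under 8.
rows = ["Louis, 7, 50, 11", "Bernard, 5, 80, 13",
        "Vincent, 9, 60, 11", "Gwen, 8, 70, 15", "James, 12, 90, 12"]
penguins = [dict(zip(["name", "age", "height", "weight"],
                     (f.strip() for f in row.split(",")))) for row in rows]
young = [p["name"] for p in penguins if int(p["age"]) < 8]
print(len(young), young)  # -> 2 ['Louis', 'Bernard'], option (B)
```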
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/reasoning_about_colored_objects.yaml ADDED
@@ -0,0 +1,144 @@
+ dataset_name: reasoning_about_colored_objects
+ description: 'Answer extremely simple questions about the colors of objects on a surface.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'On the nightstand, there is a red pencil, a purple mug, a burgundy keychain,
+ a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the
+ stress ball?
+
+ Options:
+
+ (A) red
+
+ (B) orange
+
+ (C) yellow
+
+ (D) green
+
+ (E) blue
+
+ (F) brown
+
+ (G) magenta
+
+ (H) fuchsia
+
+ (I) mauve
+
+ (J) teal
+
+ (K) turquoise
+
+ (L) burgundy
+
+ (M) silver
+
+ (N) gold
+
+ (O) black
+
+ (P) grey
+
+ (Q) purple
+
+ (R) pink'
+ target: 'Let''s think step by step.
+
+ According to this question, the color of the stress ball is blue. So the answer
+ is (E).'
+ - input: 'On the table, you see a bunch of objects arranged in a row: a purple paperclip,
+ a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve
+ fidget spinner, and a burgundy pen. What is the color of the object directly
+ to the right of the stress ball?
+
+ Options:
+
+ (A) red
+
+ (B) orange
+
+ (C) yellow
+
+ (D) green
+
+ (E) blue
+
+ (F) brown
+
+ (G) magenta
+
+ (H) fuchsia
+
+ (I) mauve
+
+ (J) teal
+
+ (K) turquoise
+
+ (L) burgundy
+
+ (M) silver
+
+ (N) gold
+
+ (O) black
+
+ (P) grey
+
+ (Q) purple
+
+ (R) pink'
+ target: 'Let''s think step by step.
+
+ According to this question, the objects are arranged in a row, from left to
+ right, as follows: (1) a purple paperclip, (2) a pink stress ball, (3) a brown
+ keychain, (4) a green scrunchiephone charger, (5) a mauve fidget spinner, (6)
+ a burgundy pen.
+
+ The stress ball is the second object on the list, namely (2). The object that
+ is to the right of the stress ball corresponds to (3), which is a brown keychain.
+
+ The color of the keychain is brown. So the answer is (F).'
+ - input: 'On the nightstand, you see the following items arranged in a row: a teal
+ plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug,
+ a pink notebook, and a grey cup. How many non-orange items do you see to the
+ left of the teal item?
+
+ Options:
+
+ (A) zero
+
+ (B) one
+
+ (C) two
+
+ (D) three
+
+ (E) four
+
+ (F) five
+
+ (G) six'
+ target: 'Let''s think step by step.
+
+ According to this question, the objects are arranged in a row, from left to
+ right, as follows: (1) a teal plate, (2) a burgundy keychain, (3) a yellow scrunchiephone
+ charger, (4) an orange mug, (5) a pink notebook, (6) a grey cup.
+
+ The teal plate is the first item, namely (1). There is no item to the left of
+ the teal item.
+
+ The number of non-orange items to the left of the teal item is zero. So the
+ answer is (A).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_reasoning_about_colored_objects
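The positional questions here are index arithmetic over the listed row. A stand-alone check of the second exemplar ("directly to the right of the stress ball"):

```python
# Second colored-objects exemplar: the item directly right of the stress ball.
row = [("purple", "paperclip"), ("pink", "stress ball"), ("brown", "keychain"),
       ("green", "scrunchiephone charger"), ("mauve", "fidget spinner"),
       ("burgundy", "pen")]
i = next(k for k, (_, name) in enumerate(row) if name == "stress ball")
print(row[i + 1][0])  # -> brown, option (F)
```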
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/ruin_names.yaml ADDED
@@ -0,0 +1,115 @@
+ dataset_name: ruin_names
+ description: 'Select the humorous edit that ''ruins'' the input movie or musical artist
+ name.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Which of the following is a humorous edit of this artist or movie name:
+ ''whitesnake''?
+
+ Options:
+
+ (A) whitesnape
+
+ (B) whitesnapke
+
+ (C) whitesnuake
+
+ (D) mwhitesnake'
+ target: 'Let''s think step by step.
+
+ The original name is "whitesnake". This is the name of an old English hard rock
+ band. It is a compound word, formed by the words "white" and "snake".
+
+ (A) "whitesnape": It is formed by the combination of "white" and "snape"; therefore,
+ "snake" has been changed to "snape". Snape makes a reference to the fictional
+ character Severus Snape in the Harry Potter series, so (A) is indeed a meaningful
+ and funny edit.
+
+ (B) "whitesnapke": It is formed by the combination of "white" and "snapke",
+ but "snapke" is not an actual word; therefore, "whitesnapke" is not humorous.
+
+ (C) "whitesnuake": It is formed by the combination of "white" and "snuake",
+ but "snuake" is not an actual word; therefore, "whitesnuake" is not humorous.
+
+ (D) "mwhitesnake": It is formed by the combination of "m", "white", and "snake",
+ but the prefix "m-" seems arbitrary; therefore, "mwhitesnake" is not meaningful
+ or humorous.
+
+ Of the above, the only humorous edit is (A). So the answer is (A).'
+ - input: 'Which of the following is a humorous edit of this artist or movie name:
+ ''one of our dinosaurs is missing''?
+
+ Options:
+
+ (A) ofne of our dinosaurs is missing
+
+ (B) one af our dinosaurs is missing
+
+ (C) one of our dinosaurs is pissing
+
+ (D) one of our dinosaur is missing'
+ target: 'Let''s think step by step.
+
+ The original name is "one of our dinosaurs is missing". This is the name of
+ an old British movie.
+
+ (A) "ofne of our dinosaurs is missing": Here "one of" is changed to "ofne",
+ but the word "ofne" is not an actual word.
+
+ (B) "one af our dinosaurs is missing": Here the word "of" is changed to "af",
+ but the word "af" is not an actual word.
+
+ (C) "one of our dinosaurs is pissing": Here the word "missing" is changed to
+ "pissing", and "one of our dinosaurs is pissing" is indeed a very whimsical
+ and mischievous edit. This change truly ruins the original title of the movie.
+
+ (D) "one of our dinosaur is missing": Here the word "dinosaurs" is changed to
+ "dinosaur", but "dinosaur" is singular and should be plural in the title; this
+ change therefore feels arbitrary and not humorous.
+
+ Of the above, the only humorous edit is (C). So the answer is (C).'
+ - input: 'Which of the following is a humorous edit of this artist or movie name:
+ ''counting crows''?
+
+ Options:
+
+ (A) countingy crows
+
+ (B) counting cows
+
+ (C) courting crows
+
+ (D) coutnting crows'
+ target: 'Let''s think step by step.
+
+ The original name is "counting crows". This is the name of an American rock
+ band. Historically, the band name comes from the British nursery rhyme "One
+ for Sorrow", which is about counting of magpies.
+
+ (A) "countingy crows": Here the word "counting" is changed to "countingy", but
+ the word "countingy" is not an actual word.
+
+ (B) "counting cows": Here the word "crows" is changed to "cows", and this is
+ indeed a playful and meaningful edit that ruins the original name of the band.
+
+ (C) "courting crows": Here the word "counting" is changed to "courting", and
+ "courting" is an actual word; however, "courting crows" does not sound as humorous
+ as "counting cows".
+
+ (D) "coutnting crows": Here the word "counting" is changed to "coutnting", but
+ the word "coutnting" is not an actual word.
+
+ Of the above, the only humorous edit is (B). So the answer is (B).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_ruin_names
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/salient_translation_error_detection.yaml ADDED
@@ -0,0 +1,115 @@
+ dataset_name: salient_translation_error_detection
+ description: 'Detect the type of error in an English translation of a German source
+ sentence.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'The following translations from German to English contain a particular
+ error. That error will be one of the following types: Named Entities: An entity
+ (names, places, locations, etc.) is changed to a different entity. Numerical
+ Values: Numerical values (ordinals or cardinals), dates, and/or units are changed.
+ Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are
+ changed. Negation or Antonyms: Introduce or remove a negation or change comparatives
+ to their antonyms. Facts: Trivial factual errors not pertaining to the above
+ classes are introduced in the translations. Dropped Content: A significant clause
+ in the translation is removed. Please identify that error. Source: In der Liste
+ der Baudenkmale in Lenzen (Elbe) sind alle Baudenkmale der brandenburgischen
+ Stadt Lenzen (Elbe) und ihrer Ortsteile aufgelistet.
+
+ Translation: In the list of architectural monuments in Lenzen all architectural
+ monuments of the Brandenburg city of Lenzen and its districts are listed.
+
+ The translation contains an error pertaining to
+
+ Options:
+
+ (A) Modifiers or Adjectives
+
+ (B) Numerical Values
+
+ (C) Negation or Antonyms
+
+ (D) Named Entities
+
+ (E) Dropped Content
+
+ (F) Facts'
+ target: 'Let''s think step by step.
+
+ We solve this question by first translating the source sentence to English and
+ then by comparing our translation with the provided translation. According to
+ Google Translate, the correct translation of the source sentence from German
+ to English is "The list of monuments in Lenzen (Elbe) includes all the monuments
+ in the Brandenburg town of Lenzen (Elbe) and its districts." On the other hand,
+ the provided translation is "In the list of architectural monuments in Lenzen
+ all architectural monuments of the Brandenburg city of Lenzen and its districts
+ are listed." Note that Lenzen (Elbe) is changed to Lenzen in the original translation;
+ so, there is a named entity error. Because an entity in the original source
+ sentence is changed to a different entity in the translation, the translation
+ contains an error pertaining to Named Entities. So the answer is (D).'
+ - input: "The following translations from German to English contain a particular\
+ \ error. That error will be one of the following types: Named Entities: An entity\
+ \ (names, places, locations, etc.) is changed to a different entity. Numerical\
+ \ Values: Numerical values (ordinals or cardinals), dates, and/or units are\
+ \ changed. Modifiers or Adjectives: The modifiers and adjectives pertaining\
+ \ to a noun are changed. Negation or Antonyms: Introduce or remove a negation\
+ \ or change comparatives to their antonyms. Facts: Trivial factual errors not\
+ \ pertaining to the above classes are introduced in the translations. Dropped\
+ \ Content: A significant clause in the translation is removed. Please identify\
+ \ that error. Source: Auf dieser Seite sind die Baudenkm\xE4ler der oberbayerischen\
+ \ Gro\xDFen Kreisstadt Landsberg am Lech zusammengestellt.\nTranslation: On\
+ \ this page are compiled the architectural monuments of the town of Landsberg\
+ \ am Lech.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers\
+ \ or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n\
+ (E) Dropped Content\n(F) Facts"
+ target: 'Let''s think step by step.
+
+ We solve this question by first translating the source sentence to English and
+ then by comparing our translation with the provided translation. According to
+ Google Translate, the correct translation of the source sentence from German
+ to English is "The monuments of the Upper Bavarian district town of Landsberg
+ am Lech are compiled on this page." On the other hand, the provided translation
+ is "On this page are compiled the architectural monuments of the town of Landsberg
+ am Lech." Note that an important detail about the location of Landsberg am Lech
+ is omitted in the original translation: The translation should have said "Upper
+ Bavarian district town of Landsberg am Lech". Because a significant clause in
+ the translation was removed, the translation contains an error pertaining to
+ Dropped Content. So the answer is (E).'
+ - input: "The following translations from German to English contain a particular\
+ \ error. That error will be one of the following types: Named Entities: An entity\
+ \ (names, places, locations, etc.) is changed to a different entity. Numerical\
+ \ Values: Numerical values (ordinals or cardinals), dates, and/or units are\
+ \ changed. Modifiers or Adjectives: The modifiers and adjectives pertaining\
+ \ to a noun are changed. Negation or Antonyms: Introduce or remove a negation\
+ \ or change comparatives to their antonyms. Facts: Trivial factual errors not\
+ \ pertaining to the above classes are introduced in the translations. Dropped\
+ \ Content: A significant clause in the translation is removed. Please identify\
+ \ that error. Source: \u0141eba ist eine Kleinstadt und ein Badeort im Powiat\
+ \ L\u0119borski der polnischen Woiwodschaft Pommern.\nTranslation: Eba is not\
+ \ a small town and seaside resort in the Powiat L\xE9borski county of the Pomeranian\
+ \ Voivodeship of Poland.\nThe translation contains an error pertaining to\n\
+ Options:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or\
+ \ Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts"
+ target: "Let's think step by step.\nWe solve this question by first translating\
+ \ the source sentence to English and then by comparing our translation with\
+ \ the provided translation. According to Google Translate, the correct translation\
+ \ of the source sentence from German to English is \"\u0141eba is a small town\
+ \ and seaside resort in the Powiat L\u0119borski of the Polish Pomeranian Voivodeship.\"\
+ \ On the other hand, the provided translation is \"\u0141eba is not a small\
+ \ town and seaside resort in the Powiat L\xE9borski county of the Pomeranian\
+ \ Voivodeship of Poland.\" Note that the provided sentence says, \"\u0141eba\
+ \ is not a small town ...\" However, the translation should have been \"\u0141\
+ eba is a small town ...\" Because a negation is introduced at the beginning\
+ \ of the sentence and has fundamentally changed the meaning of the original\
+ \ source, the translation contains an error pertaining to Negation or Antonyms.\
+ \ So the answer is (C)."
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_salient_translation_error_detection
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/snarks.yaml ADDED
@@ -0,0 +1,86 @@
+ dataset_name: snarks
+ description: 'Determine which of two sentences is sarcastic.
+
+ According to Cambridge University Dictionary, sarcasm is "the use of remarks that
+ clearly mean the opposite of what they say, made in order to hurt someone''s feelings
+ or to criticize something in a humorous way." Sarcastic sentences often contain
+ satirical or ironic utterances, hyperboles, ambivalent or witty remarks.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Which statement is sarcastic?
+
+ Options:
+
+ (A) Yes, because having interests and actively researching them is a huge waste
+
+ (B) Yes, because having interests and actively researching them is a huge deal'
+ target: 'Let''s think step by step.
+
+ If we look at (A), it says that having interests and actively researching them
+ is a huge waste, implying that it is a useless effort. However, we know that
+ having interests and actively researching them is typically not a waste but
+ rather is beneficial to the individual. The presence of such a juxtaposition
+ in (A) suggests that it contains a taste of irony and sarcasm.
+
+ If we look at (B), it says that having interests and actively researching them
+ is a huge deal, implying that it is an important and consequential effort. This
+ is arguably a neutral and correct statement.
+
+ Of the above, the sarcastic option is (A). So the answer is (A).'
+ - input: 'Which statement is sarcastic?
+
+ Options:
+
+ (A) No one is going to disagree with you on this. Avoiding ad hominem attacks
+ really help your case
+
+ (B) No one is going to disagree with you on this. Ad hominem attacks really
+ help your case'
+ target: 'Let''s think step by step.
+
+ If we look at (A), it says that avoiding ad hominem attacks really help your
+ case, implying that ad hominem attacks are adverse and injurious. Because ad
+ hominem attacks are addressed at a person rather than an idea, it is indeed true
+ that avoiding them is often useful and helpful; so, (A) is a neutral (valid
+ and agreeable) statement.
+
+ If we look at (B), it says that ad hominem attacks really help your case, implying
+ that ad hominem attacks are a positive thing. However, we stated previously
+ that ad hominem attacks are often not useful or constructive. The speaker in
+ this sentence therefore seems to mean the opposite of what they are saying;
+ so, there appears to be a taste of irony and sarcasm in (B).
+
+ Of the above, the sarcastic option is (B). So the answer is (B).'
+ - input: 'Which statement is sarcastic?
+
+ Options:
+
+ (A) Consistency in the league''s punishments? What do you think this is supposed
+ to be, politics?
+
+ (B) Consistency in the league''s punishments? What do you think this is supposed
+ to be, moral?'
+ target: 'Let''s think step by step.
+
+ If we look at (A), it likens the consistency in the league''s punishments with
+ that in politics. Because politics or political affairs are often not considered
+ to be consistent or dependable, this sentence appears to be satirical.
+
+ If we look at (B), it likens the consistency in the league''s punishments with
+ that in morality. Discussing the consistency of the league''s punishments in
+ the context of morality, ethics, or law makes sense and does not appear to make
+ a satirical point about anything.
+
+ Of the above, the sarcastic option is (A). So the answer is (A).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_snarks
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/sports_understanding.yaml ADDED
@@ -0,0 +1,28 @@
+ dataset_name: sports_understanding
+ description: 'Determine whether an artificially constructed sentence relating to sports
+ is plausible or not.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: Is the following sentence plausible? "Bam Adebayo scored a reverse layup
+ in the Western Conference Finals."
+ target: Let's think step by step. Bam Adebayo is an American basketball player.
+ Scoring a reverse layup in the Western Conference Finals is part of the NBA
+ Finals. So the answer is yes.
+ - input: Is the following sentence plausible? "Santi Cazorla scored a touchdown."
+ target: Let's think step by step. Santi Cazorla is a soccer player. Touchdown
+ is part of American football and rugby. So the answer is no.
+ - input: Is the following sentence plausible? "DeMar DeRozan was called for the
+ goal tend."
+ target: Let's think step by step. DeMar DeRozan is an American basketball player.
+ Goal tending is part of basketball. So the answer is yes.
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_sports_understanding
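The rationales here follow a fixed lookup schema: the athlete's sport versus the sport the action belongs to. A toy table-driven version (the mappings are hand-entered for illustration):

```python
# Toy plausibility check mirroring the sports_understanding rationale schema.
athlete_sport = {"Bam Adebayo": "basketball", "Santi Cazorla": "soccer",
                 "DeMar DeRozan": "basketball"}
action_sport = {"reverse layup": "basketball", "touchdown": "American football",
                "goal tend": "basketball"}

def plausible(athlete, action):
    return "yes" if athlete_sport[athlete] == action_sport[action] else "no"

print(plausible("Santi Cazorla", "touchdown"))  # -> no
```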
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/temporal_sequences.yaml ADDED
@@ -0,0 +1,161 @@
+ dataset_name: temporal_sequences
+ description: 'Task description: Answer questions about which times certain events
+ could have occurred.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Today, Emily went to the museum. Between what times could they have gone?
+
+ We know that:
+
+ Emily woke up at 1pm.
+
+ Elizabeth saw Emily reading at the library from 2pm to 4pm.
+
+ Jessica saw Emily watching a movie at the theater from 4pm to 5pm.
+
+ Leslie saw Emily waiting at the airport from 5pm to 6pm.
+
+ William saw Emily buying clothes at the mall from 6pm to 7pm.
+
+ The museum was closed after 7pm.
+
+ Between what times could Emily have gone to the museum?
+
+ Options:
+
+ (A) 1pm to 2pm
+
+ (B) 6pm to 7pm
+
+ (C) 5pm to 6pm
+
+ (D) 2pm to 4pm'
+ target: 'Let''s think step by step.
+
+ Wake-up time: 1pm.
+
+ 1pm-2pm: free.
+
+ 2pm-4pm: reading at the library.
+
+ 4pm-5pm: watching a movie at the theater.
+
+ 5pm-6pm: waiting at the airport.
+
+ 6pm-7pm: buying clothes at the mall.
+
+ The museum closure time: 7pm.
+
+ The only time when Emily could have gone to the museum was 1pm to 2pm. So the
+ answer is (A).'
+ - input: 'Today, Elizabeth went to the amusement park. Between what times could
+ they have gone?
+
+ We know that:
+
+ Elizabeth woke up at 7am.
+
+ David saw Elizabeth fixing their computer at the electronic store from 1pm to
+ 2pm.
+
+ Sarah saw Elizabeth playing tennis at the tennis court from 2pm to 3pm.
+
+ Susan saw Elizabeth walking towards the Statue of Liberty from 3pm to 6pm.
+
+ Andrew saw Elizabeth taking photos near the Eiffel Tower from 6pm to 9pm.
+
+ Emily saw Elizabeth getting a coffee at the cafe from 9pm to 10pm.
+
+ The amusement park was closed after 10pm.
+
+ Between what times could Elizabeth have gone to the amusement park?
+
+ Options:
+
+ (A) 7am to 1pm
+
+ (B) 9pm to 10pm
+
+ (C) 1pm to 2pm
+
+ (D) 3pm to 6pm'
+ target: 'Let''s think step by step.
+
+ Wake-up time: 7am.
+
+ 7am-1pm: free.
+
+ 1pm-2pm: fixing their computer at the electronic store.
+
+ 2pm-3pm: playing tennis at the tennis court.
+
+ 3pm-6pm: walking towards the Statue of Liberty.
+
+ 6pm-9pm: taking photos near the Eiffel Tower.
+
+ 9pm-10pm: getting a coffee at the cafe.
+
+ The amusement park closure time: 10pm.
+
+ The only time when Elizabeth could have gone to the amusement park was 7am to
+ 1pm. So the answer is (A).'
+ - input: 'Today, Tiffany went to the beach. Between what times could they have gone?
+
+ We know that:
+
+ Tiffany woke up at 5am.
+
+ Betty saw Tiffany getting a coffee at the cafe from 5am to 6am.
+
+ Jessica saw Tiffany working at the office from 6am to 9am.
+
+ John saw Tiffany stretching at a yoga studio from 9am to 12pm.
+
+ Sean saw Tiffany sitting on a rooftop from 12pm to 2pm.
+
+ Sarah saw Tiffany playing tennis at the tennis court from 2pm to 3pm.
+
+ The beach was closed after 4pm.
+
+ Between what times could Tiffany have gone to the beach?
+
+ Options:
+
+ (A) 9am to 12pm
+
+ (B) 12pm to 2pm
+
+ (C) 5am to 6am
+
+ (D) 3pm to 4pm'
+ target: 'Let''s think step by step.
+
+ Wake-up time: 5am.
+
+ 5am-6am: getting a coffee at the cafe.
+
+ 6am-9am: working at the office.
+
+ 9am-12pm: stretching at a yoga studio.
+
+ 12pm-2pm: sitting on a rooftop.
+
+ 2pm-3pm: playing tennis at the tennis court.
+
+ 3pm-4pm: free.
+
+ The beach closure time: 4pm.
+
+ The only time when Tiffany could have gone to the beach was 3pm to 4pm. So the
+ answer is (D).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_temporal_sequences
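Each exemplar is an interval-coverage puzzle: subtract the sighting intervals from the waking window and the leftover block is the answer. A compact checker for the third exemplar, with hours on a 24-hour scale:

```python
# Third temporal_sequences exemplar: find the free hour before the 4pm close.
wake, close = 5, 16                                   # 5am wake-up, 4pm close
busy = [(5, 6), (6, 9), (9, 12), (12, 14), (14, 15)]  # sightings (24h clock)
free = [h for h in range(wake, close)
        if not any(start <= h < end for start, end in busy)]
print(free)  # -> [15], i.e. 3pm-4pm, option (D)
```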
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_five_objects.yaml ADDED
@@ -0,0 +1,94 @@
+ dataset_name: tracking_shuffled_objects_five_objects
+ description: 'A task requiring determining the final positions of a set of objects
+ given their initial positions and a description of a sequence of swaps.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+ are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
+ has a pink ball.
+
+ As the game progresses, pairs of players trade balls. First, Claire and Alice
+ swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
+ At the end of the game, Bob has the
+
+ Options:
+
+ (A) yellow ball
+
+ (B) blue ball
+
+ (C) pink ball'
+ target: 'Let''s think step by step.
+
+ (0) At the start: Alice: yellow, Bob: blue, Claire: pink.
+
+ (1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.
+
+ (2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.
+
+ (3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.
+
+ At the end of the game, Bob has the yellow ball. So the answer is (A).'
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+ are each holding a ball: Alice has a white ball, Bob has a purple ball, and
+ Claire has a pink ball.
+
+ As the game progresses, pairs of players trade balls. First, Bob and Alice swap
+ balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
+ the end of the game, Alice has the
+
+ Options:
+
+ (A) white ball
+
+ (B) purple ball
+
+ (C) pink ball'
+ target: 'Let''s think step by step.
+
+ (0) At the start: Alice: white, Bob: purple, Claire: pink.
+
+ (1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.
+
+ (2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.
+
+ (3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.
+
+ At the end of the game, Alice has the pink ball. So the answer is (C).'
+ - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
+ a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
+ with Rodrigo, and Claire is dancing with Patrick.
+
+ Throughout the song, the dancers often trade partners. First, Alice and Bob
+ switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
+ switch partners. At the end of the dance, Alice is dancing with
+
+ Options:
+
+ (A) Lola
+
+ (B) Rodrigo
+
+ (C) Patrick'
+ target: 'Let''s think step by step.
+
+ (0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.
+
+ (1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.
+
+ (2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.
+
+ (3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.
+
+ At the end of the dance, Alice is dancing with Patrick. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_tracking_shuffled_objects_five_objects
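These exemplars are pure state tracking: a person-to-item mapping mutated by swaps. Replaying the first exemplar with a dict reproduces the gold answer:

```python
# Replay the first tracking_shuffled_objects exemplar.
holds = {"Alice": "yellow", "Bob": "blue", "Claire": "pink"}
for a, b in [("Claire", "Alice"), ("Alice", "Bob"), ("Claire", "Bob")]:
    holds[a], holds[b] = holds[b], holds[a]           # one swap per round
print(holds["Bob"])  # -> yellow, option (A)
```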
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_seven_objects.yaml ADDED
@@ -0,0 +1,94 @@
+ dataset_name: tracking_shuffled_objects_seven_objects
+ description: 'A task requiring determining the final positions of a set of objects
+ given their initial positions and a description of a sequence of swaps.
+
+
+ '
+ doc_to_text: 'Q: {{input}}
+
+ A: Let''s think step by step.
+
+ '
+ fewshot_config:
+ sampler: first_n
+ samples:
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+ are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
+ has a pink ball.
+
+ As the game progresses, pairs of players trade balls. First, Claire and Alice
+ swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
+ At the end of the game, Bob has the
+
+ Options:
+
+ (A) yellow ball
+
+ (B) blue ball
+
+ (C) pink ball'
+ target: 'Let''s think step by step.
+
+ (0) At the start: Alice: yellow, Bob: blue, Claire: pink.
+
+ (1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.
+
+ (2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.
+
+ (3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.
+
+ At the end of the game, Bob has the yellow ball. So the answer is (A).'
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+ are each holding a ball: Alice has a white ball, Bob has a purple ball, and
+ Claire has a pink ball.
+
+ As the game progresses, pairs of players trade balls. First, Bob and Alice swap
+ balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
+ the end of the game, Alice has the
+
+ Options:
+
+ (A) white ball
+
+ (B) purple ball
+
+ (C) pink ball'
+ target: 'Let''s think step by step.
+
+ (0) At the start: Alice: white, Bob: purple, Claire: pink.
+
+ (1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.
+
+ (2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.
+
+ (3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.
+
+ At the end of the game, Alice has the pink ball. So the answer is (C).'
+ - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
+ a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
+ with Rodrigo, and Claire is dancing with Patrick.
+
+ Throughout the song, the dancers often trade partners. First, Alice and Bob
+ switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
+ switch partners. At the end of the dance, Alice is dancing with
+
+ Options:
+
+ (A) Lola
+
+ (B) Rodrigo
+
+ (C) Patrick'
+ target: 'Let''s think step by step.
+
+ (0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.
+
+ (1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.
+
+ (2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.
+
+ (3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.
+
+ At the end of the dance, Alice is dancing with Patrick. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_tracking_shuffled_objects_seven_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_three_objects.yaml ADDED
@@ -0,0 +1,94 @@
+ dataset_name: tracking_shuffled_objects_three_objects
+ description: 'A task requiring determining the final positions of a set of objects
+   given their initial positions and a description of a sequence of swaps.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+       are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
+       has a pink ball.
+
+       As the game progresses, pairs of players trade balls. First, Claire and Alice
+       swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
+       At the end of the game, Bob has the
+
+       Options:
+
+       (A) yellow ball
+
+       (B) blue ball
+
+       (C) pink ball'
+     target: 'Let''s think step by step.
+
+       (0) At the start: Alice: yellow, Bob: blue, Claire: pink.
+
+       (1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.
+
+       (2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.
+
+       (3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.
+
+       At the end of the game, Bob has the yellow ball. So the answer is (A).'
+   - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+       are each holding a ball: Alice has a white ball, Bob has a purple ball, and
+       Claire has a pink ball.
+
+       As the game progresses, pairs of players trade balls. First, Bob and Alice swap
+       balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
+       the end of the game, Alice has the
+
+       Options:
+
+       (A) white ball
+
+       (B) purple ball
+
+       (C) pink ball'
+     target: 'Let''s think step by step.
+
+       (0) At the start: Alice: white, Bob: purple, Claire: pink.
+
+       (1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.
+
+       (2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.
+
+       (3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.
+
+       At the end of the game, Alice has the pink ball. So the answer is (C).'
+   - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
+       a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
+       with Rodrigo, and Claire is dancing with Patrick.
+
+       Throughout the song, the dancers often trade partners. First, Alice and Bob
+       switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
+       switch partners. At the end of the dance, Alice is dancing with
+
+       Options:
+
+       (A) Lola
+
+       (B) Rodrigo
+
+       (C) Patrick'
+     target: 'Let''s think step by step.
+
+       (0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.
+
+       (1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.
+
+       (2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.
+
+       (3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.
+
+       At the end of the dance, Alice is dancing with Patrick. So the answer is (C).'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_tracking_shuffled_objects_three_objects
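The few-shot targets above are plain state tracking: hold a name-to-item map and apply each swap in order. A minimal illustrative Python sketch of that bookkeeping, using the first sample's setup (not part of the task config):

# Track who holds which ball through a sequence of pairwise swaps,
# mirroring the step-by-step updates in the few-shot targets above.
state = {"Alice": "yellow", "Bob": "blue", "Claire": "pink"}
swaps = [("Claire", "Alice"), ("Alice", "Bob"), ("Claire", "Bob")]

for i, (a, b) in enumerate(swaps, start=1):
    state[a], state[b] = state[b], state[a]
    print(f"({i}) {a} and {b} swap balls: {state}")

print(f"At the end of the game, Bob has the {state['Bob']} ball.")  # yellow -> (A)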
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/web_of_lies.yaml ADDED
@@ -0,0 +1,79 @@
+ dataset_name: web_of_lies
+ description: 'Evaluate a random boolean function expressed as a word problem.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina
+       says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent
+       lies. Does Raymond tell the truth?'
+     target: 'Let''s think step by step.
+
+       (1) Fidel tells the truth. So, we know that Fidel tells the truth.
+
+       (2) Jerry says Fidel tells the truth. Since we know from (1) that Fidel tells
+       the truth, if Jerry says that Fidel tells the truth, then Jerry tells the truth.
+
+       (3) Vina says Jerry tells the truth. Since we know from (2) that Jerry tells
+       the truth, if Vina says Jerry tells the truth, then Vina tells the truth.
+
+       (4) Millicent says Vina lies. Since we know from (3) that Vina tells the truth,
+       if Millicent says Vina lies, then Millicent lies.
+
+       (5) Raymond says Millicent lies. Since we know from (4) that Millicent lies,
+       if Raymond says Millicent lies, then Raymond tells the truth.
+
+       Now, the question asks: Does Raymond tell the truth? We know from (5) that Raymond
+       tells the truth. So the answer is Yes.'
+   - input: 'Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie
+       tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell
+       the truth?'
+     target: 'Let''s think step by step.
+
+       (1) Kristian lies. So, we know that Kristian lies.
+
+       (2) Millie says Kristian lies. Since we know from (1) that Kristian lies, if
+       Millie says Kristian lies, then Millie tells the truth.
+
+       (3) Maybelle says Millie tells the truth. Since we know from (2) that Millie
+       tells the truth, if Maybelle says Millie tells the truth, then Maybelle tells
+       the truth.
+
+       (4) Fidel says Maybelle lies. Since we know from (3) that Maybelle tells the
+       truth, if Fidel says Maybelle lies, then Fidel lies.
+
+       (5) Leda says Fidel lies. Since we know from (4) that Fidel lies, if Leda says
+       Fidel lies, then Leda tells the truth.
+
+       Now, the question asks: Does Leda tell the truth? We know from (5) that Leda
+       tells the truth. So the answer is Yes.'
+   - input: 'Question: Kristian tells the truth. Michaela says Kristian lies. Raymond
+       says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says
+       Osvaldo tells the truth. Does Jamey tell the truth?'
+     target: 'Let''s think step by step.
+
+       (1) Kristian tells the truth. So, we know that Kristian tells the truth.
+
+       (2) Michaela says Kristian lies. Since we know from (1) that Kristian tells
+       the truth, if Michaela says Kristian lies, then Michaela lies.
+
+       (3) Raymond says Michaela tells the truth. Since we know from (2) that Michaela
+       lies, if Raymond says Michaela tells the truth, then Raymond lies.
+
+       (4) Osvaldo says Raymond tells the truth. Since we know from (3) that Raymond
+       lies, if Osvaldo says Raymond tells the truth, then Osvaldo lies.
+
+       (5) Jamey says Osvaldo tells the truth. Since we know from (4) that Osvaldo
+       lies, if Jamey says Osvaldo tells the truth, then Jamey lies.
+
+       Now, the question asks: Does Jamey tell the truth? We know from (5) that Jamey
+       lies. So the answer is No.'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_web_of_lies
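Every step in these targets applies the same rule: a speaker tells the truth exactly when their claim about the previous speaker matches what is already established. An illustrative Python sketch of that propagation, with the claim list encoding the first sample (not harness code):

def tells_truth(first_is_truthful, claims):
    # claims[i] is True if speaker i says the previous speaker tells the truth.
    truthful = first_is_truthful
    for says_truthful in claims:
        # A speaker is truthful iff their claim matches the known fact.
        truthful = (says_truthful == truthful)
    return truthful

# Fidel is truthful; Jerry says truth, Vina says truth,
# Millicent says lies, Raymond says lies.
print(tells_truth(True, [True, True, False, False]))  # True -> "Yes"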
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/word_sorting.yaml ADDED
@@ -0,0 +1,55 @@
+ dataset_name: word_sorting
+ description: 'Sort a list of words.
+
+
+   '
+ doc_to_text: 'Q: {{input}}
+
+   A: Let''s think step by step.
+
+   '
+ fewshot_config:
+   sampler: first_n
+   samples:
+   - input: 'Sort the following words alphabetically: List: oven costume counterpart'
+     target: 'Let''s think step by step.
+
+       The first letter: "oven": "o" (15). "costume": "c" (3). "counterpart": "c" (3).
+       We now have: (3) ["costume" ? "counterpart"] < (15) "oven". Now let''s sort
+       this subpart ["costume" ? "counterpart"] by looking at their second letters.
+
+       The second letter: "costume": "o" (15). "counterpart": "o" (15). We now have:
+       (15) ["costume" ? "counterpart"]. Now let''s sort this subpart ["costume" ?
+       "counterpart"] by looking at their third letters.
+
+       The third letter: "costume": "s" (19). "counterpart": "u" (21). We now have:
+       (19) "costume" < (21) "counterpart". Hence, we have ["costume" < "counterpart"]
+       < "oven". So the answer is costume counterpart oven.'
+   - input: 'Sort the following words alphabetically: List: hypochlorite ponderosa
+       phone credulity'
+     target: 'Let''s think step by step.
+
+       The first letter: "hypochlorite": "h" (8). "ponderosa": "p" (16). "phone": "p"
+       (16). "credulity": "c" (3). We now have: (3) "credulity" < (8) "hypochlorite"
+       < (16) ["ponderosa" ? "phone"]. Now let''s sort this subpart ["ponderosa" ?
+       "phone"] by looking at their second letters.
+
+       The second letter: "ponderosa": "o" (15). "phone": "h" (8). We now have: (8)
+       "phone" < (15) "ponderosa". Hence, we have "credulity" < "hypochlorite" < ["phone"
+       < "ponderosa"]. So the answer is credulity hypochlorite phone ponderosa.'
+   - input: 'Sort the following words alphabetically: List: newt arson parthia seismography
+       mugho aspect census'
+     target: 'Let''s think step by step.
+
+       The first letter: "newt": "n" (14). "arson": "a" (1). "parthia": "p" (16). "seismography":
+       "s" (19). "mugho": "m" (13). "aspect": "a" (1). "census": "c" (3). We now have:
+       (1) ["arson" ? "aspect"] < (3) "census" < (13) "mugho" < (14) "newt" < (16)
+       "parthia" < (19) "seismography". Now let''s sort this subpart ["arson" ? "aspect"]
+       by looking at their second letters.
+
+       The second letter: "arson": "r" (18). "aspect": "s" (19). We now have: (18)
+       "arson" < (19) "aspect". Hence, we have ["arson" < "aspect"] < "census" < "mugho"
+       < "newt" < "parthia" < "seismography". So the answer is arson aspect census
+       mugho newt parthia seismography.'
+ include: _cot_fewshot_template_yaml
+ task: bbh_cot_fewshot_word_sorting
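The parenthesized numbers in these targets are 1-based alphabet positions ("o" is the 15th letter, "c" the 3rd). A quick illustrative check in Python, using the third sample's word list:

def rank(ch):
    # 1-based position of a letter in the alphabet.
    return ord(ch.lower()) - ord("a") + 1

words = "newt arson parthia seismography mugho aspect census".split()
print({w: rank(w[0]) for w in words})  # first-letter ranks, matching the target
print(" ".join(sorted(words)))         # arson aspect census mugho newt parthia seismography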
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/_bbh_cot_zeroshot.yaml ADDED
@@ -0,0 +1,36 @@
+ group: bbh_cot_zeroshot
+ task:
+   - bbh_cot_zeroshot_boolean_expressions
+   - bbh_cot_zeroshot_causal_judgement
+   - bbh_cot_zeroshot_date_understanding
+   - bbh_cot_zeroshot_disambiguation_qa
+   - bbh_cot_zeroshot_dyck_languages
+   - bbh_cot_zeroshot_formal_fallacies
+   - bbh_cot_zeroshot_geometric_shapes
+   - bbh_cot_zeroshot_hyperbaton
+   - bbh_cot_zeroshot_logical_deduction_five_objects
+   - bbh_cot_zeroshot_logical_deduction_seven_objects
+   - bbh_cot_zeroshot_logical_deduction_three_objects
+   - bbh_cot_zeroshot_movie_recommendation
+   - bbh_cot_zeroshot_multistep_arithmetic_two
+   - bbh_cot_zeroshot_navigate
+   - bbh_cot_zeroshot_object_counting
+   - bbh_cot_zeroshot_penguins_in_a_table
+   - bbh_cot_zeroshot_reasoning_about_colored_objects
+   - bbh_cot_zeroshot_ruin_names
+   - bbh_cot_zeroshot_salient_translation_error_detection
+   - bbh_cot_zeroshot_snarks
+   - bbh_cot_zeroshot_sports_understanding
+   - bbh_cot_zeroshot_temporal_sequences
+   - bbh_cot_zeroshot_tracking_shuffled_objects_five_objects
+   - bbh_cot_zeroshot_tracking_shuffled_objects_seven_objects
+   - bbh_cot_zeroshot_tracking_shuffled_objects_three_objects
+   - bbh_cot_zeroshot_web_of_lies
+   - bbh_cot_zeroshot_word_sorting
+ aggregate_metric_list:
+   - metric: exact_match
+     aggregation: mean
+     weight_by_size: true
+     filter_list: flexible-extract
+ metadata:
+   version: 3.0
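The aggregate_metric_list block asks for the group score as a mean of the subtasks' exact_match (read from the flexible-extract filter), weighted by subtask size. A rough Python sketch of that aggregation with invented scores and document counts (the harness's own aggregation code is more general):

# Illustrative only: the (exact_match, num_docs) pairs are made up.
subtasks = {
    "bbh_cot_zeroshot_boolean_expressions": (0.80, 250),
    "bbh_cot_zeroshot_word_sorting": (0.40, 250),
    "bbh_cot_zeroshot_causal_judgement": (0.55, 187),
}
total_docs = sum(n for _, n in subtasks.values())
# weight_by_size: true -> each subtask contributes in proportion to its size.
group_score = sum(score * n for score, n in subtasks.values()) / total_docs
print(round(group_score, 4))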
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "boolean_expressions"
+ "description": "Evaluate the result of a random Boolean expression.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_boolean_expressions"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: -1
+         regex_pattern: "\\b(True|False)\\b"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
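These two filters pull an answer out of the model's free-form completion: flexible-extract keeps the last True/False token (group_select: -1), while strict-match only accepts text following an explicit "The answer is"-style marker. A standalone Python check of both patterns, outside the harness:

import re

completion = "Let's think step by step. not True is False. So the answer is False."

# flexible-extract: last True/False anywhere in the completion.
print(re.findall(r"\b(True|False)\b", completion)[-1])  # False

# strict-match: only text after an explicit answer marker.
strict = re.search(
    r"((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)"
    r"|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))",
    completion,
)
print(strict.group(0))  # False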
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml ADDED
@@ -0,0 +1,20 @@
+ "dataset_name": "date_understanding"
+ "description": "Infer the date from context.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_date_understanding"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
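utils.MultiChoiceRegexFilter is resolved from the task directory's utils module; in essence it collects bracketed option letters like (B) from the completion, with group_select: -1 keeping the last one. A simplified plain-re stand-in, not the real class:

import re

completion = (
    "Let's think step by step. One day after 04/19/1969 is 04/20/1969. "
    "So the answer is (B)."
)
# Grab every "(X)" option letter and keep the last (group_select: -1).
options = re.findall(r"(\([A-Z]\))", completion)
print(options[-1])  # (B)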
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/dyck_languages.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "dyck_languages"
+ "description": "Correctly close a Dyck-n word.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_dyck_languages"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: -1
+         regex_pattern: "(?<= )([\" \\[\\(<{}>\\)\\]]+)|([\" \\[\\(<{}>\\)\\]]+)"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
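The flexible-extract pattern here matches runs of quote, space, and bracket characters, and group_select: -1 keeps the last such run, i.e., the closing sequence the model emits at the end. An illustrative trace (findall returns a tuple per match because the pattern has two groups):

import re

completion = "We close the brackets in reverse order. So the answer is } ] >."
pattern = r'(?<= )([" \[\(<{}>\)\]]+)|([" \[\(<{}>\)\]]+)'
# Exactly one group is non-empty per hit; keep the last run of bracket chars.
runs = [a or b for a, b in re.findall(pattern, completion)]
print(runs[-1].strip())  # } ] >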
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "formal_fallacies"
+ "description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_formal_fallacies"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: -1
+         regex_pattern: "\\b(valid|invalid)\\b"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml ADDED
@@ -0,0 +1,20 @@
+ "dataset_name": "geometric_shapes"
+ "description": "Name geometric shapes from their SVG paths.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_geometric_shapes"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml ADDED
@@ -0,0 +1,20 @@
+ "dataset_name": "hyperbaton"
+ "description": "Order adjectives correctly in English sentences.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_hyperbaton"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "multistep_arithmetic_two"
+ "description": "Solve multi-step arithmetic problems.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_multistep_arithmetic_two"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.NumberParseRegexFilter
+         group_select: -1
+         regex_pattern: "([-0-9]+)"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
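utils.NumberParseRegexFilter extracts numeric answers; with regex_pattern ([-0-9]+) and group_select: -1, the last signed-integer-looking token wins. A plain-re approximation of that behavior (not the real filter class):

import re

completion = (
    "Let's think step by step. (8 - 2) = 6, and 6 * -3 = -18. "
    "So the answer is -18."
)
# Keep the last token that looks like a signed integer.
numbers = re.findall(r"([-0-9]+)", completion)
print(numbers[-1])  # -18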
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "penguins_in_a_table"
+ "description": "Answer questions about a table of penguins and their attributes.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_penguins_in_a_table"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "reasoning_about_colored_objects"
+ "description": "Answer extremely simple questions about the colors of objects on a surface.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_reasoning_about_colored_objects"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "tracking_shuffled_objects_five_objects"
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_tracking_shuffled_objects_five_objects"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/_bbh_zeroshot.yaml ADDED
@@ -0,0 +1,36 @@
+ group: bbh_zeroshot
+ task:
+   - bbh_zeroshot_boolean_expressions
+   - bbh_zeroshot_causal_judgement
+   - bbh_zeroshot_date_understanding
+   - bbh_zeroshot_disambiguation_qa
+   - bbh_zeroshot_dyck_languages
+   - bbh_zeroshot_formal_fallacies
+   - bbh_zeroshot_geometric_shapes
+   - bbh_zeroshot_hyperbaton
+   - bbh_zeroshot_logical_deduction_five_objects
+   - bbh_zeroshot_logical_deduction_seven_objects
+   - bbh_zeroshot_logical_deduction_three_objects
+   - bbh_zeroshot_movie_recommendation
+   - bbh_zeroshot_multistep_arithmetic_two
+   - bbh_zeroshot_navigate
+   - bbh_zeroshot_object_counting
+   - bbh_zeroshot_penguins_in_a_table
+   - bbh_zeroshot_reasoning_about_colored_objects
+   - bbh_zeroshot_ruin_names
+   - bbh_zeroshot_salient_translation_error_detection
+   - bbh_zeroshot_snarks
+   - bbh_zeroshot_sports_understanding
+   - bbh_zeroshot_temporal_sequences
+   - bbh_zeroshot_tracking_shuffled_objects_five_objects
+   - bbh_zeroshot_tracking_shuffled_objects_seven_objects
+   - bbh_zeroshot_tracking_shuffled_objects_three_objects
+   - bbh_zeroshot_web_of_lies
+   - bbh_zeroshot_word_sorting
+ aggregate_metric_list:
+   - metric: exact_match
+     aggregation: mean
+     weight_by_size: true
+     filter_list: flexible-extract
+ metadata:
+   version: 3.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/_zeroshot_template_yaml ADDED
@@ -0,0 +1,26 @@
+ dataset_path: lukaemon/bbh
+ output_type: generate_until
+ test_split: test
+ doc_to_target: "{{target}}"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     # ignore_punctuation: true
+     regexes_to_ignore:
+       - "\\.$"
+       - ","
+       - "\n"
+       - "\\\\"
+       - '"'
+ generation_kwargs:
+   until:
+     - "</s>"
+     - "Q:"
+     - "<|im_end|>"
+   do_sample: false
+   temperature: 0.0
+ num_fewshot: 0
+ metadata:
+   version: 3.0
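Filtered predictions are compared to the gold target with exact_match, lowercased (ignore_case: true) and with the regexes_to_ignore patterns stripped from both sides first. A simplified Python sketch of that normalization (details differ from the harness's actual metric code):

import re

# Mirrors the regexes_to_ignore list in the template above.
REGEXES_TO_IGNORE = [r"\.$", ",", "\n", r"\\", '"']

def normalize(text):
    for pattern in REGEXES_TO_IGNORE:
        text = re.sub(pattern, "", text)
    return text.lower().strip()

print(normalize("(A) Yes.") == normalize("(a) yes"))  # True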
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/boolean_expressions.yaml ADDED
@@ -0,0 +1,16 @@
+ "dataset_name": "boolean_expressions"
+ "description": "Evaluate the result of a random Boolean expression.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_boolean_expressions"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: 0
+         regex_pattern: "\\b(True|False)\\b"
+       - function: "take_first"
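Note the contrast with the CoT variant of this task: here flexible-extract uses group_select: 0, taking the first True/False in the completion, since a zeroshot answer is expected right after "A:" rather than at the end of a reasoning chain. A quick illustration of the difference:

import re

completion = "False. Hmm, wait: True or False evaluates to True."
matches = re.findall(r"\b(True|False)\b", completion)
print(matches[0])   # False -> what group_select: 0 keeps
print(matches[-1])  # True  -> what the CoT filters (group_select: -1) would keep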
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/causal_judgement.yaml ADDED
@@ -0,0 +1,16 @@
+ "dataset_name": "causal_judgement"
+ "description": "Answer questions about causal attribution.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_causal_judgement"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: 0
+         regex_pattern: "\\b(Yes|No|yes|no)\\b"
+       - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/zeroshot/date_understanding.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "date_understanding"
+ "description": "Infer the date from context.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_date_understanding"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"