{ "results": { "molestiae-aperiam_lsat-rc_cot": { "acc,none": 0.4758364312267658, "acc_stderr,none": 0.030506674211283072, "alias": "molestiae-aperiam_lsat-rc_cot" }, "molestiae-aperiam_lsat-lr_cot": { "acc,none": 0.4196078431372549, "acc_stderr,none": 0.021873771696750574, "alias": "molestiae-aperiam_lsat-lr_cot" }, "molestiae-aperiam_lsat-ar_cot": { "acc,none": 0.24347826086956523, "acc_stderr,none": 0.02836109930007507, "alias": "molestiae-aperiam_lsat-ar_cot" }, "molestiae-aperiam_logiqa_cot": { "acc,none": 0.3753993610223642, "acc_stderr,none": 0.019369034287392513, "alias": "molestiae-aperiam_logiqa_cot" }, "molestiae-aperiam_logiqa2_cot": { "acc,none": 0.477735368956743, "acc_stderr,none": 0.012602331890057498, "alias": "molestiae-aperiam_logiqa2_cot" }, "iure-at_lsat-rc_cot": { "acc,none": 0.5241635687732342, "acc_stderr,none": 0.03050667421128308, "alias": "iure-at_lsat-rc_cot" }, "iure-at_lsat-lr_cot": { "acc,none": 0.4666666666666667, "acc_stderr,none": 0.02211280638156422, "alias": "iure-at_lsat-lr_cot" }, "iure-at_lsat-ar_cot": { "acc,none": 0.19130434782608696, "acc_stderr,none": 0.025991852462828483, "alias": "iure-at_lsat-ar_cot" }, "iure-at_logiqa_cot": { "acc,none": 0.36421725239616615, "acc_stderr,none": 0.019248399225001777, "alias": "iure-at_logiqa_cot" }, "iure-at_logiqa2_cot": { "acc,none": 0.4446564885496183, "acc_stderr,none": 0.012537330526916487, "alias": "iure-at_logiqa2_cot" }, "facere-optio_lsat-rc_cot": { "acc,none": 0.5018587360594795, "acc_stderr,none": 0.030542150046756426, "alias": "facere-optio_lsat-rc_cot" }, "facere-optio_lsat-lr_cot": { "acc,none": 0.43333333333333335, "acc_stderr,none": 0.021964230412067975, "alias": "facere-optio_lsat-lr_cot" }, "facere-optio_lsat-ar_cot": { "acc,none": 0.1826086956521739, "acc_stderr,none": 0.02553042195273417, "alias": "facere-optio_lsat-ar_cot" }, "facere-optio_logiqa_cot": { "acc,none": 0.3706070287539936, "acc_stderr,none": 0.019318693909977667, "alias": "facere-optio_logiqa_cot" }, "facere-optio_logiqa2_cot": { "acc,none": 0.44656488549618323, "acc_stderr,none": 0.012542599303456, "alias": "facere-optio_logiqa2_cot" }, "et-praesentium_lsat-rc_cot": { "acc,none": 0.49814126394052044, "acc_stderr,none": 0.030542150046756422, "alias": "et-praesentium_lsat-rc_cot" }, "et-praesentium_lsat-lr_cot": { "acc,none": 0.3784313725490196, "acc_stderr,none": 0.02149706741180824, "alias": "et-praesentium_lsat-lr_cot" }, "et-praesentium_lsat-ar_cot": { "acc,none": 0.2217391304347826, "acc_stderr,none": 0.02745149660405892, "alias": "et-praesentium_lsat-ar_cot" }, "et-praesentium_logiqa_cot": { "acc,none": 0.36261980830670926, "acc_stderr,none": 0.019230254618400246, "alias": "et-praesentium_logiqa_cot" }, "et-praesentium_logiqa2_cot": { "acc,none": 0.46946564885496184, "acc_stderr,none": 0.012591300013425958, "alias": "et-praesentium_logiqa2_cot" }, "eligendi-commodi_lsat-rc_cot": { "acc,none": 0.5018587360594795, "acc_stderr,none": 0.030542150046756426, "alias": "eligendi-commodi_lsat-rc_cot" }, "eligendi-commodi_lsat-lr_cot": { "acc,none": 0.42549019607843136, "acc_stderr,none": 0.021914653579107275, "alias": "eligendi-commodi_lsat-lr_cot" }, "eligendi-commodi_lsat-ar_cot": { "acc,none": 0.23478260869565218, "acc_stderr,none": 0.02800964707093011, "alias": "eligendi-commodi_lsat-ar_cot" }, "eligendi-commodi_logiqa_cot": { "acc,none": 0.3466453674121406, "acc_stderr,none": 0.019036064999420094, "alias": "eligendi-commodi_logiqa_cot" }, "eligendi-commodi_logiqa2_cot": { "acc,none": 0.4643765903307888, "acc_stderr,none": 
0.012582786901750204, "alias": "eligendi-commodi_logiqa2_cot" }, "doloremque-rem_lsat-rc_cot": { "acc,none": 0.5092936802973977, "acc_stderr,none": 0.030537084593525398, "alias": "doloremque-rem_lsat-rc_cot" }, "doloremque-rem_lsat-lr_cot": { "acc,none": 0.4117647058823529, "acc_stderr,none": 0.02181429628344194, "alias": "doloremque-rem_lsat-lr_cot" }, "doloremque-rem_lsat-ar_cot": { "acc,none": 0.21304347826086956, "acc_stderr,none": 0.027057754389936194, "alias": "doloremque-rem_lsat-ar_cot" }, "doloremque-rem_logiqa_cot": { "acc,none": 0.3610223642172524, "acc_stderr,none": 0.019211880355748154, "alias": "doloremque-rem_logiqa_cot" }, "doloremque-rem_logiqa2_cot": { "acc,none": 0.46119592875318066, "acc_stderr,none": 0.012576797669813296, "alias": "doloremque-rem_logiqa2_cot" } }, "configs": { "doloremque-rem_logiqa2_cot": { "task": "doloremque-rem_logiqa2_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "doloremque-rem-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "doloremque-rem_logiqa_cot": { "task": "doloremque-rem_logiqa_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "doloremque-rem-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "doloremque-rem_lsat-ar_cot": { "task": "doloremque-rem_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "doloremque-rem-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "doloremque-rem_lsat-lr_cot": { "task": "doloremque-rem_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "doloremque-rem-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "doloremque-rem_lsat-rc_cot": { "task": "doloremque-rem_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "doloremque-rem-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eligendi-commodi_logiqa2_cot": { "task": "eligendi-commodi_logiqa2_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eligendi-commodi-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eligendi-commodi_logiqa_cot": { "task": "eligendi-commodi_logiqa_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eligendi-commodi-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eligendi-commodi_lsat-ar_cot": { "task": "eligendi-commodi_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eligendi-commodi-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eligendi-commodi_lsat-lr_cot": { "task": "eligendi-commodi_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eligendi-commodi-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eligendi-commodi_lsat-rc_cot": { "task": "eligendi-commodi_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eligendi-commodi-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "et-praesentium_logiqa2_cot": { "task": "et-praesentium_logiqa2_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "et-praesentium-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "et-praesentium_logiqa_cot": { "task": "et-praesentium_logiqa_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "et-praesentium-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "et-praesentium_lsat-ar_cot": { "task": "et-praesentium_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "et-praesentium-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "et-praesentium_lsat-lr_cot": { "task": "et-praesentium_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "et-praesentium-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "et-praesentium_lsat-rc_cot": { "task": "et-praesentium_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "et-praesentium-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "facere-optio_logiqa2_cot": { "task": "facere-optio_logiqa2_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "facere-optio-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "facere-optio_logiqa_cot": { "task": "facere-optio_logiqa_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "facere-optio-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "facere-optio_lsat-ar_cot": { "task": "facere-optio_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "facere-optio-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "facere-optio_lsat-lr_cot": { "task": "facere-optio_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "facere-optio-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "facere-optio_lsat-rc_cot": { "task": "facere-optio_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "facere-optio-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iure-at_logiqa2_cot": { "task": "iure-at_logiqa2_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iure-at-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iure-at_logiqa_cot": { "task": "iure-at_logiqa_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iure-at-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iure-at_lsat-ar_cot": { "task": "iure-at_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iure-at-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iure-at_lsat-lr_cot": { "task": "iure-at_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iure-at-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iure-at_lsat-rc_cot": { "task": "iure-at_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iure-at-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "molestiae-aperiam_logiqa2_cot": { "task": "molestiae-aperiam_logiqa2_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "molestiae-aperiam-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "molestiae-aperiam_logiqa_cot": { "task": "molestiae-aperiam_logiqa_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "molestiae-aperiam-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "molestiae-aperiam_lsat-ar_cot": { "task": "molestiae-aperiam_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "molestiae-aperiam-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "molestiae-aperiam_lsat-lr_cot": { "task": "molestiae-aperiam_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "molestiae-aperiam-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "molestiae-aperiam_lsat-rc_cot": { "task": "molestiae-aperiam_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "cot-leaderboard/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "molestiae-aperiam-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt += \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } } }, "versions": { "doloremque-rem_logiqa2_cot": 0.0, "doloremque-rem_logiqa_cot": 0.0, "doloremque-rem_lsat-ar_cot": 0.0, "doloremque-rem_lsat-lr_cot": 0.0, "doloremque-rem_lsat-rc_cot": 0.0, "eligendi-commodi_logiqa2_cot": 0.0, "eligendi-commodi_logiqa_cot": 0.0, "eligendi-commodi_lsat-ar_cot": 0.0, "eligendi-commodi_lsat-lr_cot": 0.0, "eligendi-commodi_lsat-rc_cot": 0.0, "et-praesentium_logiqa2_cot": 0.0, "et-praesentium_logiqa_cot": 0.0, "et-praesentium_lsat-ar_cot": 0.0, "et-praesentium_lsat-lr_cot": 0.0, "et-praesentium_lsat-rc_cot": 0.0, "facere-optio_logiqa2_cot": 0.0, "facere-optio_logiqa_cot": 0.0, "facere-optio_lsat-ar_cot": 0.0, "facere-optio_lsat-lr_cot": 0.0, "facere-optio_lsat-rc_cot": 0.0, "iure-at_logiqa2_cot": 0.0, "iure-at_logiqa_cot": 0.0, "iure-at_lsat-ar_cot": 0.0, "iure-at_lsat-lr_cot": 0.0, "iure-at_lsat-rc_cot": 0.0, "molestiae-aperiam_logiqa2_cot": 0.0, "molestiae-aperiam_logiqa_cot": 0.0, "molestiae-aperiam_lsat-ar_cot": 0.0, "molestiae-aperiam_lsat-lr_cot": 0.0, "molestiae-aperiam_lsat-rc_cot": 0.0 }, "n-shot": { "doloremque-rem_logiqa2_cot": 0, "doloremque-rem_logiqa_cot": 0, "doloremque-rem_lsat-ar_cot": 0, "doloremque-rem_lsat-lr_cot": 0, "doloremque-rem_lsat-rc_cot": 0, "eligendi-commodi_logiqa2_cot": 0, "eligendi-commodi_logiqa_cot": 0, "eligendi-commodi_lsat-ar_cot": 0, "eligendi-commodi_lsat-lr_cot": 0, "eligendi-commodi_lsat-rc_cot": 0, "et-praesentium_logiqa2_cot": 0, "et-praesentium_logiqa_cot": 0, "et-praesentium_lsat-ar_cot": 0, "et-praesentium_lsat-lr_cot": 0, "et-praesentium_lsat-rc_cot": 0, "facere-optio_logiqa2_cot": 0, "facere-optio_logiqa_cot": 0, "facere-optio_lsat-ar_cot": 0, "facere-optio_lsat-lr_cot": 0, "facere-optio_lsat-rc_cot": 0, "iure-at_logiqa2_cot": 0, "iure-at_logiqa_cot": 0, "iure-at_lsat-ar_cot": 0, "iure-at_lsat-lr_cot": 0, "iure-at_lsat-rc_cot": 0, "molestiae-aperiam_logiqa2_cot": 0, "molestiae-aperiam_logiqa_cot": 0, "molestiae-aperiam_lsat-ar_cot": 0, "molestiae-aperiam_lsat-lr_cot": 0, "molestiae-aperiam_lsat-rc_cot": 0 }, "config": { "model": "vllm", "model_args": "pretrained=openchat/openchat-3.5-0106,revision=main,dtype=auto,tensor_parallel_size=1,gpu_memory_utilization=0.9,trust_remote_code=true,max_length=4096", "batch_size": "auto", "batch_sizes": [], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000, "gen_kwargs": null }, "git_hash": "a1d6b70" }