{ "results": { "veritatis-velit_lsat-rc_cot": { "acc,none": 0.4163568773234201, "acc_stderr,none": 0.03011196940753653, "alias": "veritatis-velit_lsat-rc_cot" }, "veritatis-velit_lsat-lr_cot": { "acc,none": 0.3137254901960784, "acc_stderr,none": 0.02056671577177923, "alias": "veritatis-velit_lsat-lr_cot" }, "veritatis-velit_lsat-ar_cot": { "acc,none": 0.23043478260869565, "acc_stderr,none": 0.027827807522276156, "alias": "veritatis-velit_lsat-ar_cot" }, "veritatis-velit_logiqa_cot": { "acc,none": 0.2987220447284345, "acc_stderr,none": 0.01830790800596066, "alias": "veritatis-velit_logiqa_cot" }, "veritatis-velit_logiqa2_cot": { "acc,none": 0.3530534351145038, "acc_stderr,none": 0.012057751628201937, "alias": "veritatis-velit_logiqa2_cot" }, "saepe-fuga_lsat-rc_cot": { "acc,none": 0.4312267657992565, "acc_stderr,none": 0.030252065567665472, "alias": "saepe-fuga_lsat-rc_cot" }, "saepe-fuga_lsat-lr_cot": { "acc,none": 0.296078431372549, "acc_stderr,none": 0.020235159438512108, "alias": "saepe-fuga_lsat-lr_cot" }, "saepe-fuga_lsat-ar_cot": { "acc,none": 0.18695652173913044, "acc_stderr,none": 0.02576377239851234, "alias": "saepe-fuga_lsat-ar_cot" }, "saepe-fuga_logiqa_cot": { "acc,none": 0.28913738019169327, "acc_stderr,none": 0.018134473494097365, "alias": "saepe-fuga_logiqa_cot" }, "saepe-fuga_logiqa2_cot": { "acc,none": 0.356234096692112, "acc_stderr,none": 0.012082133651061318, "alias": "saepe-fuga_logiqa2_cot" }, "nisi-sunt_lsat-rc_cot": { "acc,none": 0.34572490706319703, "acc_stderr,none": 0.029052140190085934, "alias": "nisi-sunt_lsat-rc_cot" }, "nisi-sunt_lsat-lr_cot": { "acc,none": 0.27647058823529413, "acc_stderr,none": 0.019824108780753004, "alias": "nisi-sunt_lsat-lr_cot" }, "nisi-sunt_lsat-ar_cot": { "acc,none": 0.27391304347826084, "acc_stderr,none": 0.029470189815005897, "alias": "nisi-sunt_lsat-ar_cot" }, "nisi-sunt_logiqa_cot": { "acc,none": 0.31629392971246006, "acc_stderr,none": 0.018601164683514252, "alias": "nisi-sunt_logiqa_cot" }, "nisi-sunt_logiqa2_cot": { "acc,none": 0.3708651399491094, "acc_stderr,none": 0.012186859070473788, "alias": "nisi-sunt_logiqa2_cot" }, "laboriosam-molestiae_lsat-rc_cot": { "acc,none": 0.3680297397769517, "acc_stderr,none": 0.029459297142360178, "alias": "laboriosam-molestiae_lsat-rc_cot" }, "laboriosam-molestiae_lsat-lr_cot": { "acc,none": 0.2823529411764706, "acc_stderr,none": 0.019952288758197854, "alias": "laboriosam-molestiae_lsat-lr_cot" }, "laboriosam-molestiae_lsat-ar_cot": { "acc,none": 0.2217391304347826, "acc_stderr,none": 0.027451496604058916, "alias": "laboriosam-molestiae_lsat-ar_cot" }, "laboriosam-molestiae_logiqa_cot": { "acc,none": 0.3083067092651757, "acc_stderr,none": 0.018471759300608265, "alias": "laboriosam-molestiae_logiqa_cot" }, "laboriosam-molestiae_logiqa2_cot": { "acc,none": 0.36895674300254455, "acc_stderr,none": 0.012173885104839207, "alias": "laboriosam-molestiae_logiqa2_cot" }, "iste-molestias_lsat-rc_cot": { "acc,none": 0.4275092936802974, "acc_stderr,none": 0.030219662071838058, "alias": "iste-molestias_lsat-rc_cot" }, "iste-molestias_lsat-lr_cot": { "acc,none": 0.2647058823529412, "acc_stderr,none": 0.01955480325785009, "alias": "iste-molestias_lsat-lr_cot" }, "iste-molestias_lsat-ar_cot": { "acc,none": 0.20434782608695654, "acc_stderr,none": 0.02664580815001135, "alias": "iste-molestias_lsat-ar_cot" }, "iste-molestias_logiqa_cot": { "acc,none": 0.3003194888178914, "acc_stderr,none": 0.018335874932123606, "alias": "iste-molestias_logiqa_cot" }, "iste-molestias_logiqa2_cot": { "acc,none": 0.3505089058524173, 
"acc_stderr,none": 0.012037825298569541, "alias": "iste-molestias_logiqa2_cot" }, "eum-saepe_lsat-rc_cot": { "acc,none": 0.44609665427509293, "acc_stderr,none": 0.030364356394504122, "alias": "eum-saepe_lsat-rc_cot" }, "eum-saepe_lsat-lr_cot": { "acc,none": 0.2803921568627451, "acc_stderr,none": 0.019910033171474082, "alias": "eum-saepe_lsat-lr_cot" }, "eum-saepe_lsat-ar_cot": { "acc,none": 0.23478260869565218, "acc_stderr,none": 0.028009647070930125, "alias": "eum-saepe_lsat-ar_cot" }, "eum-saepe_logiqa_cot": { "acc,none": 0.30670926517571884, "acc_stderr,none": 0.018445105229565346, "alias": "eum-saepe_logiqa_cot" }, "eum-saepe_logiqa2_cot": { "acc,none": 0.36323155216284986, "acc_stderr,none": 0.012133733683836157, "alias": "eum-saepe_logiqa2_cot" } }, "configs": { "eum-saepe_logiqa2_cot": { "task": "eum-saepe_logiqa2_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eum-saepe-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eum-saepe_logiqa_cot": { "task": "eum-saepe_logiqa_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eum-saepe-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eum-saepe_lsat-ar_cot": { "task": "eum-saepe_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eum-saepe-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eum-saepe_lsat-lr_cot": { "task": "eum-saepe_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eum-saepe-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "eum-saepe_lsat-rc_cot": { "task": "eum-saepe_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "eum-saepe-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iste-molestias_logiqa2_cot": { "task": "iste-molestias_logiqa2_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iste-molestias-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iste-molestias_logiqa_cot": { "task": "iste-molestias_logiqa_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iste-molestias-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iste-molestias_lsat-ar_cot": { "task": "iste-molestias_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iste-molestias-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iste-molestias_lsat-lr_cot": { "task": "iste-molestias_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iste-molestias-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "iste-molestias_lsat-rc_cot": { "task": "iste-molestias_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "iste-molestias-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "laboriosam-molestiae_logiqa2_cot": { "task": "laboriosam-molestiae_logiqa2_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "laboriosam-molestiae-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "laboriosam-molestiae_logiqa_cot": { "task": "laboriosam-molestiae_logiqa_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "laboriosam-molestiae-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "laboriosam-molestiae_lsat-ar_cot": { "task": "laboriosam-molestiae_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "laboriosam-molestiae-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "laboriosam-molestiae_lsat-lr_cot": { "task": "laboriosam-molestiae_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "laboriosam-molestiae-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "laboriosam-molestiae_lsat-rc_cot": { "task": "laboriosam-molestiae_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "laboriosam-molestiae-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "nisi-sunt_logiqa2_cot": { "task": "nisi-sunt_logiqa2_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "nisi-sunt-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "nisi-sunt_logiqa_cot": { "task": "nisi-sunt_logiqa_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "nisi-sunt-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "nisi-sunt_lsat-ar_cot": { "task": "nisi-sunt_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "nisi-sunt-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "nisi-sunt_lsat-lr_cot": { "task": "nisi-sunt_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "nisi-sunt-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "nisi-sunt_lsat-rc_cot": { "task": "nisi-sunt_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "nisi-sunt-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "saepe-fuga_logiqa2_cot": { "task": "saepe-fuga_logiqa2_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "saepe-fuga-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "saepe-fuga_logiqa_cot": { "task": "saepe-fuga_logiqa_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "saepe-fuga-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "saepe-fuga_lsat-ar_cot": { "task": "saepe-fuga_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "saepe-fuga-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "saepe-fuga_lsat-lr_cot": { "task": "saepe-fuga_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "saepe-fuga-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "saepe-fuga_lsat-rc_cot": { "task": "saepe-fuga_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "saepe-fuga-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "veritatis-velit_logiqa2_cot": { "task": "veritatis-velit_logiqa2_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "veritatis-velit-logiqa2/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "veritatis-velit_logiqa_cot": { "task": "veritatis-velit_logiqa_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "veritatis-velit-logiqa/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "veritatis-velit_lsat-ar_cot": { "task": "veritatis-velit_lsat-ar_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "veritatis-velit-lsat-ar/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "veritatis-velit_lsat-lr_cot": { "task": "veritatis-velit_lsat-lr_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "veritatis-velit-lsat-lr/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } }, "veritatis-velit_lsat-rc_cot": { "task": "veritatis-velit_lsat-rc_cot", "group": "logikon-bench", "dataset_path": "logikon/cot-eval-traces", "dataset_kwargs": { "data_files": { "test": "veritatis-velit-lsat-rc/test-00000-of-00001.parquet" } }, "test_split": "test", "doc_to_text": "def doc_to_text_cot(doc) -> str:\n \"\"\"\n Answer the following question about the given passage. [Base your answer on the reasoning below.]\n \n Passage: \n \n Question: \n A. \n B. \n C. \n D. \n [E. ]\n \n [Reasoning: ]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. 
{option}\\n\"\n prompt += \"\\n\"\n prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\" \n prompt += \"Answer:\"\n return prompt\n", "doc_to_target": "{{answer}}", "doc_to_choice": "{{options}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 0.0 } } }, "versions": { "eum-saepe_logiqa2_cot": 0.0, "eum-saepe_logiqa_cot": 0.0, "eum-saepe_lsat-ar_cot": 0.0, "eum-saepe_lsat-lr_cot": 0.0, "eum-saepe_lsat-rc_cot": 0.0, "iste-molestias_logiqa2_cot": 0.0, "iste-molestias_logiqa_cot": 0.0, "iste-molestias_lsat-ar_cot": 0.0, "iste-molestias_lsat-lr_cot": 0.0, "iste-molestias_lsat-rc_cot": 0.0, "laboriosam-molestiae_logiqa2_cot": 0.0, "laboriosam-molestiae_logiqa_cot": 0.0, "laboriosam-molestiae_lsat-ar_cot": 0.0, "laboriosam-molestiae_lsat-lr_cot": 0.0, "laboriosam-molestiae_lsat-rc_cot": 0.0, "nisi-sunt_logiqa2_cot": 0.0, "nisi-sunt_logiqa_cot": 0.0, "nisi-sunt_lsat-ar_cot": 0.0, "nisi-sunt_lsat-lr_cot": 0.0, "nisi-sunt_lsat-rc_cot": 0.0, "saepe-fuga_logiqa2_cot": 0.0, "saepe-fuga_logiqa_cot": 0.0, "saepe-fuga_lsat-ar_cot": 0.0, "saepe-fuga_lsat-lr_cot": 0.0, "saepe-fuga_lsat-rc_cot": 0.0, "veritatis-velit_logiqa2_cot": 0.0, "veritatis-velit_logiqa_cot": 0.0, "veritatis-velit_lsat-ar_cot": 0.0, "veritatis-velit_lsat-lr_cot": 0.0, "veritatis-velit_lsat-rc_cot": 0.0 }, "n-shot": { "eum-saepe_logiqa2_cot": 0, "eum-saepe_logiqa_cot": 0, "eum-saepe_lsat-ar_cot": 0, "eum-saepe_lsat-lr_cot": 0, "eum-saepe_lsat-rc_cot": 0, "iste-molestias_logiqa2_cot": 0, "iste-molestias_logiqa_cot": 0, "iste-molestias_lsat-ar_cot": 0, "iste-molestias_lsat-lr_cot": 0, "iste-molestias_lsat-rc_cot": 0, "laboriosam-molestiae_logiqa2_cot": 0, "laboriosam-molestiae_logiqa_cot": 0, "laboriosam-molestiae_lsat-ar_cot": 0, "laboriosam-molestiae_lsat-lr_cot": 0, "laboriosam-molestiae_lsat-rc_cot": 0, "nisi-sunt_logiqa2_cot": 0, "nisi-sunt_logiqa_cot": 0, "nisi-sunt_lsat-ar_cot": 0, "nisi-sunt_lsat-lr_cot": 0, "nisi-sunt_lsat-rc_cot": 0, "saepe-fuga_logiqa2_cot": 0, "saepe-fuga_logiqa_cot": 0, "saepe-fuga_lsat-ar_cot": 0, "saepe-fuga_lsat-lr_cot": 0, "saepe-fuga_lsat-rc_cot": 0, "veritatis-velit_logiqa2_cot": 0, "veritatis-velit_logiqa_cot": 0, "veritatis-velit_lsat-ar_cot": 0, "veritatis-velit_lsat-lr_cot": 0, "veritatis-velit_lsat-rc_cot": 0 }, "config": { "model": "vllm", "model_args": "pretrained=Deci/DeciLM-7B,revision=main,dtype=auto,tensor_parallel_size=1,gpu_memory_utilization=0.9,trust_remote_code=true,max_length=4096", "batch_size": "auto", "batch_sizes": [], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000, "gen_kwargs": null }, "git_hash": "5044cf9" }