{"eli5.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "eli5.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1174541, "num_examples": 117929, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 130497, "num_examples": 13104, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1-FKsZknoDE7bh0fKucs8nNu72IKFxfuo": {"num_bytes": 820757, "checksum": "92304a6f44fee943c29e67c0097dbe287e4a420c101fb865d4d8d6098299a8c2"}, "https://drive.google.com/uc?id=108bG1CJMaqANIqLvxthuQvsZro-5qwbX": {"num_bytes": 91188, "checksum": "37d29f228db3a35e871c2124fe0b854f5a399951cda0a877891e7180ee080884"}}, "download_size": 911945, "post_processing_size": null, "dataset_size": 1305038, "size_in_bytes": 2216983}, "eli5.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "eli5.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1174541, "num_examples": 117929, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 130497, "num_examples": 13104, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=11FfVASpa-Agie4AJfEPdjkR-QrvEO5Sd": {"num_bytes": 820757, "checksum": "92304a6f44fee943c29e67c0097dbe287e4a420c101fb865d4d8d6098299a8c2"}, "https://drive.google.com/uc?id=11XlFn79xGSGmPu-TU2SYariQgg2bby2h": {"num_bytes": 91188, "checksum": "37d29f228db3a35e871c2124fe0b854f5a399951cda0a877891e7180ee080884"}}, "download_size": 911945, "post_processing_size": null, "dataset_size": 1305038, "size_in_bytes": 2216983}, "gooaq.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "gooaq.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 95226834, "num_examples": 146253, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 20122, "num_examples": 33, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=10A8rPzMECn3s-qXSZRGKnlDMx5h6-U6k": {"num_bytes": 93129949, "checksum": "9cd916d9ab41e70df0f6feec2d5818dfe733209cfa4803e5cfb033ebbba0133c"}, "https://drive.google.com/uc?id=10PNaYwLxFBfjm2AMM65sp-kRs9rE-8_A": {"num_bytes": 19740, "checksum": "3c127cd582e52889b7a78dc7084333d646019054a39ca9d6849d2ea8fa156a6f"}}, "download_size": 93149689, "post_processing_size": null, "dataset_size": 95246956, "size_in_bytes": 188396645}, "gooaq.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "gooaq.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 85723336, "num_examples": 131657, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 9523620, "num_examples": 14629, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=10cZ6Kx7v_UHzHkvBgduoiJ7Ny3aeO-Ww": {"num_bytes": 83835750, "checksum": "1e72feb89d26e6fc2bac718115c4a8aca63f6f6278d75585952e61976ea9dd77"}, "https://drive.google.com/uc?id=11YdwCGTaw7jKk612tRc2T6NSYRUgmAjk": {"num_bytes": 9313939, "checksum": "0e48376bde7400059cc874aca3864e39909ad51a6dbb97ba6ff5e2d7b05ab331"}}, "download_size": 93149689, "post_processing_size": null, "dataset_size": 95246956, "size_in_bytes": 188396645}, "hotpotqa.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "hotpotqa.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4465930, "num_examples": 355, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 437034, "num_examples": 35, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=103bDQK53aT1wIFbJqe9B8Usi0_TCROBT": {"num_bytes": 4484332, "checksum": "0b954243ecbf904cc3ae0b7b85b922380f2febd6a802fb5ab04ac313198f1705"}, "https://drive.google.com/uc?id=1-ZSJNCjxdh5wEifkVVseieYYu45tVbEN": {"num_bytes": 438660, "checksum": "2f5151490204e71fcb63d17288c2907666125ca1f35601935d2e8a7101df100f"}}, "download_size": 4922992, "post_processing_size": null, "dataset_size": 4902964, "size_in_bytes": 9825956}, "hotpotqa.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "hotpotqa.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4393383, "num_examples": 351, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 509581, "num_examples": 39, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=11GYpcyT98XTomZhQxai5OLrTFoTGrfJJ": {"num_bytes": 4411273, "checksum": "6ba37d116a3bd64e88e63a613e7d74122ec749e8ee9195dc8c90ced03b1bf57c"}, "https://drive.google.com/uc?id=11B-cH_N8VIyLFCyM_l4dWfkOURXf4ky-": {"num_bytes": 511719, "checksum": "992d67501169f1624572d897eda080f4bb08df1321bba18e64f559473156a9e9"}}, "download_size": 4922992, "post_processing_size": null, "dataset_size": 4902964, "size_in_bytes": 9825956}, "msmarco.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "msmarco.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 169604379, "num_examples": 23011, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 17559831, "num_examples": 2558, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1-jMNHG6rS9b6TnRZ6iNbjRLXRwr8Znb_": {"num_bytes": 169308630, "checksum": "6d08c5da8205fd0ea8313d0e9bcc032b88ee5c53ce9a96081659be57a5157d61"}, "https://drive.google.com/uc?id=1-BtYcEWwgaD0aI5hHCHFXCZOZu8I2e8o": {"num_bytes": 17527966, "checksum": "01cea9955ec48381b9933179b6174642f65be72148b286128a5c0bbe89e25005"}}, "download_size": 186836596, "post_processing_size": null, "dataset_size": 187164210, "size_in_bytes": 374000806}, "msmarco.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "msmarco.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 168417032, "num_examples": 23012, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 18827061, "num_examples": 2557, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=111RXRWuNk2CMhDbzY2gIudmcB70lsQSy": {"num_bytes": 168122339, "checksum": "6be361f3386c2e30ed59572b7a25bec3afafef65da9a19ded4c6efaa79f43f50"}, "https://drive.google.com/uc?id=11OLGeEkS6Wkv3q5ObNAinim5hqB5UU4D": {"num_bytes": 18794406, "checksum": "426c0716422d6298caeda64663ca29d0758e007d32b00849064003cdb07b40c2"}}, "download_size": 186916745, "post_processing_size": null, "dataset_size": 187244093, "size_in_bytes": 374160838}, "naturalquestions.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "naturalquestions.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 111644341, "num_examples": 1137, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 7074666, "num_examples": 71, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1-M_G-V7p0XvGmw3JWtOSfeAlrGLMu8U3": {"num_bytes": 111709402, "checksum": "7f74571bec9b0f55c5c527d831e547e0f860e3c241a5b06e7a6de5148deecd03"}, "https://drive.google.com/uc?id=1-hjnE4TvEp76eznP14DHIsqYnitY0rNW": {"num_bytes": 7078462, "checksum": "90b0130b16bdd3e429cbc07b094270748327127f841538a98d8dda1ac83f6897"}}, "download_size": 118787864, "post_processing_size": null, "dataset_size": 118719007, "size_in_bytes": 237506871}, "naturalquestions.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "naturalquestions.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 107149045, "num_examples": 1087, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 11573438, "num_examples": 121, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=118ALG23_Ayi7qrAAdExJKLiN21Xy7VO5": {"num_bytes": 107204249, "checksum": "993fc8e988edcc5b85984578adc77c03e42699b1ce5244e19eb2918a480a5d5e"}, "https://drive.google.com/uc?id=11L8JW9llwDI-vg2LSXL4NmnHbOOHETBr": {"num_bytes": 11587103, "checksum": "bd113addf691d20b45f007b80950dec7f74b36dc860b5eb2b05449a623a16dc8"}}, "download_size": 118791352, "post_processing_size": null, "dataset_size": 118722483, "size_in_bytes": 237513835}, "newsqa.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "newsqa.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4671092, "num_examples": 623, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 253141, "num_examples": 29, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1-hvqrR98PcajkorCvFvqz4fcuinZfgvr": {"num_bytes": 4680426, "checksum": "7a92c04b59de7fd565c6bcb9f70285638d9796a53fd7c7138df9016f22b78c6f"}, "https://drive.google.com/uc?id=1-oZbc9QFvuDfxzDhwotOYwvcLfFbCJQC": {"num_bytes": 253785, "checksum": "1ac723e5500a33b5803bc004217d97ea1b37c345666e9080d5a1926b4d2e2ef3"}}, "download_size": 4934211, "post_processing_size": null, "dataset_size": 4924233, "size_in_bytes": 9858444}, "newsqa.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "newsqa.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4422767, "num_examples": 586, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 513622, "num_examples": 66, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=11mraicFI6bb6KFcUl3unZxW0OSGg18UB": {"num_bytes": 4431727, "checksum": "dd3959b6f0d73c034d159bc6abd58bddd3eccd8262742c172662ff9f676725cb"}, "https://drive.google.com/uc?id=10rM5-BYr1mrSVSRFqgiFuQdYvKuCDzD1": {"num_bytes": 514773, "checksum": "5b99daa84e3a1cd33a27bae3b419961183286e981df2f96b150096388508a3ee"}}, "download_size": 4946500, "post_processing_size": null, "dataset_size": 4936389, "size_in_bytes": 9882889}, "paq.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "paq.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1009499834, "num_examples": 692645, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 112181249, "num_examples": 76961, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=10PrKt6kEgq07SNss5rZizRcqQxcxS84X": {"num_bytes": 1002956199, "checksum": "bc5aa81e12bb689b47442ba65ad8be41f99b8aed6a88a1cdf7addd11d3ec652a"}, "https://drive.google.com/uc?id=1-kuu0RihKcve-EGFtwjYz8jOdN6rXbcM": {"num_bytes": 111450221, "checksum": "3be6746245b3479a3a4e0f1144ce5bfb09e5dad1976e9996e9a22ba38cf11955"}}, "download_size": 1114406420, "post_processing_size": null, "dataset_size": 1121681083, "size_in_bytes": 2236087503}, "paq.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "paq.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1009499834, "num_examples": 692645, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 112181249, "num_examples": 76961, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=11W8G2mmQ78LBwet5GFvwvNu5hfPWOblN": {"num_bytes": 1002956199, "checksum": "bc5aa81e12bb689b47442ba65ad8be41f99b8aed6a88a1cdf7addd11d3ec652a"}, "https://drive.google.com/uc?id=10iItHXCfQ9wIsFmDUIMjdXbOSapRCAwj": {"num_bytes": 111450221, "checksum": "3be6746245b3479a3a4e0f1144ce5bfb09e5dad1976e9996e9a22ba38cf11955"}}, "download_size": 1114406420, "post_processing_size": null, "dataset_size": 1121681083, "size_in_bytes": 2236087503}, "searchqa.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "searchqa.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 31403453, "num_examples": 663, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 5578556, "num_examples": 117, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1-AOYSVQf4GI7UnXcZ2EYbDXszHetjdrS": {"num_bytes": 31416040, "checksum": "f615faee688b858cf08a30b31032a64b8df7ff3cca042b0b3bbbefdbd35fb1de"}, "https://drive.google.com/uc?id=1-ZCCflByWZ3sBE_Hxirhfy9KQQ8d2ABN": {"num_bytes": 5581361, "checksum": "804944d505f0940060703b75b103321f8338cddb7bb0c782151cdede1d4896d8"}}, "download_size": 36997401, "post_processing_size": null, "dataset_size": 36982009, "size_in_bytes": 73979410}, "searchqa.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "searchqa.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 33071979, "num_examples": 702, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 3910030, "num_examples": 78, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=11ivqoK_aVjK6RpaT_QWjqJ3V9VMvw190": {"num_bytes": 33085689, "checksum": "d3a679a077bcc12c7e1f12f3504d4a5ec194d14ba20ec37c4db068ea536f6192"}, "https://drive.google.com/uc?id=11Uvh0s17N7hvwfPF75x0Ko6xdccfsZcl": {"num_bytes": 3911712, "checksum": "1cdc105f2d926210e70df5dadcf6457925cee057a9c06e13142cc3ef0d4b3203"}}, "download_size": 36997401, "post_processing_size": null, "dataset_size": 36982009, "size_in_bytes": 73979410}, "squad2.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "squad2.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5461054, "num_examples": 2957, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 549784, "num_examples": 252, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=10Akh_VNH5Kvp0BiKbq9irA-u5zgdoxPy": {"num_bytes": 5421114, "checksum": "8369ea80f63e550153051e63173ba0ecc5a9409e02e5b06839af483620191633"}, "https://drive.google.com/uc?id=10QszRRFigIz_bAWuhOkn3r3dngHLpDSy": {"num_bytes": 546425, "checksum": "ae2fa26d97f826c8496765c06767a71d5141c47860b2fc9f9b6df70cd288c807"}}, "download_size": 5967539, "post_processing_size": null, "dataset_size": 6010838, "size_in_bytes": 11978377}, "squad2.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "squad2.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5374088, "num_examples": 2888, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 589922, "num_examples": 321, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=110xR1sye2Qx-QrrOPb3IDLhkjLCqG1Zy": {"num_bytes": 5335047, "checksum": "f0f9202725d14c2ad867f0e7767112e8e7f059ece2de0b5fbeaae8dc5f9ff804"}, "https://drive.google.com/uc?id=1144-Zt5-b8nFZOgUXbnk7l-RLSHTnJmN": {"num_bytes": 585542, "checksum": "b4ed07a2def1a3ea6d482b52f5815701a5651f41cdc0a0306b08e2ec5bac58ad"}}, "download_size": 5920589, "post_processing_size": null, "dataset_size": 5964010, "size_in_bytes": 11884599}, "triviaqa.original-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. 
In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "triviaqa.original-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11278937, "num_examples": 637, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 1229922, "num_examples": 66, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1-lb4JylW7olUzbLJBaBqNK-HAcxulxEI": {"num_bytes": 11279573, "checksum": "8ebce60165aabfee8b3faab4999da9e06c615d1d46e10c4f5368069b11ecbc02"}, "https://drive.google.com/uc?id=1-T0LHqgSvKyIx6YehQ-TrBOhJqygV-Si": {"num_bytes": 1230213, "checksum": "1fd4524f83484d90275a945ecbcacbcca089db0d79bb534df1232b7ac3d5f70e"}}, "download_size": 12509786, "post_processing_size": null, "dataset_size": 12508859, "size_in_bytes": 25018645}, "triviaqa.random-split": {"description": "Causal Question Answering Dataset is machine reading comprehension dataset from 10 QA datasets that are filtered using regex to get causal question. The dataset is from a paper titled CausalQA: A Benchmark for Causal Question Answering. 2022. Alexander Bondarenko, Magdalena Wolska, Stefan Heindorf, Lukas Bl\u00fcbaum, Axel-Cyrille Ngonga Ngomo, Benno Stein, Pavel Braslavski, Matthias Hagen, Martin Potthast. In COLING.", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_processed": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "context_processed": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "causalqa", "config_name": "triviaqa.random-split", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11358699, "num_examples": 632, "dataset_name": "causalqa"}, "validation": {"name": "validation", "num_bytes": 1268034, "num_examples": 71, "dataset_name": "causalqa"}}, "download_checksums": {"https://drive.google.com/uc?id=11-y9PSfAAP8-L8PtBd_51RaA-5MjsvPH": {"num_bytes": 11359562, "checksum": "27067cb12e15f7177c83bf5eebe666890d50bd24c629101a3e89fba24576c023"}, "https://drive.google.com/uc?id=11WzPoeBLWbfMyR8xozfl-xMOSqevIIrs": {"num_bytes": 1268294, "checksum": "bae89fd42b101dca27b03dc354f4c34ed78fbccd7e2640f2e65d4e01fd0f16cd"}}, "download_size": 12627856, "post_processing_size": null, "dataset_size": 12626733, "size_in_bytes": 25254589}}