Tasks: Question Answering
Sub-tasks: open-domain-qa
Modalities: Text
Formats: parquet
Languages: English
Size: 100K - 1M
Commit 0433523
Parent(s): debe34d
Delete legacy JSON metadata
Delete legacy `dataset_infos.json`.
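Removing the legacy metadata file does not change how the data is consumed; the same information now travels with the dataset's Parquet conversion and README. A minimal loading sketch, assuming the dataset is published on the Hub under the id `selqa` (the configuration name below is taken from the deleted file shown in the diff):

```python
from datasets import load_dataset

# Sketch only: the config name comes from the deleted dataset_infos.json;
# the Hub repository id "selqa" is an assumption.
ds = load_dataset("selqa", "answer_selection_analysis")

print(ds)                          # train / validation / test splits
print(ds["train"][0]["question"])  # one crowdsourced question
```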
- dataset_infos.json +0 -1
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-
{"answer_selection_analysis": {"description": "The SelQA dataset provides crowdsourced annotation for two selection-based question answer tasks, \nanswer sentence selection and answer triggering.\n", "citation": "@InProceedings{7814688,\n author={T. {Jurczyk} and M. {Zhai} and J. D. {Choi}},\n booktitle={2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)}, \n title={SelQA: A New Benchmark for Selection-Based Question Answering}, \n year={2016},\n volume={},\n number={},\n pages={820-827},\n doi={10.1109/ICTAI.2016.0128}\n}\n", "homepage": "", "license": "", "features": {"section": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"dtype": "string", "id": null, "_type": "Value"}, "is_paraphrase": {"dtype": "bool", "id": null, "_type": "Value"}, "topic": {"num_classes": 10, "names": ["MUSIC", "TV", "TRAVEL", "ART", "SPORT", "COUNTRY", "MOVIES", "HISTORICAL EVENTS", "SCIENCE", "FOOD"], "names_file": null, "id": null, "_type": "ClassLabel"}, "answers": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "candidates": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "q_types": {"feature": {"num_classes": 7, "names": ["what", "why", "when", "who", "where", "how", ""], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "selqa", "config_name": "answer_selection_analysis", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9676758, "num_examples": 5529, "dataset_name": "selqa"}, "test": {"name": "test", "num_bytes": 2798537, "num_examples": 1590, "dataset_name": "selqa"}, "validation": {"name": "validation", "num_bytes": 1378407, "num_examples": 785, "dataset_name": "selqa"}}, "download_checksums": {"https://raw.githubusercontent.com/emorynlp/selqa/master/ass/selqa-ass-train.json": {"num_bytes": 10320158, "checksum": "30622b7820bb2fa8e766d0ad3c7cf29dac658772cd763a9dabf81d9cab1fd534"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/ass/selqa-ass-dev.json": {"num_bytes": 1470163, "checksum": "b4e6687e44a30b486e24d2b06aa3012ec07d61145f3521f35b7d49daae3e0ca4"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/ass/selqa-ass-test.json": {"num_bytes": 2983123, "checksum": "ca1184d94cc9030883723fab76ef8180b3cf5fb142549a5648d22f59fe7c6fc6"}}, "download_size": 14773444, "post_processing_size": null, "dataset_size": 13853702, "size_in_bytes": 28627146}, "answer_selection_experiments": {"description": "The SelQA dataset provides crowdsourced annotation for two selection-based question answer tasks, \nanswer sentence selection and answer triggering.\n", "citation": "@InProceedings{7814688,\n author={T. {Jurczyk} and M. {Zhai} and J. D. 
{Choi}},\n booktitle={2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)}, \n title={SelQA: A New Benchmark for Selection-Based Question Answering}, \n year={2016},\n volume={},\n number={},\n pages={820-827},\n doi={10.1109/ICTAI.2016.0128}\n}\n", "homepage": "", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "candidate": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "selqa", "config_name": "answer_selection_experiments", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13782826, "num_examples": 66438, "dataset_name": "selqa"}, "test": {"name": "test", "num_bytes": 4008077, "num_examples": 19435, "dataset_name": "selqa"}, "validation": {"name": "validation", "num_bytes": 1954877, "num_examples": 9377, "dataset_name": "selqa"}}, "download_checksums": {"https://raw.githubusercontent.com/emorynlp/selqa/master/ass/selqa-ass-train.tsv": {"num_bytes": 12985514, "checksum": "9f40017c0bf97f2f5816fba5ac18c7eafb847a9e351d85584afaecd1296010db"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/ass/selqa-ass-dev.tsv": {"num_bytes": 1842345, "checksum": "0f0d73b379bb4efc6e678e36b122ea17c957998a1d002e3c480b3bc7854f77a9"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/ass/selqa-ass-test.tsv": {"num_bytes": 3774841, "checksum": "4129ffa31237eb7f673baf6313bdd7d01658000c253b45e195d235493a435b91"}}, "download_size": 18602700, "post_processing_size": null, "dataset_size": 19745780, "size_in_bytes": 38348480}, "answer_triggering_analysis": {"description": "The SelQA dataset provides crowdsourced annotation for two selection-based question answer tasks, \nanswer sentence selection and answer triggering.\n", "citation": "@InProceedings{7814688,\n author={T. {Jurczyk} and M. {Zhai} and J. D. 
{Choi}},\n booktitle={2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)}, \n title={SelQA: A New Benchmark for Selection-Based Question Answering}, \n year={2016},\n volume={},\n number={},\n pages={820-827},\n doi={10.1109/ICTAI.2016.0128}\n}\n", "homepage": "", "license": "", "features": {"section": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"dtype": "string", "id": null, "_type": "Value"}, "is_paraphrase": {"dtype": "bool", "id": null, "_type": "Value"}, "topic": {"num_classes": 10, "names": ["MUSIC", "TV", "TRAVEL", "ART", "SPORT", "COUNTRY", "MOVIES", "HISTORICAL EVENTS", "SCIENCE", "FOOD"], "names_file": null, "id": null, "_type": "ClassLabel"}, "q_types": {"feature": {"num_classes": 7, "names": ["what", "why", "when", "who", "where", "how", ""], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "candidate_list": {"feature": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "section": {"dtype": "string", "id": null, "_type": "Value"}, "candidates": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answers": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "selqa", "config_name": "answer_triggering_analysis", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 30176650, "num_examples": 5529, "dataset_name": "selqa"}, "test": {"name": "test", "num_bytes": 8766787, "num_examples": 1590, "dataset_name": "selqa"}, "validation": {"name": "validation", "num_bytes": 4270904, "num_examples": 785, "dataset_name": "selqa"}}, "download_checksums": {"https://raw.githubusercontent.com/emorynlp/selqa/master/at/selqa-at-train.json": {"num_bytes": 32230643, "checksum": "6af1e82dbec94d2c87c0cd6463a0d7eba1dd746cbdc72f481697843c466f4952"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/at/selqa-at-dev.json": {"num_bytes": 4562321, "checksum": "8cf266e9b8404e9ba1c062a1dbf43c79ae9bd2da929cb11351872c4f221815ac"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/at/selqa-at-test.json": {"num_bytes": 9356712, "checksum": "38971e74506b74c808756fefb1816453eb1a3c3989f2feb77d864c93da468905"}}, "download_size": 46149676, "post_processing_size": null, "dataset_size": 43214341, "size_in_bytes": 89364017}, "answer_triggering_experiments": {"description": "The SelQA dataset provides crowdsourced annotation for two selection-based question answer tasks, \nanswer sentence selection and answer triggering.\n", "citation": "@InProceedings{7814688,\n author={T. {Jurczyk} and M. {Zhai} and J. D. 
{Choi}},\n booktitle={2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)}, \n title={SelQA: A New Benchmark for Selection-Based Question Answering}, \n year={2016},\n volume={},\n number={},\n pages={820-827},\n doi={10.1109/ICTAI.2016.0128}\n}\n", "homepage": "", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "candidate": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "selqa", "config_name": "answer_triggering_experiments", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 42956518, "num_examples": 205075, "dataset_name": "selqa"}, "test": {"name": "test", "num_bytes": 12504961, "num_examples": 59845, "dataset_name": "selqa"}, "validation": {"name": "validation", "num_bytes": 6055616, "num_examples": 28798, "dataset_name": "selqa"}}, "download_checksums": {"https://raw.githubusercontent.com/emorynlp/selqa/master/at/selqa-at-train.tsv": {"num_bytes": 40495450, "checksum": "9cf58039e30583187e7e93e19043dceb2540d72fc13eb4eb09fd8147b3022346"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/at/selqa-at-dev.tsv": {"num_bytes": 5710016, "checksum": "76466b282ab62353e029af4292acb658c0659860c716c637c3e5f3faa9c693d1"}, "https://raw.githubusercontent.com/emorynlp/selqa/master/at/selqa-at-test.tsv": {"num_bytes": 11786773, "checksum": "4151fa580983f7d3903ea70e71d5d86f20abe75cb975b7d77434ea2e978fc132"}}, "download_size": 57992239, "post_processing_size": null, "dataset_size": 61517095, "size_in_bytes": 119509334}}
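The split counts and feature schemas recorded in the removed JSON remain reachable programmatically through the builder's `DatasetInfo`, so nothing in the deleted file above is lost to users. A short sketch, again assuming the `selqa` Hub id:

```python
from datasets import load_dataset_builder

# Sketch only: inspect the same metadata the deleted JSON used to carry.
# Repository id "selqa" is an assumption; the config name is from the file above.
builder = load_dataset_builder("selqa", "answer_triggering_experiments")

print(builder.info.splits)    # e.g. train: 205075, test: 59845, validation: 28798 examples
print(builder.info.features)  # question (string), candidate (string), label (ClassLabel)
```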