Datasets: cmrc2018
Sub-tasks: extractive-qa
Languages: Chinese
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
License: not specified

Commit aa6b236 (parent: 4f1ebb4), committed by albertvillanova (HF staff)

Delete legacy JSON metadata

Delete legacy `dataset_infos.json`.

Files changed (1):
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "A Span-Extraction dataset for Chinese machine reading comprehension to add language\ndiversities in this area. The dataset is composed by near 20,000 real questions annotated\non Wikipedia paragraphs by human experts. We also annotated a challenge set which\ncontains the questions that need comprehensive understanding and multi-sentence\ninference throughout the context.\n", "citation": "@inproceedings{cui-emnlp2019-cmrc2018,\n title = {A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension},\n author = {Cui, Yiming and\n Liu, Ting and\n Che, Wanxiang and\n Xiao, Li and\n Chen, Zhipeng and\n Ma, Wentao and\n Wang, Shijin and\n Hu, Guoping},\n booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},\n month = {nov},\n year = {2019},\n address = {Hong Kong, China},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/D19-1600},\n doi = {10.18653/v1/D19-1600},\n pages = {5886--5891}}\n", "homepage": "https://github.com/ymcui/cmrc2018", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "question-answering-extractive", "question_column": "question", "context_column": "context", "answers_column": "answers"}], "builder_name": "cmrc2018", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15508110, "num_examples": 10142, "dataset_name": "cmrc2018"}, "validation": {"name": "validation", "num_bytes": 5183809, "num_examples": 3219, "dataset_name": "cmrc2018"}, "test": {"name": "test", "num_bytes": 1606931, "num_examples": 1002, "dataset_name": "cmrc2018"}}, "download_checksums": {"https://worksheets.codalab.org/rest/bundles/0x15022f0c4d3944a599ab27256686b9ac/contents/blob/": {"num_bytes": 7408757, "checksum": "5497aa2f81908e31d6b0e27d99b1f90ab63a8f58fa92fffe5d17cf62eba0c212"}, "https://worksheets.codalab.org/rest/bundles/0x72252619f67b4346a85e122049c3eabd/contents/blob/": {"num_bytes": 3299139, "checksum": "e9ff74231f05c230c6fa88b84441ee334d97234cbb610991cd94b82db00c7f1f"}, "https://worksheets.codalab.org/rest/bundles/0x182c2e71fac94fc2a45cc1a3376879f7/contents/blob/": {"num_bytes": 800221, "checksum": "f3fae95b57da8e03afb2b57467dd221417060ef4d82db13bf22fc88589f3a6f3"}}, "download_size": 11508117, "post_processing_size": null, "dataset_size": 22298850, "size_in_bytes": 33806967}}