Dataset: cmrc2018

Sub-tasks: extractive-qa
Languages: Chinese
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
License: unspecified
Commit fbd84a7 (0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4):
  1. .gitattributes +27 -0
  2. cmrc2018.py +118 -0
  3. dataset_infos.json +1 -0
  4. dummy/0.1.0/dummy_data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
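
These .gitattributes rules route archives, model weights, and other binary artifacts through Git LFS so that only small pointer stubs live in the git history. As a rough illustration only, here is a hypothetical Python sketch (not part of this commit) that approximates which filenames the patterns above would catch; Python's fnmatch does not reproduce gitattributes glob semantics exactly (notably for saved_model/**/*), so treat it as illustrative.

# Approximate the LFS routing decision made by the .gitattributes above.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz", "*.gz",
    "*.h5", "*.joblib", "*.lfs.*", "*.model", "*.msgpack", "*.onnx",
    "*.ot", "*.parquet", "*.pb", "*.pt", "*.pth", "*.rar",
    "saved_model/**/*", "*.tar.*", "*.tflite", "*.tgz", "*.xz",
    "*.zip", "*.zstandard", "*tfevents*",
]

def uses_lfs(filename: str) -> bool:
    # fnmatch is an approximation: it matches "*" across "/" boundaries.
    return any(fnmatch(filename, pat) for pat in LFS_PATTERNS)

print(uses_lfs("dummy/0.1.0/dummy_data.zip"))  # True: matches *.zip
print(uses_lfs("cmrc2018.py"))                 # False: stored as plain text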
cmrc2018.py ADDED
@@ -0,0 +1,118 @@
+ """TODO(cmrc2018): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+
+ import datasets
+
+
+ # TODO(cmrc2018): BibTeX citation
+ _CITATION = """\
+ @inproceedings{cui-emnlp2019-cmrc2018,
+     title = {A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension},
+     author = {Cui, Yiming and
+       Liu, Ting and
+       Che, Wanxiang and
+       Xiao, Li and
+       Chen, Zhipeng and
+       Ma, Wentao and
+       Wang, Shijin and
+       Hu, Guoping},
+     booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
+     month = {nov},
+     year = {2019},
+     address = {Hong Kong, China},
+     publisher = {Association for Computational Linguistics},
+     url = {https://www.aclweb.org/anthology/D19-1600},
+     doi = {10.18653/v1/D19-1600},
+     pages = {5886--5891}}
+ """
+
+ # TODO(cmrc2018):
+ _DESCRIPTION = """\
+ A Span-Extraction dataset for Chinese machine reading comprehension to add language
+ diversities in this area. The dataset is composed by near 20,000 real questions annotated
+ on Wikipedia paragraphs by human experts. We also annotated a challenge set which
+ contains the questions that need comprehensive understanding and multi-sentence
+ inference throughout the context.
+ """
+ _URL = "https://github.com/ymcui/cmrc2018"
+ _TRAIN_FILE = "https://worksheets.codalab.org/rest/bundles/0x15022f0c4d3944a599ab27256686b9ac/contents/blob/"
+ _DEV_FILE = "https://worksheets.codalab.org/rest/bundles/0x72252619f67b4346a85e122049c3eabd/contents/blob/"
+ _TEST_FILE = "https://worksheets.codalab.org/rest/bundles/0x182c2e71fac94fc2a45cc1a3376879f7/contents/blob/"
+
+
+ class Cmrc2018(datasets.GeneratorBasedBuilder):
+     """TODO(cmrc2018): Short description of my dataset."""
+
+     # TODO(cmrc2018): Set up version.
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         # TODO(cmrc2018): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "text": datasets.Value("string"),
+                             "answer_start": datasets.Value("int32"),
+                         }
+                     ),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(cmrc2018): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         urls_to_download = {"train": _TRAIN_FILE, "dev": _DEV_FILE, "test": _TEST_FILE}
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(cmrc2018): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for example in data["data"]:
+                 for paragraph in example["paragraphs"]:
+                     context = paragraph["context"].strip()
+                     for qa in paragraph["qas"]:
+                         question = qa["question"].strip()
+                         id_ = qa["id"]
+
+                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                         answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                         yield id_, {
+                             "context": context,
+                             "question": question,
+                             "id": id_,
+                             "answers": {
+                                 "answer_start": answer_starts,
+                                 "text": answers,
+                             },
+                         }
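
With this script in place, the dataset loads through the library's standard entry point. A minimal usage sketch, assuming datasets >= 1.0.0 is installed and the CodaLab bundles above are reachable (the split counts in the comment come from dataset_infos.json below):

# Load CMRC 2018 and inspect one training example.
from datasets import load_dataset

cmrc = load_dataset("cmrc2018")

# Per dataset_infos.json: 10,142 train / 3,219 validation / 1,002 test examples.
print({split: ds.num_rows for split, ds in cmrc.items()})

sample = cmrc["train"][0]
print(sample["question"])
print(sample["answers"]["text"], sample["answers"]["answer_start"])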
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "A Span-Extraction dataset for Chinese machine reading comprehension to add language\ndiversities in this area. The dataset is composed by near 20,000 real questions annotated\non Wikipedia paragraphs by human experts. We also annotated a challenge set which\ncontains the questions that need comprehensive understanding and multi-sentence\ninference throughout the context.\n", "citation": "@inproceedings{cui-emnlp2019-cmrc2018,\n title = \"A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension\",\n author = \"Cui, Yiming and\n Liu, Ting and\n Che, Wanxiang and\n Xiao, Li and\n Chen, Zhipeng and\n Ma, Wentao and\n Wang, Shijin and\n Hu, Guoping\",\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)\",\n month = nov,\n year = \"2019\",\n address = \"Hong Kong, China\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/D19-1600\",\n doi = \"10.18653/v1/D19-1600\",\n pages = \"5886--5891\",\n}\n", "homepage": "https://github.com/ymcui/cmrc2018", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "cmrc2018", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1608065, "num_examples": 1002, "dataset_name": "cmrc2018"}, "train": {"name": "train", "num_bytes": 15519498, "num_examples": 10142, "dataset_name": "cmrc2018"}, "validation": {"name": "validation", "num_bytes": 5189046, "num_examples": 3219, "dataset_name": "cmrc2018"}}, "download_checksums": {"https://worksheets.codalab.org/rest/bundles/0x15022f0c4d3944a599ab27256686b9ac/contents/blob/": {"num_bytes": 7408757, "checksum": "5497aa2f81908e31d6b0e27d99b1f90ab63a8f58fa92fffe5d17cf62eba0c212"}, "https://worksheets.codalab.org/rest/bundles/0x72252619f67b4346a85e122049c3eabd/contents/blob/": {"num_bytes": 3299139, "checksum": "e9ff74231f05c230c6fa88b84441ee334d97234cbb610991cd94b82db00c7f1f"}, "https://worksheets.codalab.org/rest/bundles/0x182c2e71fac94fc2a45cc1a3376879f7/contents/blob/": {"num_bytes": 800221, "checksum": "f3fae95b57da8e03afb2b57467dd221417060ef4d82db13bf22fc88589f3a6f3"}}, "download_size": 11508117, "dataset_size": 22316609, "size_in_bytes": 33824726}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22c8d3196864a039d66d93c314ce4581017ef357dc18682d15c7e270eaa04c95
+ size 3944
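
Note that these three lines are not the zip archive itself but a Git LFS pointer stub: the actual 3,944-byte blob lives in LFS storage and is fetched on checkout. A small hypothetical parser for the pointer format, for illustration only:

# Parse a Git LFS pointer file (simple "key value" lines) into a dict.
POINTER = """\
version https://git-lfs.github.com/spec/v1
oid sha256:22c8d3196864a039d66d93c314ce4581017ef357dc18682d15c7e270eaa04c95
size 3944
"""

def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

info = parse_lfs_pointer(POINTER)
print(info["oid"], int(info["size"]))  # sha256:22c8... 3944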