system HF staff committed on
Commit
8f1868b
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"v1.1.0": {"description": "automatic translation of the Stanford Question Answering Dataset (SQuAD) v2 into Spanish\n", "citation": "@article{2016arXiv160605250R,\n author = {Casimiro Pio , Carrino and Marta R. , Costa-jussa and Jose A. R. , Fonollosa},\n title = \"{Automatic Spanish Translation of the SQuAD Dataset for Multilingual\nQuestion Answering}\",\n journal = {arXiv e-prints},\n year = 2019,\n eid = {arXiv:1912.05200v1},\n pages = {arXiv:1912.05200v1},\narchivePrefix = {arXiv},\n eprint = {1912.05200v2},\n}\n", "homepage": "https://github.com/ccasimiro88/TranslateAlignRetrieve", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "squad_es", "config_name": "v1.1.0", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 83680438, "num_examples": 87595, "dataset_name": "squad_es"}, "validation": {"name": "validation", "num_bytes": 10955800, "num_examples": 10570, "dataset_name": "squad_es"}}, "download_checksums": {"https://raw.githubusercontent.com/ccasimiro88/TranslateAlignRetrieve/master/SQuAD-es-v1.1/train-v1.1-es.json": {"num_bytes": 33946311, "checksum": "196cfa14b6ba7d903f02e6edc20485bb959b72c965b6a2d6883b8ffd74dc2c3b"}, "https://raw.githubusercontent.com/ccasimiro88/TranslateAlignRetrieve/master/SQuAD-es-v1.1/dev-v1.1-es.json": {"num_bytes": 5345051, "checksum": "969c846df740ee2725ac23b4d2882a58ef19a4c696767b306227c243ed99f461"}}, "download_size": 39291362, "dataset_size": 
94636238, "size_in_bytes": 133927600}}
dummy/v1.1.0/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:401abe1449adda90860469f4199488871be3dcfa4f12766c77717a132c0d65ae
3
+ size 1888
dummy/v2.0.0/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd9efb96b7b59af7ef7a0b8f1302852c0efa4c6e2068ac17679ae301afaf4ce5
3
+ size 1888
squad_es.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TODO(squad_es): Add a description here."""
2
+
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import json
6
+ import os
7
+
8
+ import datasets
9
+
10
+
11
+ # TODO(squad_es): BibTeX citation
12
+ _CITATION = """\
13
+ @article{2016arXiv160605250R,
14
+ author = {Casimiro Pio , Carrino and Marta R. , Costa-jussa and Jose A. R. , Fonollosa},
15
+ title = "{Automatic Spanish Translation of the SQuAD Dataset for Multilingual
16
+ Question Answering}",
17
+ journal = {arXiv e-prints},
18
+ year = 2019,
19
+ eid = {arXiv:1912.05200v1},
20
+ pages = {arXiv:1912.05200v1},
21
+ archivePrefix = {arXiv},
22
+ eprint = {1912.05200v2},
23
+ }
24
+ """
25
+
26
+ # TODO(squad_es_v1):
27
+ _DESCRIPTION = """\
28
+ automatic translation of the Stanford Question Answering Dataset (SQuAD) v2 into Spanish
29
+ """
30
+
31
+ _URL = "https://raw.githubusercontent.com/ccasimiro88/TranslateAlignRetrieve/master/"
32
+
33
+
34
+ class SquadEsConfig(datasets.BuilderConfig):
35
+ """BuilderConfig for SQUADEsV2."""
36
+
37
+ def __init__(self, **kwargs):
38
+ """BuilderConfig for SQUADEsV2.
39
+
40
+ Args:
41
+ **kwargs: keyword arguments forwarded to super.
42
+ """
43
+ super(SquadEsConfig, self).__init__(**kwargs)
44
+
45
+
46
+ class SquadEs(datasets.GeneratorBasedBuilder):
47
+ """TODO(squad_es): Short description of my dataset."""
48
+
49
+ # TODO(squad_es): Set up version.
50
+ VERSION = datasets.Version("0.1.0")
51
+
52
+ BUILDER_CONFIGS = [
53
+ SquadEsConfig(
54
+ name="v1.1.0",
55
+ version=datasets.Version("1.1.0", ""),
56
+ description="Plain text Spanish squad version 1",
57
+ ),
58
+ SquadEsConfig(
59
+ name="v2.0.0",
60
+ version=datasets.Version("2.0.0", ""),
61
+ description="Plain text Spanish squad version 2",
62
+ ),
63
+ ]
64
+
65
+ def _info(self):
66
+ # TODO(squad_es): Specifies the datasets.DatasetInfo object
67
+ return datasets.DatasetInfo(
68
+ # This is the description that will appear on the datasets page.
69
+ description=_DESCRIPTION,
70
+ # datasets.features.FeatureConnectors
71
+ features=datasets.Features(
72
+ {
73
+ # These are the features of your dataset like images, labels ...
74
+ "id": datasets.Value("string"),
75
+ "title": datasets.Value("string"),
76
+ "context": datasets.Value("string"),
77
+ "question": datasets.Value("string"),
78
+ "answers": datasets.features.Sequence(
79
+ {
80
+ "text": datasets.Value("string"),
81
+ "answer_start": datasets.Value("int32"),
82
+ }
83
+ ),
84
+ }
85
+ ),
86
+ # If there's a common (input, target) tuple from the features,
87
+ # specify them here. They'll be used if as_supervised=True in
88
+ # builder.as_dataset.
89
+ supervised_keys=None,
90
+ # Homepage of the dataset for documentation
91
+ homepage="https://github.com/ccasimiro88/TranslateAlignRetrieve",
92
+ citation=_CITATION,
93
+ )
94
+
95
+ def _split_generators(self, dl_manager):
96
+ """Returns SplitGenerators."""
97
+ # TODO(squad_es): Downloads the data and defines the splits
98
+ # dl_manager is a datasets.download.DownloadManager that can be used to
99
+
100
+ # download and extract URLs
101
+ v1_urls = {
102
+ "train": os.path.join(_URL, "SQuAD-es-v1.1/train-v1.1-es.json"),
103
+ "dev": os.path.join(_URL, "SQuAD-es-v1.1/dev-v1.1-es.json"),
104
+ }
105
+ v2_urls = {
106
+ "train": os.path.join(_URL, "SQuAD-es-v2.0/train-v2.0-es.json"),
107
+ "dev": os.path.join(_URL, "SQuAD-es-v2.0/dev-v2.0-es.json"),
108
+ }
109
+ if self.config.name == "v1.1.0":
110
+ dl_dir = dl_manager.download_and_extract(v1_urls)
111
+ elif self.config.name == "v2.0.0":
112
+ dl_dir = dl_manager.download_and_extract(v2_urls)
113
+ else:
114
+ raise Exception("version does not match any existing one")
115
+ return [
116
+ datasets.SplitGenerator(
117
+ name=datasets.Split.TRAIN,
118
+ # These kwargs will be passed to _generate_examples
119
+ gen_kwargs={"filepath": dl_dir["train"]},
120
+ ),
121
+ datasets.SplitGenerator(
122
+ name=datasets.Split.VALIDATION,
123
+ # These kwargs will be passed to _generate_examples
124
+ gen_kwargs={"filepath": dl_dir["dev"]},
125
+ ),
126
+ ]
127
+
128
+ def _generate_examples(self, filepath):
129
+ """Yields examples."""
130
+ # TODO(squad_es): Yields (key, example) tuples from the dataset
131
+ with open(filepath, encoding="utf-8") as f:
132
+ data = json.load(f)
133
+ for example in data["data"]:
134
+ title = example.get("title", "").strip()
135
+ for paragraph in example["paragraphs"]:
136
+ context = paragraph["context"].strip()
137
+ for qa in paragraph["qas"]:
138
+ question = qa["question"].strip()
139
+ id_ = qa["id"]
140
+
141
+ answer_starts = [answer["answer_start"] for answer in qa["answers"]]
142
+ answers = [answer["text"].strip() for answer in qa["answers"]]
143
+
144
+ yield id_, {
145
+ "title": title,
146
+ "context": context,
147
+ "question": question,
148
+ "id": id_,
149
+ "answers": {
150
+ "answer_start": answer_starts,
151
+ "text": answers,
152
+ },
153
+ }