Evelyn18 committed on
Commit
a1d0f7a
1 Parent(s): c69b2b6

Upload becasv2.py

Browse files
Files changed (1) hide show
  1. becasv2.py +158 -0
becasv2.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets.utils import version
2
+ """TODO(squad_es): Add a description here."""
3
+
4
+
5
+ import json
6
+
7
+ import datasets
8
+
9
+
10
+ # TODO(squad_es): BibTeX citation
11
+ _CITATION = """\
12
+ @article{2016arXiv160605250R,
13
+ author = {Casimiro Pio , Carrino and Marta R. , Costa-jussa and Jose A. R. , Fonollosa},
14
+ title = "{Automatic Spanish Translation of the SQuAD Dataset for Multilingual
15
+ Question Answering}",
16
+ journal = {arXiv e-prints},
17
+ year = 2019,
18
+ eid = {arXiv:1912.05200v1},
19
+ pages = {arXiv:1912.05200v1},
20
+ archivePrefix = {arXiv},
21
+ eprint = {1912.05200v2},
22
+ }
23
+ """
24
+
25
+ # TODO(squad_es_v1):
26
+ _DESCRIPTION = """\
27
+ automatic translation of the Stanford Question Answering Dataset (SQuAD) v2 into Spanish
28
+ """
29
+
30
+ _URL = "https://raw.githubusercontent.com/EvelynQuevedo/becas/main/"
31
+ print(_URL)
32
+ _URLS_V1 = {
33
+ "train": _URL + "datos/data.json"
34
+
35
+ #"dev": _URL + "SQuAD-es-v1.1/dev-v1.1-es.json",
36
+ }
37
+ print(_URLS_V1)
38
+ #_URLS_V2 = {
39
+ # "train": _URL + "SQuAD-es-v2.0/train-v2.0-es.json",
40
+ #"dev": _URL + "SQuAD-es-v2.0/dev-v2.0-es.json",
41
+ #}
42
+
43
+
44
+ class SquadEsConfig(datasets.BuilderConfig):
45
+ """BuilderConfig for SQUADEsV2."""
46
+
47
+ def __init__(self, **kwargs):
48
+ """BuilderConfig for SQUADEsV2.
49
+ Args:
50
+ **kwargs: keyword arguments forwarded to super.
51
+ """
52
+ super(SquadEsConfig, self).__init__(**kwargs)
53
+
54
+
55
+ class SquadEs(datasets.GeneratorBasedBuilder):
56
+ """TODO(squad_es): Short description of my dataset."""
57
+
58
+ # TODO(squad_es): Set up version.
59
+ VERSION = datasets.Version("0.1.0")
60
+ BUILDER_CONFIGS = [
61
+ SquadEsConfig(
62
+ name="v1.1.0",
63
+ version=datasets.Version("1.1.0", ""),
64
+ description="Plain text Spanish squad version 1",
65
+ ),
66
+ #SquadEsConfig(
67
+ # name="v2.0.0",
68
+ # version=datasets.Version("2.0.0", ""),
69
+ # description="Plain text Spanish squad version 2",
70
+ #),
71
+ ]
72
+
73
+ def _info(self):
74
+ # TODO(squad_es): Specifies the datasets.DatasetInfo object
75
+ return datasets.DatasetInfo(
76
+ # This is the description that will appear on the datasets page.
77
+ description=_DESCRIPTION,
78
+ # datasets.features.FeatureConnectors
79
+ features=datasets.Features(
80
+ {
81
+
82
+ # These are the features of your dataset like images, labels ...
83
+ "id": datasets.Value("string"),
84
+ "title": datasets.Value("string"),
85
+ "context": datasets.Value("string"),
86
+ "question": datasets.Value("string"),
87
+ "answers": datasets.features.Sequence(
88
+ {
89
+ "text": datasets.Value("string"),
90
+ "answer_start": datasets.Value("int32"),
91
+ }
92
+ ),
93
+ }
94
+ ),
95
+ # If there's a common (input, target) tuple from the features,
96
+ # specify them here. They'll be used if as_supervised=True in
97
+ # builder.as_dataset.
98
+ supervised_keys=None,
99
+ # Homepage of the dataset for documentation
100
+ homepage="https://github.com/EvelynQuevedo/becas",
101
+
102
+ citation=_CITATION,
103
+ )
104
+
105
+ def _split_generators(self, dl_manager):
106
+ """Returns SplitGenerators."""
107
+ # TODO(squad_es): Downloads the data and defines the splits
108
+ # dl_manager is a datasets.download.DownloadManager that can be used to
109
+
110
+ # download and extract URLs
111
+ if self.config.name == "v1.1.0":
112
+ dl_dir = dl_manager.download_and_extract(_URLS_V1)
113
+ print(dl_dir)
114
+ # elif self.config.name == "v2.0.0":
115
+ # dl_dir = dl_manager.download_and_extract(_URLS_V2)
116
+ else:
117
+ raise Exception("version does not match any existing one")
118
+ return [
119
+ datasets.SplitGenerator(
120
+ name=datasets.Split.TRAIN,
121
+ # These kwargs will be passed to _generate_examples
122
+ gen_kwargs={"filepath": dl_dir["train"]},
123
+ ),
124
+ # datasets.SplitGenerator(
125
+ # name=datasets.Split.VALIDATION,
126
+ # These kwargs will be passed to _generate_examples
127
+ # gen_kwargs={"filepath": dl_dir["dev"]},
128
+ #),
129
+ ]
130
+
131
+ def _generate_examples(self, filepath):
132
+ """Yields examples."""
133
+ # TODO(squad_es): Yields (key, example) tuples from the dataset
134
+ with open(filepath, encoding="utf-8") as f:
135
+ data = json.load(f)
136
+ for example in data["data"]:
137
+ title = example.get("title", "").strip()
138
+ for paragraph in example["paragraphs"]:
139
+ context = paragraph["context"].strip()
140
+ for qa in paragraph["qas"]:
141
+ question = qa["question"].strip()
142
+ id_ = qa["id"]
143
+
144
+ answer_starts = [answer["answer_start"] for answer in qa["answers"]]
145
+ answers = [answer["text"].strip() for answer in qa["answers"]]
146
+
147
+ yield id_, {
148
+ "title": title,
149
+ "context": context,
150
+ "question": question,
151
+ "id": id_,
152
+ "answers": {
153
+ "answer_start": answer_starts,
154
+ "text": answers,
155
+
156
+ },
157
+
158
+ }