Datasets:

Sub-tasks:
extractive-qa
Multilinguality:
multilingual
Size Categories:
1K<n<10K
Language Creators:
found
Annotations Creators:
expert-generated
ArXiv:
Tags:
License:
albertvillanova HF staff committed on
Commit
4b06ecd
1 Parent(s): 3e78e45

Delete loading script

Browse files
Files changed (1) hide show
  1. xquad_r.py +0 -141
xquad_r.py DELETED
@@ -1,141 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """TODO: Add a description here."""
16
-
17
-
18
- import json
19
-
20
- import datasets
21
-
22
-
23
- # TODO: Add BibTeX citation
24
- # Find for instance the citation on arxiv or on the dataset repo/website
25
- _CITATION = """\
26
- @article{roy2020lareqa,
27
- title={LAReQA: Language-agnostic answer retrieval from a multilingual pool},
28
- author={Roy, Uma and Constant, Noah and Al-Rfou, Rami and Barua, Aditya and Phillips, Aaron and Yang, Yinfei},
29
- journal={arXiv preprint arXiv:2004.05484},
30
- year={2020}
31
- }
32
- """
33
-
34
- # TODO: Add description of the dataset here
35
- # You can copy an official description
36
- _DESCRIPTION = """\
37
- XQuAD-R is a retrieval version of the XQuAD dataset (a cross-lingual extractive QA dataset). Like XQuAD, XQUAD-R is an 11-way parallel dataset, where each question appears in 11 different languages and has 11 parallel correct answers across the languages.
38
- """
39
-
40
- # TODO: Add a link to an official homepage for the dataset here
41
- _HOMEPAGE = "https://github.com/google-research-datasets/lareqa"
42
-
43
- # TODO: Add link to the official dataset URLs here
44
- # The HuggingFace dataset library don't host the datasets but only point to the original files
45
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
46
- _URL = "https://github.com/google-research-datasets/lareqa/raw/master/xquad-r/"
47
- _LANG = ["ar", "de", "zh", "vi", "en", "es", "hi", "el", "th", "tr", "ru"]
48
-
49
-
50
class XquadRConfig(datasets.BuilderConfig):
    """BuilderConfig for XQuAD-R.

    One config is created per language code in ``_LANG``.
    """

    def __init__(self, lang, **kwargs):
        """
        Args:
            lang: string, language code for the input text (e.g. ``"en"``).
            **kwargs: keyword arguments forwarded to super.
        """
        # All language configs share a single dataset version.
        # Python-3 zero-argument super() (the file already requires Py3
        # via f-strings elsewhere).
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.lang = lang
62
-
63
-
64
class XquadR(datasets.GeneratorBasedBuilder):
    """XQuAD-R: an 11-way parallel, retrieval-oriented extractive QA dataset.

    Each builder config corresponds to one language; the data files are
    SQuAD-format JSON hosted in the google-research-datasets/lareqa repo.
    """

    VERSION = datasets.Version("1.1.0")
    # One configuration per language code.
    BUILDER_CONFIGS = [XquadRConfig(name=lang, description=_DESCRIPTION, lang=lang) for lang in _LANG]

    def _info(self):
        """Return the DatasetInfo; features follow the SQuAD schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's JSON file and expose it as one validation split.

        Only the file for ``self.config.lang`` is fetched — the other ten
        languages are separate configs and need not be downloaded here.
        """
        url = _URL + f"{self.config.lang}.json"
        downloaded_file = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"filepath": downloaded_file},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) tuples from a SQuAD-format JSON file."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for article in data["data"]:
            for paragraph in article["paragraphs"]:
                context = paragraph["context"].strip()
                for qa in paragraph["qas"]:
                    question = qa["question"].strip()
                    id_ = qa["id"]

                    answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                    answers = [answer["text"].strip() for answer in qa["answers"]]

                    # Features currently used are "context", "question", and
                    # "answers"; "id" is kept for ease of future expansion.
                    yield id_, {
                        "context": context,
                        "question": question,
                        "id": id_,
                        "answers": {
                            "answer_start": answer_starts,
                            "text": answers,
                        },
                    }