saattrupdan committed
Commit
bf50f5a
1 Parent(s): c627c07

feat: Add builder script

Files changed (2)
  1. scandiqa.py +0 -1
  2. scandiqa.py +162 -0
scandiqa.py DELETED
@@ -1 +0,0 @@
- src/scandi_qa/scandiqa.py
 
 
scandiqa.py ADDED
@@ -0,0 +1,162 @@
+ # Copyright 2022 The HuggingFace Datasets Authors and Dan Saattrup Nielsen.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Python build script for the ScandiQA dataset."""
+
+
+ import json
+ from pathlib import Path
+ from typing import List
+
+ from datasets import Version
+ from datasets.builder import BuilderConfig, GeneratorBasedBuilder
+ from datasets.download import DownloadManager
+ from datasets.features import Features, Value
+ from datasets.info import DatasetInfo
+ from datasets.splits import Split, SplitGenerator
+
+ _DESCRIPTION = """
+ ScandiQA is a dataset of questions and answers in the Danish, Norwegian, and Swedish
+ languages. All samples come from the Natural Questions (NQ) dataset, which is a large
+ question answering dataset from Google searches. The Scandinavian questions and answers
+ come from the MKQA dataset, where 10,000 NQ samples were manually translated into,
+ among other languages, Danish, Norwegian, and Swedish. However, this did not include a
+ translated context, hindering the training of extractive question answering models.
+
+ We merged the NQ dataset with the MKQA dataset and extracted contexts either as "long
+ answers" from the NQ dataset, i.e., the paragraph in which the answer was found, or,
+ when no long answer was available, as the paragraph with the largest cosine similarity
+ to the question among the paragraphs containing the desired answer.
+
+ Further, many answers in the MKQA dataset were "language normalised": for instance, all
+ date answers were converted to the format "YYYY-MM-DD", meaning that in most cases
+ these answers do not appear verbatim in any paragraph. We solved this by extending the
+ MKQA answers with plausible "answer candidates": slight perturbations or translations
+ of the answer.
+
+ With the contexts extracted, we translated them into Danish, Swedish and Norwegian,
+ using the DeepL translation service for Danish and Swedish and the Google Translation
+ service for Norwegian. After translation, we ensured that the Scandinavian answers do
+ indeed occur in the translated contexts.
+
+ As the MKQA samples are filtered at both the merging stage and the translation stage,
+ not all 10,000 samples could be converted to the Scandinavian languages; instead, we
+ end up with roughly 8,000 samples per language. These have further been split into
+ training, validation and test splits, with the latter two containing roughly 750
+ samples each. The splits have been created in such a way that the proportion of
+ samples without an answer is roughly the same in each split.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/alexandrainst/scandiqa"
+ _LICENSE = "CC BY 4.0"
+ _URLS = {
+     "da": [
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/da/train.jsonl",
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/da/val.jsonl",
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/da/test.jsonl",
+     ],
+     "sv": [
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/sv/train.jsonl",
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/sv/val.jsonl",
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/sv/test.jsonl",
+     ],
+     "no": [
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/no/train.jsonl",
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/no/val.jsonl",
+         "https://huggingface.co/datasets/saattrupdan/scandiqa/resolve/main/data/no/test.jsonl",
+     ],
+ }
+
+ # _CITATION = """
+ # @InProceedings{huggingface:dataset,
+ #     title = {ScandiQA: A Scandinavian Question Answering Dataset},
+ #     author={Dan Saattrup Nielsen},
+ #     year={2022}
+ # }
+ # """
+
+
+ class ScandiQA(GeneratorBasedBuilder):
+     """Scandinavian question answering dataset."""
+
+     VERSION = Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         BuilderConfig(
+             name="da",
+             version=VERSION,
+             description="The Danish part of the ScandiQA dataset.",
+         ),
+         BuilderConfig(
+             name="sv",
+             version=VERSION,
+             description="The Swedish part of the ScandiQA dataset.",
+         ),
+         BuilderConfig(
+             name="no",
+             version=VERSION,
+             description="The Norwegian part of the ScandiQA dataset.",
+         ),
+     ]
+
+     def _info(self) -> DatasetInfo:
+         features = Features(
+             {
+                 "example_id": Value("int64"),
+                 "question": Value("string"),
+                 "answer": Value("string"),
+                 "answer_start": Value("int64"),
+                 "context": Value("string"),
+                 "answer_en": Value("string"),
+                 "answer_start_en": Value("int64"),
+                 "context_en": Value("string"),
+                 "title_en": Value("string"),
+             }
+         )
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             # citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
+         urls = _URLS[self.config.name]
+         downloaded_files = dl_manager.download_and_extract(urls)
+         return [
+             SplitGenerator(
+                 name=str(Split.TRAIN),
+                 gen_kwargs=dict(
+                     filepath=downloaded_files[0],
+                     split="train",
+                 ),
+             ),
+             SplitGenerator(
+                 name=str(Split.VALIDATION),
+                 gen_kwargs=dict(
+                     filepath=downloaded_files[1],
+                     split="val",
+                 ),
+             ),
+             SplitGenerator(
+                 name=str(Split.TEST),
+                 gen_kwargs=dict(filepath=downloaded_files[2], split="test"),
+             ),
+         ]
+
+     def _generate_examples(self, filepath: str, split):
+         with Path(filepath).open(encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 yield key, data
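
Note that _generate_examples streams each JSONL line through unchanged, so every row in the data files must already match the feature schema declared in _info. For illustration, a row would look roughly like this (all field values below are invented):

# Illustrative shape of a single JSONL row (values invented for illustration):
row = {
    "example_id": 1234,
    "question": "hvornår kom den første bil til danmark",
    "answer": "1888",
    "answer_start": 57,
    "context": "Den første bil i Danmark ...",
    "answer_en": "1888",
    "answer_start_en": 61,
    "context_en": "The first car in Denmark ...",
    "title_en": "Automobile",
}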
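
The context-extraction step described in _DESCRIPTION (picking the most question-similar paragraph among those containing the answer) is not part of this builder; it presumably lives in the src/scandi_qa package that the deleted pointer file referenced. As a rough sketch of the idea only, assuming TF-IDF vectors and a hypothetical pick_context helper (the actual pipeline may use a different text representation):

from typing import List, Optional

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


def pick_context(question: str, paragraphs: List[str], answer: str) -> Optional[str]:
    """Pick the paragraph most similar to the question that contains the answer."""
    # Only paragraphs that actually contain the desired answer are candidates.
    candidates = [p for p in paragraphs if answer.lower() in p.lower()]
    if not candidates:
        return None
    # Embed the question and all candidate paragraphs in the same TF-IDF space.
    matrix = TfidfVectorizer().fit_transform([question] + candidates)
    # Cosine similarity between the question (row 0) and each candidate.
    sims = cosine_similarity(matrix[0:1], matrix[1:]).flatten()
    return candidates[int(sims.argmax())]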
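
Similarly, the "answer candidate" extension for language-normalised answers can be pictured as generating plausible surface forms of the normalised value. A minimal sketch for date answers in Danish (the helper name and the exact set of perturbations are invented for illustration):

from datetime import datetime
from typing import List

# Danish month names, used to spell dates the way they appear in running text.
_MONTHS_DA = [
    "januar", "februar", "marts", "april", "maj", "juni",
    "juli", "august", "september", "oktober", "november", "december",
]


def date_answer_candidates(iso_date: str) -> List[str]:
    """Generate plausible Danish surface forms of a normalised YYYY-MM-DD answer."""
    date = datetime.strptime(iso_date, "%Y-%m-%d")
    month = _MONTHS_DA[date.month - 1]
    return [
        iso_date,                            # "1994-06-24"
        f"{date.day}. {month} {date.year}",  # "24. juni 1994"
        f"{date.day}. {month}",              # "24. juni"
        str(date.year),                      # "1994"
    ]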
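
With this script at the repository root, the dataset should be loadable through datasets.load_dataset, with the config name selecting the language. A usage sketch, assuming the repo id from the _URLS above:

from datasets import load_dataset

# "da", "sv" and "no" are the three builder configs defined above.
scandiqa_da = load_dataset("saattrupdan/scandiqa", "da")

print(scandiqa_da)                          # DatasetDict with train/validation/test
print(scandiqa_da["train"][0]["question"])  # a Danish question string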