Datasets:

Multilinguality:
multilingual
Size Categories:
unknown
Language Creators:
other
Annotations Creators:
no-annotation
Source Datasets:
original
Tags:
License:
maximedb committed on
Commit
2a822d8
1 Parent(s): 93226ab

Update from mdebruyn

Browse files
Files changed (2) hide show
  1. README.md +1 -0
  2. mqa.py +162 -0
README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ hello
mqa.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ import os
18
+ import gzip
19
+ import json
20
+
21
+ import datasets
22
+
23
+
24
+ _DESCRIPTION = """ """
25
+ _HOMEPAGE_URL = ""
26
+ _CITATION = """"""
27
+
28
+ _VERSION = "0.0"
29
+ _BASE_NAME = ""
30
+ _BASE_URL = "https://www.clips.uantwerpen.be/~mdebruyn/mqa/data.{}.{}.json.gz"
31
+
32
+
33
+ _LANGUAGES = [
34
+ "en", "de", "es", "fr",
35
+ "ru", "ja", "it", "zh", "pt",
36
+ "nl", "tr", "pl", "vi", "ar",
37
+ "id", "uk", "ro", "no", "th",
38
+ "sv", "el", "fi", "he", "da",
39
+ "cs", "ko", "fa", "hi", "hu",
40
+ "sk", "lt", "et", "hr", "is",
41
+ "lv", "ms", "bg", "sr", "ca"
42
+ ]
43
+ _SCOPES = ["faq", "cqa"]
44
+ _LEVELS = ["domain", "page", "question"]
45
+
46
+
47
class MQAConfig(datasets.BuilderConfig):
    """Configuration selecting one (language, scope, level) slice of MQA."""

    def __init__(self, *args, language="en", scope="all", level="question", **kwargs):
        """Build a config whose name is ``{language}-{scope}-{level}``.

        Args:
            language: ISO language code, or "all" for every language.
            scope: "faq", "cqa", or "all" for both.
            level: granularity of the examples — "domain", "page", or "question".
        """
        config_name = f"{language}-{scope}-{level}"
        super().__init__(*args, name=config_name, **kwargs)
        # Keep the slice parameters so the builder can route on them later.
        self.language = language
        self.scope = scope
        self.level = level
57
+
58
+
59
class MQA(datasets.GeneratorBasedBuilder):
    """Builder for the MQA multilingual FAQ/CQA dataset.

    One ``MQAConfig`` is registered per (language, scope, level) combination;
    the feature schema returned by ``_info`` depends on the requested level.
    """

    BUILDER_CONFIGS = []
    for language in _LANGUAGES:
        for scope in _SCOPES:
            for level in _LEVELS:
                BUILDER_CONFIGS.append(MQAConfig(language=language, scope=scope, level=level))
    BUILDER_CONFIG_CLASS = MQAConfig

    def _info(self):
        """Return the DatasetInfo whose features match ``self.config.level``.

        Raises:
            NotImplementedError: if the level is not one of
                "question", "page", or "domain".
        """
        question = {
            "id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "name": datasets.Value("string"),
            "answers": [{
                "text": datasets.Value("string"),
                "name": datasets.Value("string"),
                "is_accepted": datasets.Value("bool"),
            }]
        }
        page = {
            "id": datasets.Value("string"),
            "bucket": datasets.Value("string"),
            "domain": datasets.Value("string"),
            "questions": [question]
        }
        domain = {
            "domain": datasets.Value("string"),
            "pages": [page]
        }
        if self.config.level == "question":
            features = question
        elif self.config.level == "page":
            features = page
        elif self.config.level == "domain":
            features = domain
        else:
            raise NotImplementedError()
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one gzipped JSON-lines file per (language, scope) pair."""
        filenames = []
        # BUGFIX: a single language/scope must be wrapped in a list; otherwise
        # the string itself is iterated character by character below.
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        scopes = _SCOPES if self.config.scope == "all" else [self.config.scope]
        for language in languages:
            for scope in scopes:
                path = dl_manager.download_and_extract(_BASE_URL.format(language, scope))
                filenames.append(path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filenames": filenames},
            )
        ]

    def _generate_examples(self, filenames):
        """Yield (key, example) pairs at the configured level.

        Each file is JSON-lines with one page per line; pages for the same
        domain are assumed to be contiguous so domain-level grouping can be
        done with a single streaming pass.
        """

        def default(e, key, default_value=""):
            # Replace explicit JSON nulls with a default so the string
            # features never receive None.
            if e[key] is None:
                return default_value
            return e[key]

        for filename in filenames:
            with open(filename, "r") as f:
                domain = []          # pages accumulated for the current domain
                previous_domain = ''
                for line in f:
                    page = json.loads(line)
                    questions = [{
                        "text": default(question, "text"),
                        "name": default(question, "name"),
                        "id": question["hash"],
                        "answers": [{
                            "text": default(answer, "text"),
                            "name": default(answer, "name"),
                            "is_accepted": answer["is_accepted"]
                        } for answer in question["answers"]]
                    } for question in page["questions"]]
                    page = {
                        "id": page["page_hash"],
                        "domain": page["domain"],
                        "bucket": page["bucket"],
                        "questions": questions
                    }
                    if self.config.level == "question":
                        for question in questions:
                            # BUGFIX: the rebuilt dicts use "id", not "hash";
                            # the old pop("hash") raised KeyError (and popping
                            # would also strip the declared "id" feature).
                            yield question["id"], question
                    if self.config.level == "page":
                        # BUGFIX: the rebuilt page dict keys its hash as "id".
                        yield page["id"], page
                    if self.config.level == "domain":
                        if previous_domain in ("", page["domain"]):
                            domain.append(page)
                        else:
                            # Domain changed: flush the finished group and
                            # start a new one with the current page (the old
                            # code dropped this page and never reset the list).
                            yield previous_domain, {
                                "domain": previous_domain,
                                "pages": domain
                            }
                            domain = [page]
                        previous_domain = page["domain"]
                # BUGFIX: flush the last accumulated domain, which the old
                # code never yielded.
                if self.config.level == "domain" and domain:
                    yield previous_domain, {
                        "domain": previous_domain,
                        "pages": domain
                    }