piotr-rybak committed
Commit: cffebbf
Parent: 72e2bed

add passages

Files changed (2):
  1. data/passages.jsonl +3 -0
  2. polqa.py +165 -0
data/passages.jsonl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:902fef5a8710d41a5603e8b067baaef20eb3b2b9181e639e60834b6ca2cd0a66
size 3296480688
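
The three lines above are a Git LFS pointer, not the passage data itself: the actual JSONL file (3,296,480,688 bytes, about 3.3 GB) lives in LFS storage and is fetched when the repository is cloned with git-lfs or downloaded through the `datasets` library. As a minimal sketch, the pointer's key/value layout can be read like this (the helper name is hypothetical, for illustration only):

def parse_lfs_pointer(path):
    # Each pointer line is "<key> <value>", e.g. "size 3296480688".
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("data/passages.jsonl")
print(pointer["oid"])   # sha256:902fef5a...
print(pointer["size"])  # 3296480688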
polqa.py ADDED
@@ -0,0 +1,165 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import csv
import json

import datasets


_CITATION = """\
@misc{rybak2022improving,
      title={Improving Question Answering Performance through Manual Annotation: Costs, Benefits and Strategies},
      author={Piotr Rybak and Piotr Przybyła and Maciej Ogrodniczuk},
      year={2022},
      eprint={2212.08897},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
PolQA is the first Polish dataset for OpenQA. It consists of 7,000 questions, 87,525 manually labeled evidence passages, and a corpus of over 7 million candidate passages.
"""

_HOMEPAGE = ""

_LICENSE = ""

_FEATURES_PAIRS = datasets.Features(
    {
        "question_id": datasets.Value("int32"),
        "passage_title": datasets.Value("string"),
        "passage_text": datasets.Value("string"),
        "passage_wiki": datasets.Value("string"),
        "passage_id": datasets.Value("string"),
        "duplicate": datasets.Value("bool"),
        "question": datasets.Value("string"),
        "relevant": datasets.Value("bool"),
        "annotated_by": datasets.Value("string"),
        "answers": datasets.Value("string"),
        "question_formulation": datasets.Value("string"),
        "question_type": datasets.Value("string"),
        "entity_type": datasets.Value("string"),
        "entity_subtype": datasets.Value("string"),
        "split": datasets.Value("string"),
        "passage_source": datasets.Value("string"),
    }
)
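
# Note: "duplicate" and "relevant" are declared as booleans above, but the
# CSV files store them as the literal strings "True"/"False"; the
# _parse_bool helper in the builder below converts them.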

_FEATURES_PASSAGES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "title": datasets.Value("string"),
        "text": datasets.Value("string"),
    }
)
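
# The passage corpus uses a plain id/title/text layout, the shape retrieval
# pipelines (e.g. DPR-style retrievers) typically expect.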

_URLS = {
    "pairs": {
        "train": ["data/train.csv"],
        "validation": ["data/valid.csv"],
        "test": ["data/test.csv"],
    },
    "passages": {
        "train": ["data/passages.jsonl"],
    },
}
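
# Two configurations share this script: "pairs" ships train/validation/test
# CSVs of annotated question-passage pairs, while "passages" is a single
# train-only JSONL corpus (the Git LFS file added in this commit).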


class PolQA(datasets.GeneratorBasedBuilder):
    """PolQA is the first Polish dataset for OpenQA. It consists of manually labeled QA pairs and a corpus of Wikipedia passages."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"))
        for name in _URLS
    ]
    DEFAULT_CONFIG_NAME = "pairs"

    def _info(self):
        if self.config.name == "pairs":
            features = _FEATURES_PAIRS
        else:
            features = _FEATURES_PASSAGES

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        if self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepaths": data_dir["train"],
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepaths": data_dir["validation"],
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepaths": data_dir["test"],
                        "split": "test",
                    },
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepaths": data_dir["train"],
                        "split": "train",
                    },
                ),
            ]

    @staticmethod
    def _parse_bool(text):
        if text == "True":
            return True
        elif text == "False":
            return False
        else:
            raise ValueError(f"Cannot parse {text!r} as a boolean")

    def _generate_examples(self, filepaths, split):
        if self.config.name == "pairs":
            boolean_features = [name for name, val in _FEATURES_PAIRS.items() if val.dtype == "bool"]

            # Keep a single running key so example keys stay unique even if
            # a split is ever backed by more than one file.
            key = 0
            for filepath in filepaths:
                with open(filepath, encoding="utf-8") as f:
                    for row in csv.DictReader(f):
                        for boolean_feature in boolean_features:
                            row[boolean_feature] = self._parse_bool(row[boolean_feature])
                        yield key, row
                        key += 1
        else:
            key = 0
            for filepath in filepaths:
                with open(filepath, encoding="utf-8") as f:
                    for row in f:
                        yield key, json.loads(row)
                        key += 1
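
For reference, a usage sketch once this script is published on the Hub; the repository ID below is an assumption, not something stated in this commit:

from datasets import load_dataset

# "ipipan/polqa" is a guess at the Hub repository ID; substitute the real path.
pairs = load_dataset("ipipan/polqa", "pairs")
print(pairs["train"][0]["question"])

# The passage corpus is ~3.3 GB of JSONL, so streaming avoids a full download.
passages = load_dataset("ipipan/polqa", "passages", split="train", streaming=True)
print(next(iter(passages)))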