glecorve committed on
Commit
e9a7ca0
1 Parent(s): 3b4fca5

Removed script + moved original JSON files

challenge.json → json/challenge.json RENAMED
File without changes
dev.json → json/dev.json RENAMED
File without changes
test.json → json/test.json RENAMED
File without changes
train.json → json/train.json RENAMED
File without changes
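
With the loading script removed, the splits have to be read from the relocated JSON files directly, either with the standard library or via the generic builder, e.g. datasets.load_dataset("json", data_files=...). A minimal sketch with the standard library, assuming a local clone of this repository after the move:

import json

# Read one relocated split; the path follows the renames listed above.
with open("json/train.json", "r", encoding="utf-8") as f:
    train = json.load(f)  # each file is a top-level list of sample dicts

print(len(train), "samples")
print(sorted(train[0].keys()))  # raw field names, e.g. "modifiedtripleset"

Note that the raw files keep the original WebNLG key names; the renaming the deleted script used to apply is visible in the diff below.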
webnlgqa.py DELETED
@@ -1,199 +0,0 @@
- import os
- import zipfile
- import json
- import base64
- import sys
- import traceback
-
- import datasets
-
- _CITATION = """\
- @inproceedings{lecorve2022sparql2text,
-     title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
-     author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
-     journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
-     year={2022}
- }
- """
-
- _HOMEPAGE = ""
-
- _URLS = {
-     "train": "train.json",
-     "dev": "dev.json",
-     "test": "test.json",
-     "challenge": "challenge.json"
- }
-
- _DESCRIPTION = """\
- Augmented version of WebNLG v3.0 English with follow-up SPARQL queries with their associated answer(s). A small portion of it also contains natural language questions associated with the queries.
- """
-
- class WebNLGQA(datasets.GeneratorBasedBuilder):
-     """
-     WebNLG-QA: Augmented version of WebNLG v3.0 English with follow-up SPARQL queries with their associated answer(s). A small portion of it also contains natural language questions associated with the queries.
-     """
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 {
-                     "category": datasets.Value("string"),
-                     "size": datasets.Value("int32"),
-                     "id": datasets.Value("string"),
-                     "eid": datasets.Value("string"),
-                     "original_triple_sets": [
-                         {"subject": datasets.Value("string"),
-                          "property": datasets.Value("string"),
-                          "object": datasets.Value("string")}
-                     ],
-                     "modified_triple_sets": [
-                         {"subject": datasets.Value("string"),
-                          "property": datasets.Value("string"),
-                          "object": datasets.Value("string")}
-                     ],
-                     "shape": datasets.Value("string"),
-                     "shape_type": datasets.Value("string"),
-                     "lex": datasets.Sequence(
-                         {
-                             "comment": datasets.Value("string"),
-                             "lid": datasets.Value("string"),
-                             "text": datasets.Value("string"),
-                             "lang": datasets.Value("string"),
-                         }
-                     ),
-                     "test_category": datasets.Value("string"),
-                     "dbpedia_links": datasets.Sequence(datasets.Value("string")),
-                     "links": datasets.Sequence(datasets.Value("string")),
-                     "graph": [
-                         [datasets.Value("string")]
-                     ],
-                     "main_entity": datasets.Value("string"),
-                     "mappings": [
-                         {
-                             "modified": datasets.Value("string"),
-                             "readable": datasets.Value("string"),
-                             "graph": datasets.Value("string")
-                         }
-                     ],
-                     "dialogue": [
-                         {
-                             "question": [ {
-                                 "source": datasets.Value("string"),
-                                 "text": datasets.Value("string")
-                             }],
-                             "graph_query": datasets.Value("string"),
-                             "readable_query": datasets.Value("string"),
-                             "graph_answer": [
-                                 datasets.Value("string")
-                             ],
-                             "readable_answer": [
-                                 datasets.Value("string")
-                             ],
-                             "type": [ datasets.Value("string") ]
-                         }
-                     ]
-                 }
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # Downloads the data and defines the splits
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         paths = dl_manager.download_and_extract(_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": paths['train'],
-                             "split": "train"}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"filepath": paths['dev'],
-                             "split": "dev"}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": paths['test'],
-                             "split": "test"}
-             ),
-             datasets.SplitGenerator(
-                 name="challenge",
-                 gen_kwargs={"filepath": paths['challenge'],
-                             "split": "challenge"}
-             )
-         ]
-
-
-     def _generate_examples(self, filepath, split):
-         """Yields examples."""
-
-         def transform_sample(original_sample):
-             transformed_sample = {
-                 "category": "",
-                 "size": -1,
-                 "id": "",
-                 "eid": "",
-                 "original_triple_sets": [],
-                 "modified_triple_sets": [],
-                 "shape": "",
-                 "shape_type": "",
-                 "lex": [],
-                 "test_category": "",
-                 "dbpedia_links": [],
-                 "links": [],
-                 "graph": [],
-                 "main_entity": "",
-                 "mappings": [],
-                 "dialogue": []
-             }
-
-             for (old_key, new_key) in [("modifiedtripleset", "modified_triple_sets"), ("originaltriplesets", "original_triple_sets"), ("dbpedialinks", "dbpedia_links"), ("lexicalisations", "lex"), ("xml_id", "eid")]:
-                 original_sample[new_key] = original_sample[old_key]
-                 del original_sample[old_key]
-
-             original_sample["original_triple_sets"] = original_sample["original_triple_sets"]["originaltripleset"][0]
-
-             for l in original_sample["lex"]:
-                 l["lid"] = l["xml_id"]
-                 del l["xml_id"]
-                 l["text"] = l["lex"]
-                 del l["lex"]
-
-             for turn in original_sample["dialogue"]:
-                 if "question" in turn:
-                     old_format = turn["question"]
-                     new_format = []
-                     for source, text in old_format.items():
-                         new_format.append({"source": source, "text": text})
-                     turn["question"] = new_format
-
-
-             for k in transformed_sample:
-                 if k in original_sample:
-                     transformed_sample[k] = original_sample[k]
-             # transformed_sample.update(original_sample)
-
-             return transformed_sample
-
-         # Yields (key, example) tuples from the dataset
-         with open(filepath,'r') as f:
-             data = json.load(f)
-             key = 0
-             for it in data:
-                 yield key, transform_sample(it)
-                 key += 1
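
For reference, the deleted _generate_examples normalized raw JSON keys before yielding examples, so consumers of the raw files after this commit inherit the original names. A hedged sketch reproducing that renaming outside the builder (the rename_keys helper is hypothetical; the key pairs are taken verbatim from transform_sample above):

# Key pairs copied from the deleted script's transform_sample loop.
RENAMES = [
    ("modifiedtripleset", "modified_triple_sets"),
    ("originaltriplesets", "original_triple_sets"),
    ("dbpedialinks", "dbpedia_links"),
    ("lexicalisations", "lex"),
    ("xml_id", "eid"),
]

def rename_keys(sample: dict) -> dict:
    # Copy so the raw sample stays untouched, then move each old key
    # to its schema name, mirroring the loop in transform_sample.
    out = dict(sample)
    for old_key, new_key in RENAMES:
        if old_key in out:
            out[new_key] = out.pop(old_key)
    return out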