glecorve committed
Commit: a9a54e8
Parent: 8e682c8

Inflate dataset

Files changed (7)
  1. .gitattributes +4 -0
  2. README.md +9 -0
  3. challenge.json +3 -0
  4. dev.json +3 -0
  5. test.json +3 -0
  6. train.json +3 -0
  7. webnlgqa.py +199 -0
.gitattributes CHANGED
@@ -53,3 +53,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+train.json filter=lfs diff=lfs merge=lfs -text
+dev.json filter=lfs diff=lfs merge=lfs -text
+test.json filter=lfs diff=lfs merge=lfs -text
+challenge.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,12 @@
 ---
 license: cc-by-sa-4.0
+task_categories:
+- conversational
+- question-answering
+- text-generation
+tags:
+- qa
+- knowledge-graph
+language:
+- en
 ---
challenge.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd2f4fc1e61d2e9ef9a059bb15a4cc6ef1c1c68534ee80474be53a6cf6026f4b
+size 968686
dev.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0518c960195c31ad995fb0b700cb0b84ed4f6717365a1416c7c1ee143928300f
+size 10345623
test.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1390503972269520e74edc2f3489114c488b05ab3310be36cc2e3f0ead92ccd4
+size 11856922
train.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f77a46ef327cf8f0a56cefd1bbd170bc64551c7fbba10db9e289e26f1f80acd
+size 82137230
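
The four JSON entries above are Git LFS pointer stubs rather than the data itself: each records the pointer spec version, the SHA-256 digest of the real file, and its size in bytes (so train.json weighs in at roughly 82 MB). As a side note, not part of the commit, a downloaded file can be verified against its pointer with a few lines of Python; check_lfs_pointer and both paths are hypothetical names for this sketch:

import hashlib

def check_lfs_pointer(pointer_path, data_path):
    # A Git LFS pointer file is three "key value" lines: version, oid, size.
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # Recompute the SHA-256 digest of the actual data file and compare
    # against the "oid" field ("sha256:<hexdigest>").
    digest = hashlib.sha256()
    with open(data_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return fields["oid"] == "sha256:" + digest.hexdigest()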
webnlgqa.py ADDED
@@ -0,0 +1,199 @@
+import os
+import zipfile
+import json
+import base64
+import sys
+import traceback
+
+import datasets
+
+_CITATION = """\
+@inproceedings{lecorve2022sparql2text,
+  title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
+  author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
+  journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
+  year={2022}
+}
+"""
+
+_HOMEPAGE = ""
+
+_URLS = {
+    "train": "train.json",
+    "dev": "dev.json",
+    "test": "test.json",
+    "challenge": "challenge.json"
+}
+
+_DESCRIPTION = """\
+Augmented version of WebNLG v3.0 English with follow-up SPARQL queries with their associated answer(s). A small portion of it also contains natural language questions associated with the queries.
+"""
+
+class WebNLGQA(datasets.GeneratorBasedBuilder):
+    """
+    WebNLG-QA: Augmented version of WebNLG v3.0 English with follow-up SPARQL queries with their associated answer(s). A small portion of it also contains natural language questions associated with the queries.
+    """
+
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                {
+                    "category": datasets.Value("string"),
+                    "size": datasets.Value("int32"),
+                    "id": datasets.Value("string"),
+                    "eid": datasets.Value("string"),
+                    "original_triple_sets": [
+                        {"subject": datasets.Value("string"),
+                         "property": datasets.Value("string"),
+                         "object": datasets.Value("string")}
+                    ],
+                    "modified_triple_sets": [
+                        {"subject": datasets.Value("string"),
+                         "property": datasets.Value("string"),
+                         "object": datasets.Value("string")}
+                    ],
+                    "shape": datasets.Value("string"),
+                    "shape_type": datasets.Value("string"),
+                    "lex": datasets.Sequence(
+                        {
+                            "comment": datasets.Value("string"),
+                            "lid": datasets.Value("string"),
+                            "text": datasets.Value("string"),
+                            "lang": datasets.Value("string"),
+                        }
+                    ),
+                    "test_category": datasets.Value("string"),
+                    "dbpedia_links": datasets.Sequence(datasets.Value("string")),
+                    "links": datasets.Sequence(datasets.Value("string")),
+                    "graph": [
+                        [datasets.Value("string")]
+                    ],
+                    "main_entity": datasets.Value("string"),
+                    "mappings": [
+                        {
+                            "modified": datasets.Value("string"),
+                            "readable": datasets.Value("string"),
+                            "graph": datasets.Value("string")
+                        }
+                    ],
+                    "dialogue": [
+                        {
+                            "question": [{
+                                "source": datasets.Value("string"),
+                                "text": datasets.Value("string")
+                            }],
+                            "graph_query": datasets.Value("string"),
+                            "readable_query": datasets.Value("string"),
+                            "graph_answer": [
+                                datasets.Value("string")
+                            ],
+                            "readable_answer": [
+                                datasets.Value("string")
+                            ],
+                            "type": [datasets.Value("string")]
+                        }
+                    ]
+                }
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # Downloads the data and defines the splits
+        # dl_manager is a datasets.download.DownloadManager that can be used to
+        # download and extract URLs
+        paths = dl_manager.download_and_extract(_URLS)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": paths['train'],
+                            "split": "train"}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": paths['dev'],
+                            "split": "dev"}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": paths['test'],
+                            "split": "test"}
+            ),
+            datasets.SplitGenerator(
+                name="challenge",
+                gen_kwargs={"filepath": paths['challenge'],
+                            "split": "challenge"}
+            )
+        ]
+
+    def _generate_examples(self, filepath, split):
+        """Yields examples."""
+
+        def transform_sample(original_sample):
+            transformed_sample = {
+                "category": "",
+                "size": -1,
+                "id": "",
+                "eid": "",
+                "original_triple_sets": [],
+                "modified_triple_sets": [],
+                "shape": "",
+                "shape_type": "",
+                "lex": [],
+                "test_category": "",
+                "dbpedia_links": [],
+                "links": [],
+                "graph": [],
+                "main_entity": "",
+                "mappings": [],
+                "dialogue": []
+            }
+
+            for (old_key, new_key) in [("modifiedtripleset", "modified_triple_sets"), ("originaltriplesets", "original_triple_sets"), ("dbpedialinks", "dbpedia_links"), ("lexicalisations", "lex"), ("xml_id", "eid")]:
+                original_sample[new_key] = original_sample[old_key]
+                del original_sample[old_key]
+
+            original_sample["original_triple_sets"] = original_sample["original_triple_sets"]["originaltripleset"][0]
+
+            for l in original_sample["lex"]:
+                l["lid"] = l["xml_id"]
+                del l["xml_id"]
+                l["text"] = l["lex"]
+                del l["lex"]
+
+            for turn in original_sample["dialogue"]:
+                if "question" in turn:
+                    old_format = turn["question"]
+                    new_format = []
+                    for source, text in old_format.items():
+                        new_format.append({"source": source, "text": text})
+                    turn["question"] = new_format
+
+            for k in transformed_sample:
+                if k in original_sample:
+                    transformed_sample[k] = original_sample[k]
+            # transformed_sample.update(original_sample)
+
+            return transformed_sample
+
+        # Yields (key, example) tuples from the dataset
+        with open(filepath, 'r') as f:
+            data = json.load(f)
+            key = 0
+            for it in data:
+                yield key, transform_sample(it)
+                key += 1
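
With this loading script in place, the splits (including the extra "challenge" split defined above) can be pulled through the datasets library. A minimal usage sketch; the repository id glecorve/webnlg-qa is an assumption and may differ from the actual Hub path:

from datasets import load_dataset

# Repository id assumed for illustration; recent versions of datasets
# also require trust_remote_code=True to run a script-based dataset.
ds = load_dataset("glecorve/webnlg-qa", trust_remote_code=True)

sample = ds["train"][0]
print(sample["category"], sample["main_entity"])
for turn in sample["dialogue"]:
    # Each dialogue turn pairs a (possibly empty) list of natural-language
    # questions with a SPARQL query and its answer(s).
    print(turn["readable_query"], turn["readable_answer"])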