glecorve committed on
Commit
b05524d
1 Parent(s): 2718385

Inflate JSON dataset

Browse files
.gitattributes CHANGED
@@ -53,3 +53,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ json/annotated_wd_data_test.json filter=lfs diff=lfs merge=lfs -text
57
+ json/annotated_wd_data_test_answerable.json filter=lfs diff=lfs merge=lfs -text
58
+ json/annotated_wd_data_train.json filter=lfs diff=lfs merge=lfs -text
59
+ json/annotated_wd_data_train_answerable.json filter=lfs diff=lfs merge=lfs -text
60
+ json/annotated_wd_data_valid.json filter=lfs diff=lfs merge=lfs -text
61
+ json/annotated_wd_data_valid_answerable.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ ---
5
+
6
+ # Dataset Card for SimpleQuestions-SPARQLtoText
7
+
json/annotated_wd_data_test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46ba9dad991d4fc6411128a91e51687676104893565ddece79178978d7d11bb9
3
+ size 6118384
json/annotated_wd_data_test_answerable.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06dbd443b983158305573f2d9de5871504ccde6392004f78aa9952b7789fee15
3
+ size 3456258
json/annotated_wd_data_train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a6dc7e60da5e20f05bf20c9458045493ca5e2e6e0505e1c587afa43e6f4910e
3
+ size 21114921
json/annotated_wd_data_train_answerable.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12ab42e2e81c2e8ef0b46cd0d02ae033074f15364f355f73ef07711bdae067b5
3
+ size 11975311
json/annotated_wd_data_valid.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7556ffbbe61b9b208c75c79716add105d130c05ac5d037a407c1d9eacfd957e7
3
+ size 2989162
json/annotated_wd_data_valid_answerable.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6716d0a7a9e3dec54a1fce9b90169a097970b48cce526c22817ff98c3553319f
3
+ size 1735194
simplequestions-sparqltotext.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import zipfile
3
+ import json
4
+ import base64
5
+
6
+ import datasets
7
+
8
+ try:
9
+ import gitlab
10
+ except ImportError:
11
+ print("ERROR: To be able to retrieve this dataset you need to install the `python-gitlab` package")
12
+
13
# BibTeX citation for the paper introducing the SPARQL-to-Text task.
# NOTE: the original file carried the title of an unrelated paper by the same
# authors ("Coqar: Question rewriting on coqa"); corrected to the actual
# AACL-IJCNLP 2022 SPARQL-to-Text paper matching the bibkey and venue.
_CITATION = """\
@inproceedings{lecorve2022sparql2text,
  title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Agents},
  author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
  journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
  year={2022}
}
"""

# Homepage of the dataset (none provided).
_HOMEPAGE = ""

# Repository-relative paths of the per-split JSON files (stored via Git LFS).
_URLS = {
    "train": "json/annotated_wd_data_train.json",
    "valid": "json/annotated_wd_data_valid.json",
    "test": "json/annotated_wd_data_test.json",
}

_DESCRIPTION = """\
SimpleQuestions-SPARQL2Text: Special version of SimpleQuestions with SPARQL queries formatted for the SPARQL-to-Text task
"""
33
+
34
+
35
class SimpleQuestions_SPARQL2Text(datasets.GeneratorBasedBuilder):
    """
    SimpleQuestions-SPARQL2Text: Special version of SimpleQuestions with
    SPARQL queries formatted for the SPARQL-to-Text task.
    """

    VERSION = datasets.Version("1.0.0")

    # Every example field is a plain string. Declared once so that _info()
    # and _generate_examples() cannot drift out of sync.
    _FIELDS = (
        "original_nl_question",
        "recased_nl_question",
        "sparql_query",
        "verbalized_sparql_query",
        "nl_subject",
        "nl_property",
        "nl_object",
        "nl_answer",
        "rdf_subject",
        "rdf_property",
        "rdf_object",
        "rdf_answer",
        "rdf_target",
    )

    def _info(self):
        """Return the dataset metadata (features, citation, supervised keys)."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {name: datasets.Value("string") for name in self._FIELDS}
            ),
            # (input, target) tuple used when as_supervised=True in
            # builder.as_dataset.
            supervised_keys=("recased_nl_question", "verbalized_sparql_query"),
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSON files and return the split generators.

        download_and_extract() already extracts any archives; the split files
        are plain JSON, so the original per-path dl_manager.extract() calls
        were redundant — the downloaded paths are used directly.
        """
        paths = dl_manager.download_and_extract(_URLS)
        split_names = {
            "train": datasets.Split.TRAIN,
            "valid": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": paths[key], "split": key},
            )
            for key, split in split_names.items()
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one split's JSON file.

        Fields absent from a JSON record default to the empty string so every
        yielded example carries the full schema declared in _FIELDS.
        """

        def transform_sample(original_sample):
            # Start from an all-empty record, then overlay the actual values.
            transformed_sample = {name: "" for name in self._FIELDS}
            transformed_sample.update(original_sample)
            return transformed_sample

        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for key, sample in enumerate(data):
            yield key, transform_sample(sample)