glecorve commited on
Commit
22b65de
1 Parent(s): 1f4aa36

Inflate JSON dataset

Browse files
Files changed (6) hide show
  1. .gitattributes +3 -0
  2. README.md +48 -0
  3. json/test.json +3 -0
  4. json/train.json +3 -0
  5. json/valid.json +3 -0
  6. lc_quad2-sparqltotext.py +127 -0
.gitattributes CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ json/test.json filter=lfs diff=lfs merge=lfs -text
57
+ json/train.json filter=lfs diff=lfs merge=lfs -text
58
+ json/valid.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - qa
4
+ - sparql
5
+ language:
6
+ - en
7
+ access: private
8
+ dataset_info:
9
+ features:
10
+ - name: uid
11
+ dtype: int32
12
+ - name: NNQT_question
13
+ dtype: string
14
+ - name: paraphrased_question
15
+ dtype: string
16
+ - name: question
17
+ dtype: string
18
+ - name: simplified_query
19
+ dtype: string
20
+ - name: sparql_dbpedia18
21
+ dtype: string
22
+ - name: sparql_wikidata
23
+ dtype: string
24
+ - name: answer
25
+ list: string
26
+ - name: solved_answer
27
+ list: string
28
+ - name: subgraph
29
+ dtype: string
30
+ - name: template
31
+ dtype: string
32
+ - name: template_id
33
+ dtype: string
34
+ - name: template_index
35
+ dtype: int32
36
+ splits:
37
+ - name: train
38
+ num_bytes: 241621115
39
+ num_examples: 21101
40
+ - name: validation
41
+ num_bytes: 11306539
42
+ num_examples: 3010
43
+ - name: test
44
+ num_bytes: 21146458
45
+ num_examples: 6024
46
+ download_size: 371924378
47
+ dataset_size: 274074112
48
+ ---
json/test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e66dc8d2da5d8a0fa445de33878c7b0ae0d7ced963f3d7cd515559c9ac190b56
3
+ size 28751264
json/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:899763217cf6fea8e2e1c482a9deebfc4e1ee7b4a862db3a8773540f561aa5c0
3
+ size 327664660
json/valid.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5ea6be02a33030241e4012c31c936dbe550be5a69b491ea27257a44b5598bf3
3
+ size 15508454
lc_quad2-sparqltotext.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import zipfile
import json
import base64

import datasets

try:
    # Optional dependency: only needed when fetching the raw dataset from GitLab.
    import gitlab
except ImportError:
    print("ERROR: To be able to retrieve this dataset you need to install the `python-gitlab` package")

# NOTE(review): the original title here ("Coqar: Question rewriting on coqa")
# was copy-pasted from the authors' COQAR paper; the citation key and this
# repository refer to their SPARQL-to-Text AACL-IJCNLP 2022 paper, so the
# title has been corrected accordingly -- please confirm against the paper.
_CITATION = """\
@inproceedings{lecorve2022sparql2text,
  title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
  author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
  journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
  year={2022}
}
"""

_HOMEPAGE = ""

# Relative paths of the three split files shipped with this repository.
_URLS = {
    "train": "json/train.json",
    "valid": "json/valid.json",
    "test": "json/test.json"
}

_DESCRIPTION = """\
Special version of LCQuAD-2.0 for the SPARQL-to-Text task
"""
35
class LCQuAD20_SPARQL2Text(datasets.GeneratorBasedBuilder):
    """
    LCQuAD_2.0-SPARQL2Text: Special version of LCQuAD-2.0 for the SPARQL-to-Text task
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: feature schema, supervised keys, citation."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    "uid": datasets.Value("int32"),
                    "NNQT_question": datasets.Value("string"),
                    "paraphrased_question": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "simplified_query": datasets.Value("string"),
                    "sparql_dbpedia18": datasets.Value("string"),
                    "sparql_wikidata": datasets.Value("string"),
                    "answer": [datasets.Value("string")],
                    "solved_answer": [datasets.Value("string")],
                    "subgraph": datasets.Value("string"),
                    "template": datasets.Value("string"),
                    "template_id": datasets.Value("string"),
                    "template_index": datasets.Value("int32")
                }
            ),
            # (input, target) pair used when as_supervised=True in
            # builder.as_dataset: generate the question text from the query.
            supervised_keys=("simplified_query", "question"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per train/valid/test JSON file.

        dl_manager is a datasets.download.DownloadManager used to resolve
        (and, for archives, extract) the files listed in _URLS.
        """
        # download_and_extract() already extracts archives; the split files are
        # plain JSON, so the extra dl_manager.extract() call that was applied
        # to each path in the original code was redundant and has been removed.
        paths = dl_manager.download_and_extract(_URLS)
        split_names = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "valid"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": paths[key], "split": key},
            )
            for split, key in split_names
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one split's JSON file."""

        def transform_sample(original_sample):
            # Start from neutral defaults so every feature declared in _info()
            # is present even when the raw record omits some fields, then
            # overlay the actual record values.
            transformed_sample = {
                "uid": -1,
                "NNQT_question": "",
                "paraphrased_question": "",
                "question": "",
                "simplified_query": "",
                "sparql_dbpedia18": "",
                "sparql_wikidata": "",
                "answer": [],
                "solved_answer": [],
                "subgraph": "",
                "template": "",
                "template_id": "",
                "template_index": -1
            }
            transformed_sample.update(original_sample)

            return transformed_sample

        # Yields (key, example) tuples from the dataset. The encoding is given
        # explicitly so loading does not depend on the platform default.
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for key, it in enumerate(data):
            yield key, transform_sample(it)