system HF staff commited on
Commit
df93623
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4) hide show
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/2.0.0/dummy_data.zip +3 -0
  4. lc_quad.py +108 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "LC-QuAD 2.0 is a Large Question Answering dataset with 30,000 pairs of question and its corresponding SPARQL query. The target knowledge base is Wikidata and DBpedia, specifically the 2018 version. Please see our paper for details about the dataset creation process and framework.\n", "citation": "\n@inproceedings{dubey2017lc2,\ntitle={LC-QuAD 2.0: A Large Dataset for Complex Question Answering over Wikidata and DBpedia},\nauthor={Dubey, Mohnish and Banerjee, Debayan and Abdelkawi, Abdelrahman and Lehmann, Jens},\nbooktitle={Proceedings of the 18th International Semantic Web Conference (ISWC)},\nyear={2019},\norganization={Springer}\n}\n", "homepage": "http://lc-quad.sda.tech/", "license": "", "features": {"NNQT_question": {"dtype": "string", "id": null, "_type": "Value"}, "uid": {"dtype": "int32", "id": null, "_type": "Value"}, "subgraph": {"dtype": "string", "id": null, "_type": "Value"}, "template_index": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "sparql_wikidata": {"dtype": "string", "id": null, "_type": "Value"}, "sparql_dbpedia18": {"dtype": "string", "id": null, "_type": "Value"}, "template": {"dtype": "string", "id": null, "_type": "Value"}, "paraphrased_question": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "lc_quad", "config_name": "default", "version": {"version_str": "2.0.0", "description": null, "datasets_version_to_prepare": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4072474, "num_examples": 4781, "dataset_name": "lc_quad"}, "train": {"name": "train", "num_bytes": 16660621, "num_examples": 19293, "dataset_name": "lc_quad"}}, "download_checksums": {"https://github.com/AskNowQA/LC-QuAD2.0/archive/master.zip": {"num_bytes": 3868211, "checksum": "b3475828357b6a4661362287911275240efebe484ca9849ac6b2e314f5e90843"}}, "download_size": 3868211, "dataset_size": 
20733095, "size_in_bytes": 24601306}}
dummy/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b36702aaebe1b946ed53bcb287856636314bdc2598a10a4f47058e51d8d2219d
3
+ size 2286
lc_quad.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Loading script for the LC-QuAD 2.0 complex question answering dataset."""

from __future__ import absolute_import, division, print_function

import json
import os

import datasets


# BibTeX entry for the LC-QuAD 2.0 paper (ISWC 2019).
_CITATION = """
@inproceedings{dubey2017lc2,
title={LC-QuAD 2.0: A Large Dataset for Complex Question Answering over Wikidata and DBpedia},
author={Dubey, Mohnish and Banerjee, Debayan and Abdelkawi, Abdelrahman and Lehmann, Jens},
booktitle={Proceedings of the 18th International Semantic Web Conference (ISWC)},
year={2019},
organization={Springer}
}
"""

# Human-readable summary shown on the dataset page. NOTE: this text is
# mirrored verbatim in dataset_infos.json — keep the two in sync.
_DESCRIPTION = """\
LC-QuAD 2.0 is a Large Question Answering dataset with 30,000 pairs of question and its corresponding SPARQL query. The target knowledge base is Wikidata and DBpedia, specifically the 2018 version. Please see our paper for details about the dataset creation process and framework.
"""
# Archive containing LC-QuAD2.0-master/dataset/{train,test}.json.
_URL = "https://github.com/AskNowQA/LC-QuAD2.0/archive/master.zip"

class LcQuad(datasets.GeneratorBasedBuilder):
    """Dataset builder for LC-QuAD 2.0 (questions paired with SPARQL queries)."""

    VERSION = datasets.Version("2.0.0")

    def _info(self):
        """Describe the dataset: features, homepage and citation."""
        # Column name -> scalar dtype; every field is a string except the
        # integer identifier and template index.
        feature_dtypes = {
            "NNQT_question": "string",
            "uid": "int32",
            "subgraph": "string",
            "template_index": "int32",
            "question": "string",
            "sparql_wikidata": "string",
            "sparql_dbpedia18": "string",
            "template": "string",
            "paraphrased_question": "string",
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {name: datasets.Value(dtype) for name, dtype in feature_dtypes.items()}
            ),
            # No canonical (input, target) pairing is declared.
            supervised_keys=None,
            homepage="http://lc-quad.sda.tech/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map each split to its JSON file."""
        archive_root = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(archive_root, "LC-QuAD2.0-master", "dataset")
        split_files = [
            (datasets.Split.TRAIN, "train.json"),
            (datasets.Split.TEST, "test.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                # Passed straight through to _generate_examples.
                gen_kwargs={"filepath": os.path.join(data_dir, filename)},
            )
            for split, filename in split_files
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from one split's JSON file."""
        with open(filepath, encoding="utf-8") as handle:
            rows = json.load(handle)
        for index, row in enumerate(rows):
            # Skip malformed rows where any field other than "answer" holds a
            # list instead of a scalar value. Skipped indices leave gaps in
            # the yielded keys, matching the original behavior.
            if any(key != "answer" and isinstance(value, list) for key, value in row.items()):
                continue
            yield index, {
                "NNQT_question": row["NNQT_question"],
                "uid": row["uid"],
                "subgraph": row["subgraph"],
                "template_index": row["template_index"],
                "question": row["question"],
                "sparql_wikidata": row["sparql_wikidata"],
                "sparql_dbpedia18": row["sparql_dbpedia18"],
                "template": row["template"],
                "paraphrased_question": row["paraphrased_question"],
            }