Datasets:
Tasks:
Question Answering
Languages:
English
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
found
Annotations Creators:
crowdsourced
Source Datasets:
original
License:
Commit
•
7da9739
1
Parent(s):
66cf28c
Support streaming (#2)
Browse files
- Add data file (584a9796d0b807dae28d82be1e18ce195f2a6bd6)
- Update script (44f7e6501469bc7df9dc32d33716d450a4e11ece)
- data.zip +3 -0
- lc_quad.py +2 -2
data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ee830807de094312bddea27a7111c03aefa033d862835513a0c4ecd198133a08
|
3 |
+
size 3867829
|
lc_quad.py
CHANGED
@@ -22,7 +22,7 @@ organization={Springer}
|
|
22 |
_DESCRIPTION = """\
|
23 |
LC-QuAD 2.0 is a Large Question Answering dataset with 30,000 pairs of question and its corresponding SPARQL query. The target knowledge base is Wikidata and DBpedia, specifically the 2018 version. Please see our paper for details about the dataset creation process and framework.
|
24 |
"""
|
25 |
-
_URL = "
|
26 |
|
27 |
|
28 |
class LcQuad(datasets.GeneratorBasedBuilder):
|
@@ -67,7 +67,7 @@ class LcQuad(datasets.GeneratorBasedBuilder):
|
|
67 |
# dl_manager is a datasets.download.DownloadManager that can be used to
|
68 |
# download and extract URLs
|
69 |
dl_dir = dl_manager.download_and_extract(_URL)
|
70 |
-
dl_dir = os.path.join(dl_dir, "
|
71 |
return [
|
72 |
datasets.SplitGenerator(
|
73 |
name=datasets.Split.TRAIN,
|
22 |
_DESCRIPTION = """\
|
23 |
LC-QuAD 2.0 is a Large Question Answering dataset with 30,000 pairs of question and its corresponding SPARQL query. The target knowledge base is Wikidata and DBpedia, specifically the 2018 version. Please see our paper for details about the dataset creation process and framework.
|
24 |
"""
|
25 |
+
_URL = "data.zip"
|
26 |
|
27 |
|
28 |
class LcQuad(datasets.GeneratorBasedBuilder):
|
67 |
# dl_manager is a datasets.download.DownloadManager that can be used to
|
68 |
# download and extract URLs
|
69 |
dl_dir = dl_manager.download_and_extract(_URL)
|
70 |
+
dl_dir = os.path.join(dl_dir, "data")
|
71 |
return [
|
72 |
datasets.SplitGenerator(
|
73 |
name=datasets.Split.TRAIN,
|