Datasets:
add yago-facts.ttl to facts.tar.gz (adds ~30+ million more facts)
Browse files
- README.md +3 -3
- facts.tar.gz +2 -2
- yago45en.py +3 -3
README.md
CHANGED
@@ -28,9 +28,9 @@ dataset_info:
|
|
28 |
config_name: default
|
29 |
splits:
|
30 |
- name: train
|
31 |
-
num_bytes:
|
32 |
-
num_examples:
|
33 |
-
dataset_size:
|
34 |
viewer: false
|
35 |
---
|
36 |
|
|
|
28 |
config_name: default
|
29 |
splits:
|
30 |
- name: train
|
31 |
+
num_bytes: 42709902295
|
32 |
+
num_examples: 249675587
|
33 |
+
dataset_size: 42709902295
|
34 |
viewer: false
|
35 |
---
|
36 |
|
facts.tar.gz
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f451bfa0d6535ffaa223c3dea8c1a148be2f9ecd15e2b5efab7f989821a66331
|
3 |
+
size 3856455011
|
yago45en.py
CHANGED
@@ -10,7 +10,7 @@ SCHEMA = Namespace('http://schema.org/')
|
|
10 |
YAGO = Namespace('http://yago-knowledge.org/resource/')
|
11 |
|
12 |
class YAGO45DatasetBuilder(GeneratorBasedBuilder):
|
13 |
-
VERSION = "1.0.0"
|
14 |
|
15 |
def _info(self):
|
16 |
return DatasetInfo(
|
@@ -32,10 +32,10 @@ class YAGO45DatasetBuilder(GeneratorBasedBuilder):
|
|
32 |
# Download and extract the dataset files
|
33 |
facts, taxonomy = dl_manager.download_and_extract(["facts.tar.gz", "yago-taxonomy.ttl"])
|
34 |
|
35 |
-
facts = os.path.join(facts, "
|
36 |
|
37 |
# Define splits for each chunk of your dataset.
|
38 |
-
chunk_paths = [os.path.join(facts, chunk) for chunk in os.listdir(facts) if chunk.endswith('.nt')]
|
39 |
return [SplitGenerator(name=datasets.Split.TRAIN,
|
40 |
gen_kwargs={'chunk_paths': chunk_paths})]
|
41 |
|
|
|
10 |
YAGO = Namespace('http://yago-knowledge.org/resource/')
|
11 |
|
12 |
class YAGO45DatasetBuilder(GeneratorBasedBuilder):
|
13 |
+
VERSION = "1.0.1"
|
14 |
|
15 |
def _info(self):
|
16 |
return DatasetInfo(
|
|
|
32 |
# Download and extract the dataset files
|
33 |
facts, taxonomy = dl_manager.download_and_extract(["facts.tar.gz", "yago-taxonomy.ttl"])
|
34 |
|
35 |
+
facts = os.path.join(facts, "facts/")
|
36 |
|
37 |
# Define splits for each chunk of your dataset.
|
38 |
+
chunk_paths = [os.path.join(facts, chunk) for chunk in os.listdir(facts) if chunk.endswith('.nt')] + [taxonomy]
|
39 |
return [SplitGenerator(name=datasets.Split.TRAIN,
|
40 |
gen_kwargs={'chunk_paths': chunk_paths})]
|
41 |
|