Datasets: add script

Browse files — yago-4.5-en.py (+61 lines, new file)
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from datasets import DatasetBuilder, SplitGenerator, DownloadConfig, load_dataset, DownloadManager
|
3 |
+
from rdflib import Graph, URIRef, Literal, BNode
|
4 |
+
from rdflib.namespace import RDF, RDFS, OWL, XSD, Namespace
|
5 |
+
|
6 |
+
# Common RDF namespaces for YAGO 4.5 data.
# NOTE(review): neither constant is referenced in the visible code — presumably
# intended for IRI construction or prefix binding elsewhere; confirm before removal.
SCHEMA = Namespace('http://schema.org/')

YAGO = Namespace('http://yago-knowledge.org/resource/')
|
9 |
+
|
10 |
+
class YAGO45DatasetBuilder(DatasetBuilder):
    """Builder for the YAGO 4.5 (English) knowledge-graph dataset.

    Parses the YAGO taxonomy (Turtle) and the fact chunks (N-Triples) with
    rdflib, and yields one ``{subject, predicate, object}`` string triple
    per example in a single ``train`` split.

    NOTE(review): ``_generate_examples`` is the ``GeneratorBasedBuilder``
    hook; plain ``DatasetBuilder`` never calls it — confirm the intended
    base class.
    """

    VERSION = "1.0.0"

    # Class-level default kept for backward compatibility with any external
    # reader of ``YAGO45DatasetBuilder.taxonomy``. ``_split_generators``
    # rebinds a fresh per-instance Graph so that repeated runs or multiple
    # builder instances do not accumulate triples in one shared object.
    taxonomy = Graph()

    def _info(self):
        """Return dataset metadata and the (string) feature schema.

        NOTE(review): the ``datasets`` contract for ``_info`` is a
        ``DatasetInfo`` object, not a plain dict — confirm downstream
        consumers accept this shape.
        """
        return {
            "features": {
                "subject": "string",
                "predicate": "string",
                "object": "string"
            },
            "homepage": "https://yago-knowledge.org/",
            "license": "CC BY 3.0",
            "citation": "@article{suchanek2023integrating,title={Integrating the Wikidata Taxonomy into YAGO},author={Suchanek, Fabian M and Alam, Mehwish and Bonald, Thomas and Paris, Pierre-Henri and Soria, Jules},journal={arXiv preprint arXiv:2308.11884},year={2023}}"
        }

    def _split_generators(self, dl_manager):
        """Download/extract the raw dumps and define the single train split.

        Returns a one-element list with a ``train`` ``SplitGenerator`` whose
        ``gen_kwargs`` carry the ``.nt`` chunk paths and the taxonomy's
        prefix-to-namespace bindings.
        """
        # Download and extract the dataset files into a local "raw" cache.
        dl_manager.download_config = DownloadConfig(cache_dir=os.path.abspath("raw"))
        dl_manager.download_and_extract(["raw/facts.tar.gz", "raw/yago-taxonomy.ttl"])
        # NOTE(review): the extracted paths returned above are discarded and
        # ``dl_manager.manual_dir`` is read instead — verify they coincide.

        # Parse the taxonomy into a fresh per-instance graph. Fix: the
        # original parsed into the class-level Graph, so every run/instance
        # mutated (and accumulated into) the same shared object.
        self.taxonomy = Graph()
        self.taxonomy.parse(os.path.join(dl_manager.manual_dir, 'yago-taxonomy.ttl'), format='turtle')

        # Preserve the taxonomy's prefix -> namespace bindings for the chunks.
        prefix_mappings = dict(self.taxonomy.namespaces())

        # One split covering every N-Triples chunk found next to the taxonomy.
        chunk_paths = [os.path.join(dl_manager.manual_dir, chunk)
                       for chunk in os.listdir(dl_manager.manual_dir)
                       if chunk.endswith('.nt')]
        return [SplitGenerator(name="train",
                               gen_kwargs={'chunk_paths': chunk_paths,
                                           'prefix_mappings': prefix_mappings})]

    def _generate_examples(self, chunk_paths, prefix_mappings):
        """Yield ``(key, example)`` pairs, one per RDF triple.

        Keys must be unique across the whole split, so a single running
        counter spans all chunks. Fix: the original restarted ``enumerate``
        at 0 for each chunk, producing duplicate keys whenever more than one
        chunk file was present.
        """
        key = 0
        for chunk_path in chunk_paths:
            # Load one chunk into its own graph, re-binding the taxonomy's
            # namespace prefixes before parsing.
            graph = Graph()
            for prefix, namespace in prefix_mappings.items():
                graph.bind(prefix, namespace)
            graph.parse(chunk_path, format='nt')

            # Yield individual triples from the graph as plain strings.
            for subject, predicate, object_ in graph:
                yield key, {
                    'subject': str(subject),
                    'predicate': str(predicate),
                    'object': str(object_)
                }
                key += 1
|