# nell/process.py
"""
- Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
- NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
tar -xzf nell.tar.gz
wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
tar -xzf wiki.tar.gz
"""
import os
import json
from itertools import chain
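
# The archives are assumed to unpack to NELL/ and Wiki/, each containing an
# ent2ids file plus train_tasks.json / dev_tasks.json / test_tasks.json,
# which are the paths this script reads below.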
data_dir_nell = "NELL"
data_dir_wiki = "Wiki"
os.makedirs("data", exist_ok=True)
if not os.path.exists(data_dir_nell):
    raise ValueError("Please download the dataset first\n"
                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
                     "tar -xzf nell.tar.gz")
if not os.path.exists(data_dir_wiki):
    raise ValueError("Please download the dataset first\n"
                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz\n"
                     "tar -xzf wiki.tar.gz")
def read_file(_file):
    """Read a task file mapping each task (relation) to its (head, relation, tail)
    triples, and flatten it into a list of {"relation", "head", "tail"} dicts."""
    with open(_file, 'r') as f_reader:
        tmp = json.load(f_reader)
    flatten = list(chain(*[[{"relation": r, "head": h, "tail": t} for (h, r, t) in v] for v in tmp.values()]))
    return flatten
def read_vocab(_file):
    """Read an ent2ids mapping and return its entity names in sorted order."""
    with open(_file) as f_reader:
        ent2ids = json.load(f_reader)
    return sorted(ent2ids.keys())
if __name__ == '__main__':
    # Export the entity vocabulary of each dataset, one entity per line.
    vocab = read_vocab(f"{data_dir_nell}/ent2ids")
    with open("data/nell.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
    vocab = read_vocab(f"{data_dir_wiki}/ent2ids")
    with open("data/wiki.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
    # Convert each task file into a JSONL split, one triple per line.
    for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
        d = read_file(f"{data_dir_nell}/{i}")
        with open(f"data/nell.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))
        d = read_file(f"{data_dir_wiki}/{i}")
        with open(f"data/wiki.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))