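"""Create the train/validation/test splits for the T-REx dataset.

The test split uses a held-out set of predicates that is disjoint from the
train/validation predicates; the remaining triples are shuffled with a fixed
seed and split 80/20 into train and validation.
"""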
import json
import os
from random import shuffle, seed

import pandas as pd


def get_test_predicate(_data):
    """Return a held-out set of predicates covering under 20% of all triples."""
    tmp_df = pd.DataFrame(_data)
    # Number of triples per predicate, most frequent first.
    predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    total_num = sum(predicates_count.values())
    pre_k = list(predicates_count.keys())
    seed(42)
    shuffle(pre_k)
    predicates_train = []
    for k in pre_k:
        predicates_train.append(k)
        # Greedily assign predicates to the train pool until it covers more
        # than 80% of the triples; the remainder become the test predicates.
        if sum([predicates_count[i] for i in predicates_train]) > total_num * 0.8:
            break
    predicates_test = sorted([i for i in pre_k if i not in predicates_train])
    return predicates_test
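

# One-off interactive pass (kept for reference) that originally built the
# test file: triples whose predicates fall into the held-out pool from
# get_test_predicate() were shown one by one and written to
# data/t_rex.filter_unified.test.jsonl on confirmation.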
# if not os.path.exists("data/t_rex.filter_unified.test.jsonl"):
#     with open(f"data/t_rex.filter_unified.min_entity_{max(parameters_min_e_freq)}_max_predicate_{min(parameters_max_p_freq)}.jsonl") as f:
#         data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
#     pred_test = get_test_predicate(data)
#     data_test = [i for i in data if i['predicate'] in pred_test]
#     f_writer = open("data/t_rex.filter_unified.test.jsonl", 'w')
#     for n, i in enumerate(data_test):
#         print(f"\n[{n+1}/{len(data_test)}]")
#         print(f"{json.dumps(i, indent=4)}")
#         flag = input(">>> (enter to add to test)")
#         if flag == '':
#             i['relation'] = i.pop('predicate')
#             i['head'] = i.pop('subject')
#             i['tail'] = i.pop('object')
#             f_writer.write(json.dumps(i) + '\n')
#     f_writer.close()
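
# Load the manually curated test split and collect its held-out relations.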
with open("data/t_rex.filter_unified.test.jsonl") as f:
data_test = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
test_predicate = set([i['relation'] for i in data_test])
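
# Fix the RNG seed so the train/validation shuffle below is reproducible.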
seed(42)
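
# Load the filtered corpus and rename predicate/subject/object
# to relation/head/tail.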
with open(f"data/t_rex.filter_unified.min_entity_5.jsonl") as f:
data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
for i in data:
    i['relation'] = i.pop('predicate')
    i['head'] = i.pop('subject')
    i['tail'] = i.pop('object')
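
# Drop every triple whose relation appears in the test split,
# then shuffle and split the remainder 80/20 into train/validation.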
data = [i for i in data if i['relation'] not in test_predicate]
shuffle(data)
data_train = data[:int(len(data) * 0.8)]
data_valid = data[int(len(data) * 0.8):]
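
# Write the splits as JSON Lines, one triple per line.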
with open(f"data/t_rex.filter_unified.min_entity_5.train.jsonl", "w") as f:
f.write('\n'.join([json.dumps(i) for i in data_train]))
with open(f"data/t_rex.filter_unified.min_entity_5.validation.jsonl", "w") as f:
f.write('\n'.join([json.dumps(i) for i in data_valid]))
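
# Report the size and the number of distinct relations of each split.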
df_train = pd.DataFrame(data_train)
df_valid = pd.DataFrame(data_valid)
df_test = pd.DataFrame(data_test)
print(f"[train]: {len(df_train)} triples, {len(df_train['relation'].unique())}")
print(f"[validation]: {len(df_valid)} triples, {len(df_valid['relation'].unique())}")
print(f"[test]: {len(df_test)} triples, {len(df_test['relation'].unique())}")