"""Create test/train/validation splits of the filtered T-REX dataset: hold out a
predicate-disjoint test set, then split each filtered file 90/10 into train and
validation."""
import json
import os
from itertools import product
from random import seed, shuffle

import pandas as pd

# Grid of filtering thresholds: minimum entity frequency and maximum predicate
# frequency used to name the input files.
parameters_min_e_freq = [1, 2, 3, 4]
parameters_max_p_freq = [100, 50, 25, 10]

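
# The test split is predicate-disjoint: a random subset of predicates covering
# roughly 20% of the triples is held out entirely, so no test predicate ever
# appears in train or validation.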
def get_test_predicate(_data):
    """Return a sorted list of held-out predicates covering ~20% of the triples."""
    tmp_df = pd.DataFrame(_data)
    # Count triples per predicate, most frequent first.
    predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    total_num = sum(predicates_count.values())
    pre_k = list(predicates_count.keys())
    # Fixed seed so the predicate-level split is reproducible.
    seed(42)
    shuffle(pre_k)
    # Greedily assign shuffled predicates to the train side until it holds just
    # over 80% of the triples; keep a running total instead of re-summing.
    predicates_train = []
    train_total = 0
    for k in pre_k:
        predicates_train.append(k)
        train_total += predicates_count[k]
        if train_total > total_num * 0.8:
            break
    # The remaining predicates form the test set.
    predicates_test = sorted([i for i in pre_k if i not in predicates_train])
    return predicates_test


if not os.path.exists("data/t_rex.filter_unified.test.jsonl"):
    # Derive the test predicates from the most aggressively filtered file
    # (highest entity threshold, lowest predicate threshold).
    with open(f"data/t_rex.filter_unified.min_entity_{max(parameters_min_e_freq)}_max_predicate_{min(parameters_max_p_freq)}.jsonl") as f:
        data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    pred_test = get_test_predicate(data)
    data_test = [i for i in data if i['predicate'] in pred_test]
    # Manually review each candidate: press enter to keep it, type anything
    # else to skip it.
    with open("data/t_rex.filter_unified.test.jsonl", 'w') as f_writer:
        for n, i in enumerate(data_test):
            print(f"\n[{n + 1}/{len(data_test)}]")
            print(json.dumps(i, indent=4))
            flag = input(">>> (enter to add to test)")
            if flag == '':
                f_writer.write(json.dumps(i) + '\n')

# Load the (manually curated) test set and collect its predicates.
with open("data/t_rex.filter_unified.test.jsonl") as f:
    data_test = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
test_predicate = {i['predicate'] for i in data_test}

# For every threshold pair, drop the test predicates and split the remaining
# triples 90/10 into train and validation.
seed(42)
for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl") as f:
        data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    data = [i for i in data if i['predicate'] not in test_predicate]
    shuffle(data)
    data_train = data[:int(len(data) * 0.9)]
    data_valid = data[int(len(data) * 0.9):]
    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.train.jsonl", "w") as f:
        f.write('\n'.join([json.dumps(i) for i in data_train]))
    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.validation.jsonl", "w") as f:
        f.write('\n'.join([json.dumps(i) for i in data_valid]))
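
# Hypothetical sanity check, not part of the original pipeline: uncomment to
# verify that no generated train/validation file shares a predicate with the
# held-out test split.
# for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
#     for split in ["train", "validation"]:
#         with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.{split}.jsonl") as f:
#             rows = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
#         assert not test_predicate & {i['predicate'] for i in rows}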