|
""" |
|
TODO: save the data with different config |
|
TODO: get stats for the frequency based selection |
|
""" |
|
import json
from itertools import product
from random import shuffle, seed

import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

from datasets import Dataset
|
# Grid of filtering configurations: minimum entity frequency and maximum number of
# triples kept per predicate. The 2x2 plots below assume four values of each.
parameters_min_e_freq = [4, 8, 12, 16]
parameters_max_p_freq = [100, 50, 25, 10]
assert len(parameters_min_e_freq) == 4
assert len(parameters_max_p_freq) == 4

sns.set_theme(style="whitegrid")
|
with open(f"data/t_rex.filter_unified.jsonl") as f: |
|
data = Dataset.from_list([json.loads(i) for i in f.read().split('\n') if len(i) > 0]) |
|
df_main = data.to_pandas() |
|
|
|
|
|
def is_entity(token):
    """Return True if ``token`` contains an uppercase character (treated as an entity)."""
    return any(i.isupper() for i in token)
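# For example (hypothetical tokens): is_entity("Paris") is True because of the
# uppercase "P", while is_entity("population") is False (all lowercase).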
|
|
|
|
|
def filtering(row, min_freq: int = 3, target: str = "subject"):
    """Keep non-entity rows unconditionally; keep entity rows only if their count in
    the ``target`` role (subject or object) is at least ``min_freq``."""
    if not row['is_entity']:
        return True
    return row[target] >= min_freq
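# For example (hypothetical row): an entity counted 5 times as a subject passes
# filtering(row, min_freq=4, target='subject'), but would be dropped with min_freq=8.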
|
|
|
|
|
def create_split(_data):
    """Split triples so that train/validation and test share no predicate.

    Predicates are shuffled (seed 42) and greedily assigned to the train pool until
    it covers roughly 80% of the triples; the remaining predicates form the test set.
    10% of the train pool is then held out as validation.
    """
    tmp_df = pd.DataFrame(_data)
    predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    total_num = sum(predicates_count.values())
    pre_k = list(predicates_count.keys())
    seed(42)
    shuffle(pre_k)
    predicates_train = []
    for k in pre_k:
        predicates_train.append(k)
        if sum(predicates_count[i] for i in predicates_train) > total_num * 0.8:
            break
    predicates_test = sorted([i for i in pre_k if i not in predicates_train])
    test_data = [i for i in _data if i['predicate'] in predicates_test]
    train_data = [i for i in _data if i['predicate'] in predicates_train]
    shuffle(train_data)
    validation_data = train_data[:int(len(train_data) * 0.1)]
    train_data = train_data[int(len(train_data) * 0.1):]
    return train_data, validation_data, test_data
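# Usage sketch (``rows`` is a hypothetical list of triple dicts like the ones loaded above):
#   train, validation, test = create_split(rows)
#   assert not {r['predicate'] for r in train} & {r['predicate'] for r in test}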
|
|
|
|
|
def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 3, random_sampling: bool = True):
    """Filter triples by entity frequency, balance them per predicate, and return the
    predicate distribution, entity distribution, dataset size, and selected triples."""
    df = df_main.copy()

    # Count how often each entity appears as subject and as object.
    c_sub = df.groupby("subject")['title'].count()
    c_obj = df.groupby("object")['title'].count()
    key = set(list(c_sub.index) + list(c_obj.index))
    count = pd.DataFrame([{'entity': k, "subject": c_sub[k] if k in c_sub else 0, "object": c_obj[k] if k in c_obj else 0} for k in key])
    count.index = count.pop('entity')
    count['is_entity'] = [is_entity(i) for i in count.index]
    count['sum'] = count['subject'] + count['object']

    # Keep only triples whose subject and object both pass the frequency filter.
    count_filter_sub = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='subject'), axis=1)]['subject']
    count_filter_obj = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='object'), axis=1)]['object']
    vocab_sub = set(count_filter_sub.index)
    vocab_obj = set(count_filter_obj.index)
    df['flag_subject'] = [i in vocab_sub for i in df['subject']]
    df['flag_object'] = [i in vocab_obj for i in df['object']]
    df['flag'] = df['flag_subject'] & df['flag_object']
    df_filter = df[df['flag']].copy()  # copy so the column edits below do not act on a view
    df_filter.pop("flag")
    df_filter.pop("flag_subject")
    df_filter.pop("flag_object")
    df_filter['count_subject'] = [count_filter_sub.loc[i] for i in df_filter['subject']]
    df_filter['count_object'] = [count_filter_obj.loc[i] for i in df_filter['object']]
    df_filter['count_sum'] = df_filter['count_subject'] + df_filter['count_object']
|
    # Cap each predicate at max_pairs_predicate triples (dropping predicates with fewer
    # than min_pairs_predicate), either by random sampling or by keeping the triples
    # whose entities are most frequent.
    if random_sampling:
        df_balanced = pd.concat(
            [g if len(g) <= max_pairs_predicate else g.sample(max_pairs_predicate, random_state=0) for _, g in
             df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
    else:
        df_balanced = pd.concat(
            [g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate) for _, g in
             df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
    df_balanced.pop("count_subject")
    df_balanced.pop("count_object")
    df_balanced.pop("count_sum")
    target_data = [i.to_dict() for _, i in df_balanced.iterrows()]

    # Frequency distributions over predicates and entities in the balanced data.
    predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    entity, count = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True)
    entity_dist = dict(list(zip(entity.tolist(), count.tolist())))
    return predicate_dist, entity_dist, len(df_balanced), target_data
|
|
if __name__ == '__main__':
    p_dist_full = []
    e_dist_full = []
    data_size_full = []
    config = []
    candidates = list(product(parameters_min_e_freq, parameters_max_p_freq))

    # Run every configuration with frequency-based selection (random_sampling=False),
    # collect its statistics, and save the train/validation/test splits.
    for min_e_freq, max_p_freq in candidates:
        p_dist, e_dist, data_size, new_data = main(
            min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq, random_sampling=False)
        p_dist_full.append(p_dist)
        e_dist_full.append(e_dist)
        data_size_full.append(data_size)
        config.append([min_e_freq, max_p_freq])

        train, validation, test = create_split(new_data)
        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.train.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in train]))
        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.validation.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in validation]))
        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.test.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in test]))
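        # For example, the min_e_freq=4, max_p_freq=100 configuration writes
        # data/t_rex.filter_unified.min_entity_4_max_predicate_100.{train,validation,test}.jsonl.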
|
|
|
|
|
print("- Data Size") |
|
df_size = pd.DataFrame([{"min entity": mef, "max predicate": mpf, "freq": x} for x, (mef, mpf) in zip(data_size_full, candidates)]) |
|
df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq") |
|
df_size.index.name = "min entity / max predicate" |
|
df_size.to_csv("data/stats.data_size.csv") |
|
print(df_size.to_markdown()) |
|
df_size_p = pd.DataFrame( |
|
[{"min entity": mef, "max predicate": mpf, "freq": len(x)} for x, (mef, mpf) in zip(p_dist_full, candidates)]) |
|
df_size_p = df_size_p.pivot(index="max predicate", columns="min entity", values="freq") |
|
df_size_p = df_size_p.loc[10] |
|
df_size_p.to_csv("data/stats.predicate_size.csv") |
|
print(df_size_p.to_markdown()) |
|
|
|
|
|
    # Predicate distribution (triples per predicate, sorted by frequency) for each
    # configuration: one panel per max-predicate cap, one line per min entity frequency.
    df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T
    df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Predicate Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], parameters_max_p_freq):
        _df = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in parameters_min_e_freq]]
        _df.columns = [f"min entity: {mef}" for mef in parameters_min_e_freq]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        if mpf != 100:  # keep the legend only on the first panel
            ax.legend_.remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique predicates sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.predicate_distribution.png", bbox_inches='tight')
    fig.clf()
|
    # Entity distribution (mentions per entity, sorted by frequency) for each configuration,
    # with a log-scaled x-axis; same panel layout as above.
    df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T
    df_e.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Entity Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], parameters_max_p_freq):
        _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in parameters_min_e_freq]]
        _df.columns = [f"min entity: {mef}" for mef in parameters_min_e_freq]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        ax.set(xscale='log')
        if mpf != 100:  # keep the legend only on the first panel
            ax.legend_.remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique entities sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')
    fig.clf()
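    # A minimal reload sketch (commented out; assumes the files written above exist),
    # using the datasets library's JSON loader, e.g. for the min_entity_4 / max_predicate_100 split:
    #   from datasets import load_dataset
    #   dataset = load_dataset(
    #       "json",
    #       data_files={
    #           "train": "data/t_rex.filter_unified.min_entity_4_max_predicate_100.train.jsonl",
    #           "validation": "data/t_rex.filter_unified.min_entity_4_max_predicate_100.validation.jsonl",
    #           "test": "data/t_rex.filter_unified.min_entity_4_max_predicate_100.test.jsonl",
    #       },
    #   )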
|
|