"""
@Filename       : kg_embedding_trainer.py
@Create Time    : 2022/03/04 12:19
@Author         : Rylynn
@Description    : Builds train/valid/test KG-triple splits (head \t tail \t relation)
                  from content.json for knowledge-graph embedding training.

"""
import json
import os.path
import random

from util.preprocess import load_content

dataset = 'dblp_new'

triple_set = set()

root_path = '../../../data/{}'.format(dataset)

# Collect every content id that appears in any cascade split (train/test/valid)
# so that entity counting below only considers contents the cascades actually use.
content_id_set = set()
for cascade_filename in ('cascade.txt', 'cascadetest.txt', 'cascadevalid.txt'):
    with open(os.path.join(root_path, cascade_filename)) as f:
        # Iterate the file lazily instead of readlines(); the first
        # whitespace-separated token of each line is the content id.
        for line in f:
            content_id_set.add(line.strip().split()[0])

# ---- Build and write the KG-embedding train/valid/test splits ----
# (original code wrapped this in a no-op `if True:`; removed)

# entity -> number of triple occurrences (as head or as tail)
entity_dict = {}

# Per-content knowledge-graph triples: {content_id: [[h, r, t], ...]}
# (the stray `.format(dataset)` on the path was a no-op and has been dropped)
with open(os.path.join(root_path, 'content.json')) as kg_file:
    kg = json.load(kg_file)

# First pass: count entity frequencies, restricted to contents that
# appear in one of the cascade splits.
for content_id, triples in kg.items():
    if content_id not in content_id_set:
        continue
    for h, r, t in triples:
        entity_dict[h] = entity_dict.get(h, 0) + 1
        entity_dict[t] = entity_dict.get(t, 0) + 1

# Keep only entities occurring in at least MIN_ENTITY_FREQ triples.
MIN_ENTITY_FREQ = 3
filter_entity_set = {entity for entity, freq in entity_dict.items()
                     if freq >= MIN_ENTITY_FREQ}

# Second pass: collect triples whose head and tail both survive the filter.
# NOTE(review): unlike the counting pass, this pass does NOT skip contents
# absent from content_id_set, so triples from unused contents may be kept —
# this matches the original behavior; confirm it is intended.
for content_id, triples in kg.items():
    for h, r, t in triples:
        if h in filter_entity_set and t in filter_entity_set:
            triple_set.add((h, r, t))

triple_list = list(triple_set)
random.shuffle(triple_list)

# Split: the last 10000 shuffled triples are held out (5000 valid + 5000 test).
# NOTE(review): if fewer than 10000 triples exist, the train slice is empty.
# Output column order is head \t tail \t relation.
# Files are opened with `with` so handles are flushed and closed (the original
# leaked them), and only after the kg has loaded successfully.
with open(os.path.join(root_path, 'train.txt'), mode='w', encoding='utf8') as train_file:
    for h, r, t in triple_list[:-10000]:
        train_file.write('{}\t{}\t{}\n'.format(h, t, r))

with open(os.path.join(root_path, 'valid.txt'), mode='w', encoding='utf8') as valid_file:
    for h, r, t in triple_list[-10000:-5000]:
        valid_file.write('{}\t{}\t{}\n'.format(h, t, r))

with open(os.path.join(root_path, 'test.txt'), mode='w', encoding='utf8') as test_file:
    for h, r, t in triple_list[-5000:]:
        test_file.write('{}\t{}\t{}\n'.format(h, t, r))

# -------------- Statistics -------------
entity_set = set()
relation_set = set()
for h, r, t in triple_list:
    entity_set.add(h)
    entity_set.add(t)
    relation_set.add(r)

print('Entities number: {}'.format(len(entity_set)))
print('Relations number: {}'.format(len(relation_set)))
print('Triples number: {}'.format(len(triple_list)))



import numpy as np

#
# a = np.load('../../../data/memetracker/entity.npy')
# print(a.shape)