import copy
import os
import random
import logging
import time
import pickle

import numpy as np

# Hyperparameters for TransE-style embedding training
# (values match Bordes et al. 2013 conventions — confirm against experiments).
dim = 50              # embedding dimension k
epochs = 1000         # training epochs
batches = 400         # minibatches per epoch
learning_rate = 0.01  # SGD step size
margin = 1            # hinge-loss margin


def reload(name1, name2):
    """Load previously checkpointed entity/relation embedding dicts.

    Args:
        name1: path of the pickled entity-embedding dict.
        name2: path of the pickled relation-embedding dict.

    Returns:
        (entity_data, relation_data) as unpickled from the two files.
    """
    with open(name1, 'rb') as entity_file, open(name2, 'rb') as relation_file:
        entities = pickle.load(entity_file, encoding='bytes')
        relations = pickle.load(relation_file, encoding='bytes')
        print('reload done.')
    return entities, relations


def _read_id_map(path):
    """Read a tab/space separated name-to-id file into a dict of strings.

    Raises:
        ValueError: if a line has fewer than two fields.
    """
    mapping = {}
    with open(path, 'r') as f:
        for line in f:
            fields = [s.strip() for s in line.replace(' ', '\t').strip().split('\t')]
            if len(fields) < 2:
                raise ValueError('decode ERROR')
            mapping[fields[0]] = fields[1]
    return mapping


def load_data(data_dir='FB15k'):
    """Load id mappings and training triples of an FB15k-style dataset.

    Args:
        data_dir: directory containing entity2id.txt, relation2id.txt and
            train.txt. Defaults to 'FB15k' for backward compatibility.

    Returns:
        (entity, relation, G): entity and relation map names to id strings;
        G is a list of [head_id, tail_id, relation_id] triples.

    Raises:
        ValueError: if any line has fewer fields than expected
            (the original used `assert`, which is stripped under -O).
    """
    entity = _read_id_map(os.path.join(data_dir, 'entity2id.txt'))
    relation = _read_id_map(os.path.join(data_dir, 'relation2id.txt'))
    G = []
    with open(os.path.join(data_dir, 'train.txt'), 'r') as f:
        for line in f:
            fields = [s.strip() for s in line.replace(' ', '\t').strip().split('\t')]
            if len(fields) < 3:
                raise ValueError('decode ERROR')
            G.append([entity[fields[0]], entity[fields[1]], relation[fields[2]]])
    return entity, relation, G


def disL2(h, r, t):
    """Squared L2 distance of (h + r) from t — the translation energy score."""
    diff = h + r - t
    return np.sum(np.square(diff))


def init(entity, relation, G, emb_dim=None):
    """Randomly initialize embeddings: uniform in (-6/sqrt(k), 6/sqrt(k)), then L2-normalized.

    Args:
        entity: mapping entity name -> id string.
        relation: mapping relation name -> id string.
        G: training triples (unused here; kept for interface compatibility).
        emb_dim: embedding dimension k; defaults to the module-level `dim`
            so existing 3-argument callers behave as before.

    Returns:
        (entity_data, relation_data): dicts mapping id string -> unit-norm
        numpy vector of length k.
    """
    k = dim if emb_dim is None else emb_dim
    bound = 6 / np.sqrt(k)

    def _draw():
        # Uniform draw, then project onto the unit sphere.
        v = np.random.uniform(-bound, bound, k)
        return v / np.linalg.norm(v, ord=2)

    entity_data = {eid: _draw() for eid in entity.values()}
    relation_data = {rid: _draw() for rid in relation.values()}
    return entity_data, relation_data


def train(entity_data, relation_data, G):
    """Minibatch SGD on the margin ranking loss over (positive, corrupted) pairs.

    Mutates `entity_data`/`relation_data` in place and checkpoints both dicts
    to disk after every epoch.

    NOTE(review): relies on module globals `entity` (negative sampling pool),
    `st` (start time for logging) and the hyperparameter constants — confirm
    they are set before calling.

    Args:
        entity_data: dict id -> embedding vector, updated in place.
        relation_data: dict id -> embedding vector, updated in place.
        G: list of [head_id, tail_id, relation_id] training triples.
    """
    batch_size = len(G) // batches
    for epoch in range(epochs):
        loss = 0  # loss depends on randomly drawn negatives, so accumulate per epoch
        for batch in range(batches):
            # Sampling with random.sample per batch; a triple may recur across batches.
            this_batch = random.sample(G, batch_size)
            train_data = []
            for triple in this_batch:
                corrupted = get_corrupted(entity, triple)
                # BUGFIX: the original tested `(i, temp) not in train_data` (a
                # tuple) while appending lists, so the dedup never matched.
                if [triple, corrupted] not in train_data:
                    train_data.append([triple, corrupted])
            d, corrupted_d = 0, 0
            for pos, neg in train_data:
                h, t = entity_data[pos[0]], entity_data[pos[1]]
                r = relation_data[pos[2]]
                corrupted_h, corrupted_t = entity_data[neg[0]], entity_data[neg[1]]
                # Hinge loss: only margin-violating pairs contribute. The
                # gradient is formed by hand (not from max(0, .)), so skip
                # non-violating pairs explicitly.
                l = margin + disL2(h, r, t) - disL2(corrupted_h, r, corrupted_t)
                if l <= 0:
                    continue
                loss += l
                d += h + r - t
                corrupted_d += corrupted_h + r - corrupted_t
            d = d / batch_size
            corrupted_d = corrupted_d / batch_size
            for pos, neg in train_data:
                h, t = entity_data[pos[0]], entity_data[pos[1]]
                r = relation_data[pos[2]]
                corrupted_h, corrupted_t = entity_data[neg[0]], entity_data[neg[1]]
                h -= learning_rate * d
                t -= learning_rate * (-d)
                r -= learning_rate * (d - corrupted_d)
                corrupted_t -= learning_rate * corrupted_d
                corrupted_h -= learning_rate * (-corrupted_d)
                # BUGFIX: the original rebound local names when normalizing,
                # discarding the result; write the unit vectors back instead.
                entity_data[pos[0]] = h / np.linalg.norm(h)
                entity_data[pos[1]] = t / np.linalg.norm(t)
                relation_data[pos[2]] = r / np.linalg.norm(r)
                entity_data[neg[1]] = corrupted_t / np.linalg.norm(corrupted_t)
                entity_data[neg[0]] = corrupted_h / np.linalg.norm(corrupted_h)
            if (batch + 1) % 50 == 0:
                logging.info('epoch {}:batch {}/{} ,use {:.2f} second(s), loss {:.6f}'.format(epoch, batch + 1, batches,
                                                                                              time.time() - st, loss))
        logging.info('epoch {},use {:.2f} second(s), loss {:.6f}'.format(epoch, time.time() - st, loss))
        save_data(entity_data, relation_data, 'FB15k/entity_out_ori.bin', 'FB15k/relation_out_ori.bin')


def get_corrupted(entity, triple):
    """Return a negative triple: a copy with head OR tail replaced.

    With probability 0.5 the head (index 0) is replaced, otherwise the tail
    (index 1). The replacement is rejection-sampled until it differs from the
    value it replaces; the relation (index 2) is never touched.

    BUGFIX: the original tail branch rejection-sampled against triple[0]
    (the head) and then discarded the sampled value, drawing a fresh
    unconstrained entity — so the tail could be "corrupted" to itself.

    Args:
        entity: mapping entity name -> id string; values are the sampling pool.
        triple: [head_id, tail_id, relation_id]; not mutated.

    Returns:
        A new corrupted [head_id, tail_id, relation_id] list.
    """
    corrupted = copy.deepcopy(triple)  # direct assignment would mutate the caller's triple
    candidates = list(entity.values())  # hoisted: build the pool once, not per retry
    slot = 0 if random.random() > 0.5 else 1  # 0 = head, 1 = tail
    replacement = random.choice(candidates)
    while replacement == corrupted[slot]:
        replacement = random.choice(candidates)
    corrupted[slot] = replacement
    return corrupted


def save_data(entity_data, relation_data, name1, name2):
    """Checkpoint the entity and relation embedding dicts to pickle files.

    Args:
        entity_data: dict id -> embedding vector.
        relation_data: dict id -> embedding vector.
        name1: destination path for the entity pickle.
        name2: destination path for the relation pickle.
    """
    with open(name1, 'wb') as entity_file:
        pickle.dump(entity_data, entity_file)
    with open(name2, 'wb') as relation_file:
        pickle.dump(relation_data, relation_file)


if __name__ == '__main__':
    st = time.time()  # global start time, read by train() for log timestamps
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s - %(filename)s[line:%(lineno)d]: %(message)s')
    entity, relation, G = load_data()
    # Resume from an existing checkpoint when both files are present,
    # otherwise start from fresh random embeddings.
    saved = os.listdir('FB15k')
    has_checkpoint = 'entity_out_ori.bin' in saved and 'relation_out_ori.bin' in saved
    if has_checkpoint:
        entity_data, relation_data = reload('FB15k/entity_out_ori.bin', 'FB15k/relation_out_ori.bin')
    else:
        entity_data, relation_data = init(entity, relation, G)
    train(entity_data, relation_data, G)
