import torch
import pickle
import os
import random
import math
import multiprocessing
import numpy as np
import json

# Pin all CUDA work to GPU #1; must be set before torch initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'


# test.1 — tiny hard-coded fixture used for smoke-testing the batch pipeline
def get_fake_batch(batch):
    """Return a fixed (positive, negative) triple pair keyed by batch index."""
    special = {0: ((0, 0, 1), (0, 0, 2))}
    return special.get(batch, ((1, 1, 2), (1, 1, 0)))


# test.2 — tiny hand-built adjacency fixture
def make_adj_table():
    """Return a small (entity adjacency, relation adjacency) table pair for tests."""
    entity_table = {0: [0], 1: [0, 2], 2: [1]}
    relation_table = {0: [], 1: []}
    return entity_table, relation_table


# test.3 — fixed dense adjacency matrices for tests
def make_adj():
    """Return a (3x3 entity, 2x2 relation) pair of float adjacency matrices."""
    entity_adj = torch.tensor([[1.0, 1.0, 0.0],
                               [1.0, 1.0, 1.0],
                               [0.0, 1.0, 1.0]])
    relation_adj = torch.eye(2)
    return entity_adj, relation_adj


# All relation paths of length 1 or 2 starting from head_ent,
# keyed by the entity each path reaches: {entity: [[relation, ...], ...]}
def get_1or2_path_from_head(head_ent, tail_ent, entity_adj_table):
    """Collect 1-hop and 2-hop relation paths out of head_ent.

    The direct edge head_ent -> tail_ent is deliberately excluded from the
    1-hop paths, so the relation being contextualized never counts as its
    own context; 2-hop paths to tail_ent are still included.
    """
    paths = {}
    if head_ent not in entity_adj_table:
        return paths

    hop1_rel = {}    # neighbor -> relation on the edge head_ent -> neighbor
    neighbors = []
    for nbr, rel in entity_adj_table[head_ent]:
        neighbors.append(nbr)
        hop1_rel[nbr] = rel
        if nbr != tail_ent:
            paths[nbr] = [[rel]]

    for nbr in neighbors:
        for dest, rel in entity_adj_table.get(nbr, []):
            paths.setdefault(dest, []).append([hop1_rel[nbr], rel])

    return paths  # {entity: [[relation, ...], ...]}


def find_relation_context(h, t, entity_adj_table):
    """Return every 1- or 2-hop relation path connecting h to t (possibly [])."""
    reachable = get_1or2_path_from_head(h, t, entity_adj_table)
    return reachable.get(t, [])


def construct_adj_table():
    """Build adjacency/context tables from the global train_list.

    Returns:
        entity_adj_table: {head_entity: [tail_entity, ...]} (deduplicated)
        relation_adj_table: {relation: [path_tuple, ...]} (deduplicated
            1/2-hop relation paths that co-occur with the relation)
        max_context_num: largest context list length across both tables
        entity_coef: (entity_total, 1) GPU tensor, 1/|context| per entity
        relation_coef: (relation_total, 1) GPU tensor, 1/(|context|+1) per relation
    """
    head2edges = dict()   # {head_entity: [(tail_entity, relation), ...]}
    head2tails = dict()   # {head_entity: {tail_entity, ...}}
    rel2paths = dict()    # {relation: [[relation, ...], ...]}

    for h, r, t in train_list:
        if h in head2edges:
            head2edges[h].append((t, r))
            head2tails[h].add(t)
        else:
            head2edges[h] = [(t, r)]
            head2tails[h] = {t}

    for h, r, t in train_list:
        # All relations lying on 1- or 2-edge paths that connect h to t.
        context_paths = find_relation_context(h, t, head2edges)
        if r in rel2paths:
            rel2paths[r] += context_paths
        else:
            rel2paths[r] = context_paths

    entity_coef = torch.zeros(entity_total, 1).cuda()
    relation_coef = torch.zeros(relation_total, 1).cuda()
    max_context_num = 0

    entity_adj_table = dict()
    for head, tails in head2tails.items():
        max_context_num = max(max_context_num, len(tails))
        entity_coef[head] = 1 / len(tails)
        entity_adj_table[head] = list(tails)

    relation_adj_table = dict()
    for rel, paths in rel2paths.items():
        unique_paths = list({tuple(p) for p in paths})  # deduplicate
        max_context_num = max(max_context_num, len(unique_paths))
        relation_coef[rel] = 1 / (len(unique_paths) + 1)
        relation_adj_table[rel] = unique_paths

    return entity_adj_table, relation_adj_table, max_context_num, entity_coef, relation_coef


def read_file(train_file_name):
    """Parse a triple file where each data line is 'head tail relation'.

    Lines without exactly three tokens (e.g. the leading count line) are
    skipped. Tokens are reordered so the result is a list of (h, r, t).
    """
    triples = []
    with open(train_file_name) as fin:
        for row in fin:
            tokens = row.split()
            if len(tokens) != 3:
                continue
            head, tail, rel = (int(tok) for tok in tokens)
            triples.append((head, rel, tail))
    return triples


def read_entity_total(entity_file_name):
    """Return the entity count stored on the first line of the given file."""
    with open(entity_file_name) as fin:
        first_line = fin.readline()
    return int(first_line)


def read_relation_total(relation_file_name):
    """Return the relation count stored on the first line of the given file."""
    with open(relation_file_name) as handle:
        return int(handle.readline())


def one_negative_sampling(golden_triple, train_set, sample_times=None,
                          num_entities=None, sampling_mode=None):
    """Corrupt one golden triple into a list of negative triples.

    Uniform sampling: for each negative, a random entity replaces either the
    head or the tail (equal probability) and the candidate is rejected if it
    is a known positive triple.

    Args:
        golden_triple: positive (h, r, t) triple.
        train_set: set of all positive triples, used to reject accidental
            positives.
        sample_times: number of negatives to draw; defaults to the module
            global ``train_times`` (backward compatible).
        num_entities: entity vocabulary size; defaults to the module global
            ``entity_total``.
        sampling_mode: 0 for uniform corruption; defaults to the module
            global ``bern``.

    Returns:
        A list of ``sample_times`` negative (h, r, t) triples.

    Raises:
        NotImplementedError: if sampling_mode is not 0 (Bernoulli sampling
            is not implemented). The original code printed a message and
            returned None, which made callers crash later when iterating.
    """
    if sample_times is None:
        sample_times = train_times
    if num_entities is None:
        num_entities = entity_total
    if sampling_mode is None:
        sampling_mode = bern
    if sampling_mode != 0:
        raise NotImplementedError("bern sampling has not been implemented")

    h, r, t = golden_triple
    negative_triples = []
    for _ in range(sample_times):
        while True:
            e = random.randint(0, num_entities - 1)
            # Corrupt head or tail with equal probability.
            if random.randint(0, 1):
                candidate = (e, r, t)
            else:
                candidate = (h, r, e)
            if candidate not in train_set:
                negative_triples.append(candidate)
                break

    return negative_triples


# def negative_sampling(golden_triple, train_set):
#     result = []
#     process_number = train_times
#     pool = multiprocessing.Pool(processes=process_number)
#     for i in range(train_times):
#         result.append(pool.apply_async(one_negative_sampling, (golden_triple, train_set)).get())
#     pool.close()
#     pool.join()
#     return result


def prepare_data():
    """Materialize positive triples and their negative samples as GPU tensors.

    Returns:
        phs, prs, pts: int tensors of shape (len(train_list),) holding the
            positive heads, relations and tails.
        nhs, nrs, nts: int tensors of shape (train_times, len(train_list))
            holding one negative triple per epoch for each positive.
    """
    n = len(train_list)
    # Fix: phs was the only CPU tensor while its five siblings were on the
    # GPU; get_batch slices all six together, so they must live on one device.
    phs = torch.IntTensor(n).cuda()
    prs = torch.IntTensor(n).cuda()
    pts = torch.IntTensor(n).cuda()
    nhs = torch.IntTensor(train_times, n).cuda()
    nrs = torch.IntTensor(train_times, n).cuda()
    nts = torch.IntTensor(train_times, n).cuda()

    train_set = set(train_list)

    for i, golden_triple in enumerate(train_list):
        phs[i], prs[i], pts[i] = golden_triple

        # [train_times * (nh, nr, nt)] — one negative per epoch.
        negative_triples = one_negative_sampling(golden_triple, train_set)

        for j, (nh, nr, nt) in enumerate(negative_triples):
            nhs[j][i], nrs[j][i], nts[j][i] = nh, nr, nt

    return phs, prs, pts, nhs, nrs, nts


dataset = 'FB15K-part'
# NOTE(review): the assignment below overrides the one above, so the
# FB15K-part setting is dead code — keep only one, or make it a CLI flag.
dataset = 'YAGO3-10-part'
############################ train parameter
entity_total = read_entity_total(entity_file_name='./data/' + dataset + '/entity2id.txt')
# NOTE(review): relation count is read with read_entity_total (not
# read_relation_total); the two helpers behave identically, but this looks
# like an oversight — confirm and use the matching helper.
relation_total = read_entity_total(entity_file_name='./data/' + dataset + '/relation2id.txt')
train_list = read_file(train_file_name='./data/' + dataset + '/train2id.txt')
entity_adj_table, relation_adj_table, max_context_num, entity_coef, relation_coef = construct_adj_table()

print('entity_total: ' + str(entity_total))
print('relation_total: ' + str(relation_total))
print('train_total: ' + str(len(train_list)))

train_times = 101    # epochs; also the number of negatives drawn per positive
learning_rate = 0.01
batch_size = 500
nbatchs = math.ceil(len(train_list) / batch_size)  # batches per epoch (with single-sample input this equals the training-set size)
dim = 100            # embedding dimension
margin = 2.0         # ranking-loss margin
bern = 0             # 0 = uniform negative sampling (Bernoulli not implemented)


# print('preparing data...')
# phs, prs, pts, nhs, nrs, nts = prepare_data()
# print('preparing data complete')


def get_batch(batch, epoch, phs, prs, pts, nhs, nrs, nts):
    """Slice mini-batch `batch` from the prepared tensors.

    Positives are sliced along dim 0; negatives additionally select the
    row for the current `epoch`. The final batch may be shorter.
    """
    lo = batch * batch_size
    hi = min(lo + batch_size, len(train_list))
    positives = (phs[lo:hi], prs[lo:hi], pts[lo:hi])
    negatives = (nhs[epoch, lo:hi], nrs[epoch, lo:hi], nts[epoch, lo:hi])
    return positives, negatives


########################### link prediction
def load_parameter(file_name, mode='our'):
    """Load trained embeddings from the JSON file ./res/<file_name> onto the GPU.

    mode='transe' returns (entity_emb, relation_emb); any other mode also
    returns the context, GCN-weight, gate and v tensors of the full model.
    """
    with open('./res/' + file_name, "r") as f:
        emb = json.loads(f.read())

    def to_gpu(key):
        # Index-by-id lookup tensors, moved to the GPU.
        return torch.Tensor(emb[key]).cuda()

    if mode == 'transe':
        return to_gpu('entity_emb'), to_gpu('relation_emb')

    return (to_gpu('entity_emb'), to_gpu('relation_emb'),
            to_gpu('entity_context'), to_gpu('entity_gcn_weight'),
            to_gpu('gate_entity'), to_gpu('v'))


def get_head_batch(golden_triple, num_entities=None):
    """Build the head-corruption batch for link-prediction evaluation.

    Row i is (i, r, t): every entity substituted as the head of the golden
    triple, with relation and tail fixed.

    Args:
        golden_triple: (h, r, t) triple under evaluation.
        num_entities: vocabulary size; defaults to the module global
            ``entity_total`` (backward compatible).

    Returns:
        An int32 tensor of shape (num_entities, 3).
    """
    if num_entities is None:
        num_entities = entity_total
    head_batch = torch.zeros(num_entities, 3, dtype=torch.int32)
    head_batch[:, 0] = torch.arange(num_entities)
    head_batch[:, 1] = golden_triple[1]  # scalar broadcasts over the column
    head_batch[:, 2] = golden_triple[2]
    return head_batch


def get_tail_batch(golden_triple, num_entities=None):
    """Build the tail-corruption batch for link-prediction evaluation.

    Row i is (h, r, i): every entity substituted as the tail of the golden
    triple, with head and relation fixed.

    Args:
        golden_triple: (h, r, t) triple under evaluation.
        num_entities: vocabulary size; defaults to the module global
            ``entity_total`` (backward compatible).

    Returns:
        An int32 tensor of shape (num_entities, 3).
    """
    if num_entities is None:
        num_entities = entity_total
    tail_batch = torch.zeros(num_entities, 3, dtype=torch.int32)
    tail_batch[:, 0] = golden_triple[0]  # scalar broadcasts over the column
    tail_batch[:, 1] = golden_triple[1]
    tail_batch[:, 2] = torch.arange(num_entities)
    return tail_batch


# Held-out evaluation triples, same (h, r, t) layout as train_list.
test_list = read_file(train_file_name='./data/' + dataset + '/test2id.txt')
print('test_total: ' + str(len(test_list)))