import torch
import torch.nn.functional as F
import os
import numpy as np

import config

# Expose only physical GPU 1 to this process; all .cuda() calls below land on it.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'


def _calc(h, t, r):
    return torch.norm(h + r - t, p=2, dim=1)


def get_adj_entity_vec(adj_entity_list):
    '''
    Gather the context vectors of every entity's neighbours, in order, zero-padding
    each row up to the maximum context size.
    :param adj_entity_list: per-entity collections of neighbour ids
    :return: (batch x max_context_num x dim padded tensor, list of neighbour counts)
    '''
    batch_size = len(adj_entity_list)
    padded = torch.zeros(batch_size, config.max_context_num, config.dim).cuda()
    counts = []
    for idx, neighbours in enumerate(adj_entity_list):
        counts.append(len(neighbours))
        if not neighbours:
            continue
        vecs = entity_context[list(neighbours)]  # context-entity vectors come from entity_context
        padded[idx, :vecs.shape[0], :vecs.shape[1]] = vecs
    return padded, counts


def score(hs, o, adj_entity_num_list, is_relation=None):
    """Attention scores between each context slot and its target vector.

    :param hs: batch x max_context_num x dim context vectors
    :param o: batch x dim target vectors
    :param adj_entity_num_list: per-sample context counts (unused in this branch,
        kept for interface compatibility)
    :param is_relation: must be 0 (entity scoring); relation scoring is not implemented
    :return: batch x max_context_num score tensor
    :raises ValueError: if is_relation is not 0
    """
    if is_relation != 0:
        # Was a bare print() returning None, which made callers fail later with an
        # opaque error; fail fast instead.
        raise ValueError('parameter is_relation must be either 0 or 1')
    # Tile o so every one of the max_context_num slots is paired with the target vector.
    os = torch.cat(tuple([o] * config.max_context_num), dim=1).reshape(-1, config.max_context_num, config.dim)
    tmp = F.relu(torch.cat((hs, os), dim=2), inplace=False)  # batch x max x 2dim
    scores = torch.matmul(tmp, v)  # batch x max; `scores` avoids shadowing this function
    return scores


def gcn(o, adj_entity_vec_list, adj_entity_num_list):
    '''
    One averaging GCN layer applied per sample over the target vector and its context.
    :param o: batch_size x dim target vectors
    :param adj_entity_vec_list: batch_size x max_context x dim padded context vectors
    :param adj_entity_num_list: list (length batch_size) of real context counts
    :return: batch_size x dim updated target vectors
    '''
    batch_size = adj_entity_vec_list.shape[0]
    res = torch.Tensor(batch_size, config.dim).cuda()  # batch_size x dim

    for idx in range(batch_size):
        # Stack the target vector (row 0) on top of its padded context vectors.
        H = torch.zeros(config.max_context_num + 1, config.dim).cuda()
        H[0, :] = o[idx, :]
        H[1:, :] = adj_entity_vec_list[idx, :, :]

        # Uniform row-normalized adjacency over the target plus its real neighbours;
        # padded slots get weight 0.
        n_real = adj_entity_num_list[idx] + 1
        A = torch.zeros(1, config.max_context_num + 1).cuda()
        A[0, :n_real] = torch.ones(n_real).cuda() / n_real

        output = F.relu(torch.mm(torch.mm(A, H), entity_gcn_weight))
        res[idx, :] = output[0, :]  # row 0 after the layer is the updated target vector

    return res


def calc_entity_context_vec(adj_entity_vec_list, adj_entity_num_list, o):
    '''
    Attention-weighted aggregation of each sample's context vectors.
    :param adj_entity_vec_list: batch x max x dim
    :param adj_entity_num_list: per-sample context counts, length batch
    :param o: batch_size x dim target vectors
    :return: batch x dim aggregated context vectors
    '''
    batch_size = len(adj_entity_vec_list)
    raw_scores = score(adj_entity_vec_list, o, adj_entity_num_list, is_relation=0)  # batch x max
    alpha = torch.zeros(batch_size, config.max_context_num).cuda()

    for idx in range(batch_size):
        n = adj_entity_num_list[idx]
        if n == 0:
            continue
        # Softmax only over the real (non-padded) context slots; padded slots keep weight 0.
        alpha[idx, :n] = F.softmax(raw_scores[idx, :n], dim=0).cuda()

    # Scale every context vector by its attention weight, then sum over the context axis.
    sg = torch.sum(torch.unsqueeze(alpha, dim=2) * adj_entity_vec_list, dim=1)  # batch x dim
    return sg


def get_entity_context(entities):
    """Return the adjacency (context) set for each entity id; [] when the id is unknown."""
    contexts = []
    for e in entities:
        contexts.append(config.entity_adj_table.get(int(e), []))
    return contexts


def predict(batch):
    """Score every triple in `batch` with the context-gated translation model.

    :param batch: LongTensor of shape (N, 3) holding (head, relation, tail) id columns
    :return: tensor of N translation distances (lower = more plausible)
    """
    pos_h = batch[:, 0]
    pos_r = batch[:, 1]
    pos_t = batch[:, 2]

    # target vectors
    p_h = entity_emb[pos_h.cpu().numpy()]
    p_t = entity_emb[pos_t.cpu().numpy()]
    p_r = relation_emb[pos_r.cpu().numpy()]

    # context entity ids
    ph_adj_entity_list = get_entity_context(pos_h)
    pt_adj_entity_list = get_entity_context(pos_t)

    # context vectors (padded) and real context counts
    ph_adj_entity_vec_list, ph_adj_entity_num_list = get_adj_entity_vec(ph_adj_entity_list)
    pt_adj_entity_vec_list, pt_adj_entity_num_list = get_adj_entity_vec(pt_adj_entity_list)

    # GCN over target + context
    p_h = gcn(p_h, ph_adj_entity_vec_list, ph_adj_entity_num_list)
    p_t = gcn(p_t, pt_adj_entity_vec_list, pt_adj_entity_num_list)

    # softmax attention over context vectors
    ph_sg = calc_entity_context_vec(ph_adj_entity_vec_list, ph_adj_entity_num_list, p_h)
    pt_sg = calc_entity_context_vec(pt_adj_entity_vec_list, pt_adj_entity_num_list, p_t)

    # gate: blend the GCN output with the attention-aggregated context
    ph_o = torch.mul(gate_entity, p_h) + torch.mul(1 - gate_entity, ph_sg)
    pt_o = torch.mul(gate_entity, p_t) + torch.mul(1 - gate_entity, pt_sg)

    # score for loss — BUGFIX: previously scored the pre-gate p_h/p_t, which made
    # the gate (and the attention branch) dead code; score the gated vectors.
    p_score = _calc(ph_o, pt_o, p_r)

    torch.cuda.empty_cache()

    return p_score


def test_head(golden_triple):
    """Rank the golden head among all candidate heads.

    :param golden_triple: (h, r, t) id triple
    :return: (raw rank, filtered rank), both 1-based; filtered removes candidates
             that form a known training triple
    """
    head_batch = config.get_head_batch(golden_triple)
    value = list(predict(head_batch))
    li = np.argsort(value)

    rank = 0
    filtered_out = 0  # candidates ahead of the target that exist in the training set
    for pos, candidate in enumerate(li):
        if candidate == golden_triple[0]:
            rank = pos + 1
            break
        if (candidate, golden_triple[1], golden_triple[2]) in train_set:
            filtered_out += 1

    del head_batch
    del value
    del li

    return rank, rank - filtered_out


def test_tail(golden_triple):
    """Rank the golden tail among all candidate tails.

    :param golden_triple: (h, r, t) id triple
    :return: (raw rank, filtered rank), both 1-based; filtered removes candidates
             that form a known training triple
    """
    tail_batch = config.get_tail_batch(golden_triple)
    value = list(predict(tail_batch))
    li = np.argsort(value)

    rank = 0
    filtered_out = 0  # candidates ahead of the target that exist in the training set
    for pos, candidate in enumerate(li):
        if candidate == golden_triple[2]:
            rank = pos + 1
            break
        if (golden_triple[0], golden_triple[1], candidate) in train_set:
            filtered_out += 1

    del tail_batch
    del value
    del li

    return rank, rank - filtered_out


def test_link_prediction(test_list):
    '''
    For every test triple: replace the head with every other entity and record the
    rank of the correct triple, then repeat the same for the tail. Reports mean rank
    and hits@1/3/10, raw and filtered.
    '''
    test_total = len(test_list)

    # Each accumulator is [mean_rank_sum, hit@1, hit@3, hit@10]:
    # head/tail (l/r) x raw/filtered.
    l_raw, r_raw = [0, 0, 0, 0], [0, 0, 0, 0]
    l_filter, r_filter = [0, 0, 0, 0], [0, 0, 0, 0]

    def _accumulate(stats, pos):
        # Fold one 1-based rank into a [mr, hit@1, hit@3, hit@10] accumulator.
        stats[0] += pos
        if pos <= 1:
            stats[1] += 1
        if pos <= 3:
            stats[2] += 1
        if pos <= 10:
            stats[3] += 1

    for i, golden_triple in enumerate(test_list):
        print('test ---' + str(i) + '--- triple')
        l_pos, l_filter_pos = test_head(golden_triple)
        r_pos, r_filter_pos = test_tail(golden_triple)  # position, 1-based

        print(golden_triple, end=': ')
        print('l_pos=' + str(l_pos), end=', ')
        print('l_filter_pos=' + str(l_filter_pos), end=', ')
        print('r_pos=' + str(r_pos), end=', ')
        print('r_filter_pos=' + str(r_filter_pos), end='\n')

        _accumulate(l_raw, l_pos)
        _accumulate(r_raw, r_pos)
        _accumulate(l_filter, l_filter_pos)
        _accumulate(r_filter, r_filter_pos)

    def _average(stats):
        # Turn accumulated sums/counts into per-triple averages.
        return [s / test_total for s in stats]

    l_raw, r_raw = _average(l_raw), _average(r_raw)
    l_filter, r_filter = _average(l_filter), _average(r_filter)

    def _row(label, stats):
        # Column order (mean_rank, hit@10, hit@3, hit@1) matches the header line.
        return (label + '\t\t\t' + str(stats[0]) + '\t\t\t' + str(stats[3])
                + '\t\t\t' + str(stats[2]) + '\t\t\t' + str(stats[1]))

    print('\t\t\tmean_rank\t\t\thit@10\t\t\thit@3\t\t\thit@1')
    print(_row('head(raw)', l_raw))
    print(_row('tail(raw)', r_raw))
    print(_row('head(filter)', l_filter))
    print(_row('tail(filter)', r_filter))


# Known training triples, used by test_head/test_tail for filtered ranking.
train_set = set(config.train_list)
# Load trained parameters; these module-level globals are read by the functions above.
entity_emb, relation_emb, entity_context, entity_gcn_weight, gate_entity, v = config.load_parameter('parameters60')
print('test link prediction starting...')
test_link_prediction(config.test_list)
print('test link prediction ending...')
