import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import json
import os
import time
import numpy as np

from layers import GraphConvolution
import config

# Restrict training to GPU index 1; must be set before any CUDA initialisation.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# When True, per-stage wall-clock timings are printed for every batch.
DEBUG = True


class DynamicKGE(nn.Module):
    """TransE-style knowledge-graph embedding model with context aggregation.

    Each entity's target embedding is refined by (a) one graph-convolution
    step over the entity and its neighbours and (b) an attention-weighted
    sum of the neighbours' context vectors; the two are mixed through a
    learned element-wise gate.  Triples are scored with the TransE distance
    ||h + r - t||_2.

    The `config` argument (and the module-level `config` read by several
    methods) is expected to provide: dim, entity_total, relation_total,
    max_context_num, entity_adj_table.
    """

    def __init__(self, config):
        super(DynamicKGE, self).__init__()

        # Weight of the single GCN layer (dim x dim); no bias is used.
        self.entity_gcn_weight = nn.Parameter(torch.Tensor(config.dim, config.dim))
        # Target embedding tables for entities and relations, plus a separate
        # context table used when an entity appears as a neighbour.
        self.entity_emb = nn.Parameter(torch.Tensor(config.entity_total, config.dim))
        self.entity_context = nn.Parameter(torch.Tensor(config.entity_total, config.dim))
        self.relation_emb = nn.Parameter(torch.Tensor(config.relation_total, config.dim))
        # Element-wise gate mixing the GCN output with the attention context.
        self.gate_entity = nn.Parameter(torch.Tensor(config.dim))
        # Attention scoring vector.  Fixed: it was declared with length
        # 2*dim, but score() multiplies it against (batch, max, dim)
        # activations (hs and os are both built with config.dim), which
        # raises a shape-mismatch error at runtime; it must be length dim.
        self.v = nn.Parameter(torch.Tensor(config.dim))

        self._init_parameters()

    def _init_parameters(self):
        """Xavier-initialise the embedding tables, uniform-initialise the
        gate/attention vectors, and scale the GCN weight by 1/sqrt(fan_in)."""
        nn.init.xavier_uniform_(self.entity_emb.data)
        nn.init.xavier_uniform_(self.entity_context.data)
        nn.init.xavier_uniform_(self.relation_emb.data)
        nn.init.uniform_(self.gate_entity.data)
        nn.init.uniform_(self.v.data)

        stdv = 1. / math.sqrt(self.entity_gcn_weight.size(1))
        self.entity_gcn_weight.data.uniform_(-stdv, stdv)

    def _calc(self, h, t, r):
        """TransE score per triple: ||h + r - t||_2 (smaller = more plausible)."""
        return torch.norm(h + r - t, p=2, dim=1)

    def get_adj_entity_vec(self, adj_entity_list):
        """Look up the context vectors of each entity's neighbour list.

        :param adj_entity_list: per-entity iterables of neighbour ids
        :return: ((batch, max_context_num, dim) tensor zero-padded up to
                 config.max_context_num, list of real neighbour counts)
        """
        adj_entity_vec_list = torch.zeros(len(adj_entity_list), config.max_context_num, config.dim).cuda()
        adj_entity_size = []
        for i, adj_entity in enumerate(adj_entity_list):
            adj_entity_size.append(len(adj_entity))
            if len(adj_entity) == 0:
                continue
            # Neighbour vectors come from the dedicated context table,
            # not from entity_emb.
            vecs = self.entity_context[list(adj_entity)]
            adj_entity_vec_list[i, :vecs.shape[0], :vecs.shape[1]] = vecs
        return adj_entity_vec_list, adj_entity_size

    def score(self, hs, o, adj_entity_num_list, is_relation=None):
        """Unnormalised attention logits of each context slot vs. the target.

        :param hs: (batch, max_context_num, dim) context vectors
        :param o: (batch, dim) target vectors
        :param adj_entity_num_list: real (non-padded) neighbour counts (unused here;
            padded slots are masked out later in calc_entity_context_vec)
        :param is_relation: must be 0 — only entity scoring is implemented
        :return: (batch, max_context_num) attention scores
        :raises ValueError: if is_relation is not 0
        """
        if is_relation != 0:
            # Fixed: the original printed a message and implicitly returned
            # None, which would only fail later with a confusing error.
            raise ValueError('parameter is_relation must be 0 (only entity scoring is implemented)')
        # Replicate the target vector across every context slot.
        os = torch.cat(tuple([o] * config.max_context_num), dim=1).reshape(-1, config.max_context_num, config.dim)
        tmp = F.relu(torch.mul(hs, os), inplace=False)  # batch x max x dim
        score = torch.matmul(tmp, self.v)  # batch x max
        return score

    def gcn(self, o, adj_entity_vec_list, adj_entity_num_list):
        """One normalised graph-convolution step per sample.

        :param o: (batch, dim) target vectors
        :param adj_entity_vec_list: (batch, max_context_num, dim) neighbour vectors
        :param adj_entity_num_list: list of neighbour counts, length batch
        :return: (batch, dim) convolved target vectors
        """
        res = torch.Tensor(adj_entity_vec_list.shape[0], config.dim).cuda()  # batch_size x dim

        for i in range(adj_entity_vec_list.shape[0]):
            # Node feature matrix: row 0 is the target vector, rows 1.. are
            # its (zero-padded) neighbours.
            H = torch.zeros(config.max_context_num + 1, config.dim).cuda()
            H[0, :] = o[i, :]
            H[1:, :] = adj_entity_vec_list[i, :, :]

            # Adjacency: identity (self-loops) plus a star connecting the
            # target to its real neighbours, normalised by 1/(degree + 1).
            n = adj_entity_num_list[i]
            A = torch.eye(config.max_context_num + 1, config.max_context_num + 1).cuda()
            norm_row = torch.ones(n + 1).cuda() / (n + 1)
            A[0, :n + 1] = norm_row
            A[:n + 1, 0] = norm_row

            support = torch.mm(A, H)
            output = F.relu(torch.mm(support, self.entity_gcn_weight))

            # Keep only the target node's row after the convolution.
            res[i, :] = output[0, :]

        return res

    def calc_entity_context_vec(self, adj_entity_vec_list, adj_entity_num_list, o):
        """Attention-weighted sum of each entity's context vectors.

        Padded slots keep a zero attention weight, so they contribute nothing.

        :param adj_entity_vec_list: (batch, max_context_num, dim)
        :param adj_entity_num_list: list of neighbour counts, length batch
        :param o: (batch, dim) target vectors
        :return: (batch, dim) context summary vectors
        """
        scores = self.score(adj_entity_vec_list, o, adj_entity_num_list, is_relation=0)  # batch x max
        alpha = torch.zeros(len(adj_entity_vec_list), config.max_context_num).cuda()

        for i in range(len(adj_entity_vec_list)):
            n = adj_entity_num_list[i]
            if n == 0:
                continue
            # Softmax only over the real neighbours of entity i.  (Removed a
            # redundant .cuda() on the result — the input is already on GPU.)
            alpha[i, :n] = F.softmax(scores[i, :n], dim=0)

        sg = torch.sum(torch.mul(torch.unsqueeze(alpha, dim=2), adj_entity_vec_list), dim=1)  # batch x dim
        return sg

    def get_entity_context(self, entities):
        """Neighbour-id lists for a batch of entity ids ([] when none known)."""
        entities_context = [config.entity_adj_table.get(int(e), []) for e in entities]
        return entities_context

    def save_parameters(self, file_name, epoch):
        """Dump the state dict as JSON to ./res/<file_name><epoch>."""
        para2vec = {}
        lists = self.state_dict()
        for var_name in lists:
            para2vec[var_name] = lists[var_name].cpu().numpy().tolist()

        # Fixed: use a context manager so the handle is closed on error too.
        with open('./res/' + file_name + str(epoch), "w") as f:
            f.write(json.dumps(para2vec))

    def forward(self, golden_triples, negative_triples):
        """Score a batch of golden and negative triples.

        :param golden_triples: (h, r, t) tuple of id tensors
        :param negative_triples: (h, r, t) tuple of id tensors
        :return: (positive_scores, negative_scores), each of shape (batch,)
        """
        pos_h, pos_r, pos_t = golden_triples
        neg_h, neg_r, neg_t = negative_triples

        # Target vectors (plain embedding lookups).
        p_h = self.entity_emb[pos_h.cpu().numpy()]
        p_t = self.entity_emb[pos_t.cpu().numpy()]
        p_r = self.relation_emb[pos_r.cpu().numpy()]
        n_h = self.entity_emb[neg_h.cpu().numpy()]
        n_t = self.entity_emb[neg_t.cpu().numpy()]
        n_r = self.relation_emb[neg_r.cpu().numpy()]

        # Neighbour id lists for every head/tail entity.
        ph_adj_entity_list = self.get_entity_context(pos_h)
        pt_adj_entity_list = self.get_entity_context(pos_t)
        nh_adj_entity_list = self.get_entity_context(neg_h)
        nt_adj_entity_list = self.get_entity_context(neg_t)

        # Padded context-vector tensors plus real neighbour counts.
        ph_adj_entity_vec_list, ph_adj_entity_num_list = self.get_adj_entity_vec(ph_adj_entity_list)
        pt_adj_entity_vec_list, pt_adj_entity_num_list = self.get_adj_entity_vec(pt_adj_entity_list)
        nh_adj_entity_vec_list, nh_adj_entity_num_list = self.get_adj_entity_vec(nh_adj_entity_list)
        nt_adj_entity_vec_list, nt_adj_entity_num_list = self.get_adj_entity_vec(nt_adj_entity_list)

        # GCN refinement of the target vectors.
        p_h = self.gcn(p_h, ph_adj_entity_vec_list, ph_adj_entity_num_list)
        p_t = self.gcn(p_t, pt_adj_entity_vec_list, pt_adj_entity_num_list)
        n_h = self.gcn(n_h, nh_adj_entity_vec_list, nh_adj_entity_num_list)
        n_t = self.gcn(n_t, nt_adj_entity_vec_list, nt_adj_entity_num_list)

        # Attention-weighted context summaries.
        ph_sg = self.calc_entity_context_vec(ph_adj_entity_vec_list, ph_adj_entity_num_list, p_h)
        pt_sg = self.calc_entity_context_vec(pt_adj_entity_vec_list, pt_adj_entity_num_list, p_t)
        nh_sg = self.calc_entity_context_vec(nh_adj_entity_vec_list, nh_adj_entity_num_list, n_h)
        nt_sg = self.calc_entity_context_vec(nt_adj_entity_vec_list, nt_adj_entity_num_list, n_t)

        # Gate: convex-style mix of GCN output and context summary.
        ph_o = torch.mul(self.gate_entity, p_h) + torch.mul(1 - self.gate_entity, ph_sg)
        pt_o = torch.mul(self.gate_entity, p_t) + torch.mul(1 - self.gate_entity, pt_sg)
        nh_o = torch.mul(self.gate_entity, n_h) + torch.mul(1 - self.gate_entity, nh_sg)
        nt_o = torch.mul(self.gate_entity, n_t) + torch.mul(1 - self.gate_entity, nt_sg)

        # TransE distances used by the margin ranking loss.
        p_score = self._calc(ph_o, pt_o, p_r)
        n_score = self._calc(nh_o, nt_o, n_r)

        torch.cuda.empty_cache()

        return p_score, n_score


def main():
    """Prepare data, train DynamicKGE, and periodically checkpoint parameters."""
    print('preparing data...')
    phs, prs, pts, nhs, nrs, nts = config.prepare_data()
    print('preparing data complete')

    print('train starting...')

    dynamicKGE = DynamicKGE(config).cuda()
    # Fixed: a dead optim.SGD assignment was immediately overwritten by Adam;
    # only the Adam optimizer was ever used.
    optimizer = optim.Adam(dynamicKGE.parameters(), lr=config.learning_rate)
    # Fixed: positional size_average=False is deprecated; its documented
    # equivalent is reduction='sum'.
    criterion = nn.MarginRankingLoss(margin=config.margin, reduction='sum').cuda()

    # Checkpoint roughly five times over training.  Guarded with max(1, ...)
    # so that train_times < 5 does not make `epoch % div` divide by zero.
    div = max(1, config.train_times // 5)

    # y = -1 tells MarginRankingLoss the positive score should be *smaller*
    # than the negative one.  Hoisted out of the loop (it is invariant).
    y = torch.Tensor([-1]).cuda()

    for epoch in range(config.train_times):
        i = 0  # running count of training samples seen this epoch
        for batch in range(config.nbatchs):
            batch_start = time.time()

            get_bat_start = time.time()
            golden_triples, negative_triples = config.get_batch(batch, epoch, phs, prs, pts, nhs, nrs, nts)
            get_bat_end = time.time()
            if DEBUG:
                print('get batch time: ' + str(get_bat_end - get_bat_start))

            zero_start = time.time()
            optimizer.zero_grad()
            zero_end = time.time()
            if DEBUG:
                print('zero grad time: ' + str(zero_end - zero_start))

            model_start = time.time()
            p_scores, n_scores = dynamicKGE(golden_triples, negative_triples)
            model_end = time.time()
            if DEBUG:
                print('model time: ' + str(model_end - model_start))

            loss_start = time.time()
            loss = criterion(p_scores, n_scores, y)
            loss_end = time.time()
            if DEBUG:
                print('loss time: ' + str(loss_end - loss_start))

            back_start = time.time()
            loss.backward()
            back_end = time.time()
            if DEBUG:
                print('back time: ' + str(back_end - back_start))

            step_start = time.time()
            optimizer.step()
            step_end = time.time()
            if DEBUG:
                print('step time: ' + str(step_end - step_start))

            print(loss.item())

            batch_end = time.time()
            if DEBUG:
                print('batch time: ' + str(batch_end - batch_start))

            # NOTE(review): i advances by batch_size, so this only fires when
            # the sample count happens to be a multiple of 100 — confirm the
            # intended reporting cadence.
            if i % 100 == 0:
                print('----trained the ' + str(i) + ' batch----')
            i += config.batch_size

            torch.cuda.empty_cache()

        print('----------trained the ' + str(epoch) + ' epoch----------')
        if epoch % div == 0:
            dynamicKGE.save_parameters('parameters', epoch)

    print('train ending...')


if __name__ == '__main__':
    # Guard the entry point so importing this module does not start training.
    main()