import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import json
import os
import time
import numpy as np

from layers import GraphConvolution
import config

# Pin the process to GPU index 1; must be set before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# When True, per-step wall-clock timings are printed during training.
DEBUG = True


class DynamicKGE(nn.Module):
    """TransE-style knowledge-graph embedding model.

    Keeps one learned vector per entity and per relation. A triple
    (h, r, t) is scored by ||h + r - t||_2, so a *smaller* score means
    a more plausible triple.
    """

    def __init__(self, config):
        """`config` must expose `entity_total`, `relation_total` and `dim`."""
        super(DynamicKGE, self).__init__()

        # One embedding row per entity / relation, `config.dim` columns each.
        self.entity_emb = nn.Parameter(torch.Tensor(config.entity_total, config.dim))
        self.relation_emb = nn.Parameter(torch.Tensor(config.relation_total, config.dim))

        self._init_parameters()

    def _init_parameters(self):
        """Xavier-uniform initialization for both embedding tables."""
        nn.init.xavier_uniform_(self.entity_emb.data)
        nn.init.xavier_uniform_(self.relation_emb.data)

    def _calc(self, h, t, r):
        """TransE score: row-wise L2 norm of (h + r - t); lower is better."""
        return torch.norm(h + r - t, p=2, dim=1)

    def save_parameters(self, file_name, epoch):
        """Dump the state dict as JSON to ./res/transe_<file_name><epoch>.

        Tensors are converted to nested Python lists so the file can be
        read back without torch.
        """
        para2vec = {name: tensor.cpu().numpy().tolist()
                    for name, tensor in self.state_dict().items()}

        # Context manager ensures the file is closed even if dumps() raises
        # (the original left the handle open on error).
        with open('./res/transe_' + file_name + str(epoch), "w") as f:
            f.write(json.dumps(para2vec))

    def forward(self, golden_triples, negative_triples):
        """Score a batch of positive and a batch of negative triples.

        Each argument is an (h, r, t) tuple of index tensors. Returns
        (p_score, n_score): one score per positive / negative triple.
        """
        pos_h, pos_r, pos_t = golden_triples
        neg_h, neg_r, neg_t = negative_triples

        # Index the tables directly with the index tensors: same result as
        # the original .cpu().numpy() round trip, but without forcing a
        # GPU -> host sync, and gradients still flow to the selected rows.
        p_h = self.entity_emb[pos_h]
        p_t = self.entity_emb[pos_t]
        p_r = self.relation_emb[pos_r]
        n_h = self.entity_emb[neg_h]
        n_t = self.entity_emb[neg_t]
        n_r = self.relation_emb[neg_r]

        # Scores fed to the margin ranking loss.
        p_score = self._calc(p_h, p_t, p_r)
        n_score = self._calc(n_h, n_t, n_r)

        return p_score, n_score


def _timed(label, fn, *args):
    """Run fn(*args); when DEBUG is on, print its wall-clock time as
    '<label> time: <seconds>' (same format as the original inline timers)."""
    start = time.time()
    result = fn(*args)
    if DEBUG:
        print(label + ' time: ' + str(time.time() - start))
    return result


def main():
    """Train the TransE embeddings for config.train_times epochs,
    periodically saving parameters to ./res/."""
    print('preparing data...')
    phs, prs, pts, nhs, nrs, nts = config.prepare_data()
    print('preparing data complete')

    print('train starting...')

    dynamicKGE = DynamicKGE(config).cuda()
    # The original built an SGD optimizer and immediately overwrote it with
    # Adam; only Adam was ever used, so the dead SGD is removed.
    optimizer = optim.Adam(dynamicKGE.parameters(), lr=config.learning_rate)
    # Positional size_average=False is deprecated; reduction='sum' is the
    # documented equivalent (sum of per-pair losses).
    criterion = nn.MarginRankingLoss(config.margin, reduction='sum').cuda()

    # Checkpoint ~5 times over the run. Guard against train_times < 5,
    # which would make div zero and crash `epoch % div` below.
    div = max(config.train_times // 5, 1)

    # Ranking target (-1 => p_score should be smaller than n_score);
    # loop-invariant, so created once instead of every batch.
    y = torch.Tensor([-1]).cuda()

    for epoch in range(config.train_times):
        i = 0
        for batch in range(config.nbatchs):
            batch_start = time.time()

            golden_triples, negative_triples = _timed(
                'get batch', config.get_batch,
                batch, epoch, phs, prs, pts, nhs, nrs, nts)
            _timed('zero grad', optimizer.zero_grad)
            p_scores, n_scores = _timed('model', dynamicKGE,
                                        golden_triples, negative_triples)
            loss = _timed('loss', criterion, p_scores, n_scores, y)
            _timed('back', loss.backward)
            _timed('step', optimizer.step)

            print(loss.item())

            if DEBUG:
                print('batch time: ' + str(time.time() - batch_start))

            # i advances by batch_size, so this only fires when the running
            # example count happens to be a multiple of 100 (original behavior).
            if i % 100 == 0:
                print('----trained the ' + str(i) + ' batch----')
            i += config.batch_size

            torch.cuda.empty_cache()

        print('----------trained the ' + str(epoch) + ' epoch----------')
        if epoch % div == 0:
            dynamicKGE.save_parameters('parameters', epoch)

    print('train ending...')


main()
