import torch
import numpy as np
import tqdm
import pandas as pd
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import dataloader 
import Evaluation 
import os

def normlize(param):
    """Return the L2 norm over all elements of `param`, keeping dims."""
    l2 = torch.norm(param, p=2, keepdim=True)
    return l2

class TransE(torch.nn.Module):
    """TransE knowledge-graph embedding: score(h, r, t) = ||h + r - t||_2.

    Args:
        enlity_len: number of entities in the vocabulary.
        relation_len: number of relations in the vocabulary.
        margin: margin hyper-parameter (stored for callers; not used here).
        embedding_dim: dimensionality of entity and relation embeddings.
    """
    def __init__(self, enlity_len, relation_len, margin, embedding_dim):
        super().__init__()
        self.enlity_len = enlity_len
        self.relation_len = relation_len
        self.margin = margin
        self.embedding_dim = embedding_dim
        self.enlity = nn.Embedding(self.enlity_len, embedding_dim)
        self.relation = nn.Embedding(self.relation_len, embedding_dim)

        # Initialize embeddings, then project every row onto the unit
        # L2 sphere as prescribed by the TransE paper.
        nn.init.xavier_uniform_(self.enlity.weight)
        nn.init.xavier_uniform_(self.relation.weight)
        self.enlity.weight.data = F.normalize(self.enlity.weight.data, p=2, dim=1)
        self.relation.weight.data = F.normalize(self.relation.weight.data, p=2, dim=1)

    def model_normalize(self):
        # Re-normalize entity embeddings to unit L2 norm (called before training).
        self.enlity.weight.data = self.enlity.weight.data / torch.norm(self.enlity.weight.data, 2, 1, keepdim=True)

    def pair_score(self, pair):
        """Score a batch of (head, tail, relation) index triples.

        Returns a tensor of shape (1, batch) holding the L2 distances.
        """
        # Fix: move indices to the model's own device instead of the
        # hard-coded .cuda(), so the module also runs on CPU-only machines.
        pair = torch.as_tensor(pair, device=self.enlity.weight.device)

        head = self.enlity(pair[:, 0])
        tail = self.enlity(pair[:, 1])
        relation = self.relation(pair[:, 2])

        # Compute the translational score.
        score_num = self.score(head, tail, relation)

        return torch.unsqueeze(score_num, 0)

    def score(self, head, tail, relation):
        # h + r - t distance, L2 norm per row.
        return torch.norm((head + relation - tail), 2, 1)

    def forward(self, correct_tuple, corrupt_tuple):
        """Return (scores of true triples, scores of corrupted triples)."""
        score_correct = self.pair_score(correct_tuple)
        score_corrupt = self.pair_score(corrupt_tuple)
        return score_correct, score_corrupt
    
    
class TransH(torch.nn.Module):
    """TransH embedding: entities are projected onto a relation-specific
    hyperplane (normal vector w_r) before the TransE-style distance.

    score(h, r, t) = ||proj(h) + r - proj(t)||_2

    Args:
        enlity_len: number of entities in the vocabulary.
        relation_len: number of relations in the vocabulary.
        margin: margin hyper-parameter (stored for callers; not used here).
        embedding_dim: dimensionality of all embeddings.
    """
    def __init__(self, enlity_len, relation_len, margin, embedding_dim):
        super().__init__()
        self.enlity_len = enlity_len
        self.relation_len = relation_len
        self.margin = margin
        self.embedding_dim = embedding_dim
        self.enlity = nn.Embedding(self.enlity_len, embedding_dim)
        self.relation = nn.Embedding(self.relation_len, embedding_dim)
        # Hyperplane normal vector w_r, one per relation.
        self.relation_wr = nn.Embedding(self.relation_len, embedding_dim)

        # Initialize embeddings, then normalize every row to unit L2 norm.
        nn.init.xavier_uniform_(self.enlity.weight)
        nn.init.xavier_uniform_(self.relation.weight)
        nn.init.xavier_uniform_(self.relation_wr.weight)
        self.enlity.weight.data = F.normalize(self.enlity.weight.data, p=2, dim=1)
        self.relation.weight.data = F.normalize(self.relation.weight.data, p=2, dim=1)
        self.relation_wr.weight.data = F.normalize(self.relation_wr.weight.data, p=2, dim=1)

    def model_normalize(self):
        # Re-normalize entity embeddings to unit L2 norm.
        self.enlity.weight.data = self.enlity.weight.data / torch.norm(self.enlity.weight.data, 2, 1, keepdim=True)

    def pair_score(self, pair):
        """Score a batch of (head, tail, relation) index triples.

        Returns a tensor of shape (batch,).
        """
        # Fix: move indices to the model's own device instead of the
        # hard-coded .cuda(), so the module also runs on CPU-only machines.
        pair = torch.as_tensor(pair, device=self.enlity.weight.device)

        head = self.enlity(pair[:, 0])
        tail = self.enlity(pair[:, 1])
        relation = self.relation(pair[:, 2])
        relation_wr = self.relation_wr(pair[:, 2])

        return self.score(head, tail, relation, relation_wr)

    def projection(self, enlity, relation_wr):
        # Project onto the hyperplane orthogonal to w_r: e - (e . w_r) w_r.
        # NOTE(review): this formula assumes w_r has unit norm, which is
        # only guaranteed right after __init__ — the TransH paper
        # re-normalizes w_r during training; confirm the training loop does.
        return enlity - torch.sum(enlity * relation_wr, dim=1, keepdim=True) * relation_wr

    def score(self, head, tail, relation, relation_wr):
        head = self.projection(head, relation_wr)
        tail = self.projection(tail, relation_wr)
        # TransE-style distance on the projected entities.
        return torch.norm((head + relation - tail), 2, 1)

    def forward(self, correct_tuple, corrupt_tuple):
        """Return (scores of true triples, scores of corrupted triples)."""
        score_correct = self.pair_score(correct_tuple)
        score_corrupt = self.pair_score(corrupt_tuple)
        return score_correct, score_corrupt
    
class TransR(torch.nn.Module):
    """TransR embedding: entities live in a k-dim space and are mapped into
    each relation's r-dim space by a per-relation matrix M_r before the
    TransE-style distance.

    score(h, r, t) = ||h M_r + r - t M_r||_2

    Args:
        enlity_len: number of entities in the vocabulary.
        relation_len: number of relations in the vocabulary.
        margin: margin hyper-parameter (stored for callers; not used here).
        k_dim: entity embedding dimensionality.
        r_dim: relation embedding dimensionality.
    """
    def __init__(self, enlity_len, relation_len, margin, k_dim, r_dim):
        super().__init__()
        self.enlity_len = enlity_len
        self.relation_len = relation_len
        self.margin = margin
        self.k_dim = k_dim
        self.r_dim = r_dim
        self.enlity = nn.Embedding(self.enlity_len, k_dim)
        self.relation = nn.Embedding(self.relation_len, r_dim)
        # Flattened projection matrix M_r (k_dim x r_dim) per relation.
        self.relation_wr = nn.Embedding(self.relation_len, k_dim * r_dim)

        # Initialize embeddings; entity/relation rows are unit-normalized,
        # the projection matrices are left as Xavier-initialized.
        nn.init.xavier_uniform_(self.enlity.weight)
        nn.init.xavier_uniform_(self.relation.weight)
        nn.init.xavier_uniform_(self.relation_wr.weight)
        self.enlity.weight.data = F.normalize(self.enlity.weight.data, p=2, dim=1)
        self.relation.weight.data = F.normalize(self.relation.weight.data, p=2, dim=1)
        #self.relation_wr.weight.data = F.normalize(self.relation_wr.weight.data, p=2, dim=1)

    def model_normalize(self):
        # Re-normalize entity embeddings to unit L2 norm.
        self.enlity.weight.data = self.enlity.weight.data / torch.norm(self.enlity.weight.data, 2, 1, keepdim=True)

    def pair_score(self, pair):
        """Score a batch of (head, tail, relation) index triples.

        Returns a tensor of shape (batch,).
        """
        # Fix: move indices to the model's own device instead of the
        # hard-coded .cuda(), so the module also runs on CPU-only machines.
        pair = torch.as_tensor(pair, device=self.enlity.weight.device)

        head = self.enlity(pair[:, 0])
        tail = self.enlity(pair[:, 1])
        relation = self.relation(pair[:, 2])
        relation_wr = self.relation_wr(pair[:, 2])

        return self.score(head, tail, relation, relation_wr)

    def projection(self, enlity, relation_wr):
        # Map entities into relation space: (1 x k) @ (k x r) per example.
        enlity = enlity.view(-1, 1, self.k_dim)
        relation_wr = relation_wr.view(-1, self.k_dim, self.r_dim)
        return torch.matmul(enlity, relation_wr).view(-1, self.r_dim)

    def score(self, head, tail, relation, relation_wr):
        head = self.projection(head, relation_wr)
        tail = self.projection(tail, relation_wr)
        # TransE-style distance in relation space.
        return torch.norm((head + relation - tail), 2, 1)

    def forward(self, correct_tuple, corrupt_tuple):
        """Return (scores of true triples, scores of corrupted triples)."""
        score_correct = self.pair_score(correct_tuple)
        score_corrupt = self.pair_score(corrupt_tuple)
        return score_correct, score_corrupt
    
class Hingleloss(nn.Module):
    """Margin-based hinge (ranking) loss for translational KG models."""

    def __init__(self):
        super(Hingleloss, self).__init__()

    def forward(self, correct_score, corrupt_score, margin=1):
        """Return sum(max(0, correct_score - corrupt_score + margin)).

        Lower scores are better, so true triples should score at least
        `margin` below corrupted ones. clamp(min=0) replaces the original
        elementwise max against a hard-coded CUDA zero tensor — identical
        math, but device-agnostic; autograd.Variable is obsolete.
        """
        return torch.sum(torch.clamp(correct_score - corrupt_score + margin, min=0))

    @staticmethod
    def normloss(embedding, dim=1):
        """Soft unit-norm penalty: sum(max(0, ||e||^2 - 1)) over rows.

        Fix: declared @staticmethod — the original definition had no
        `self`, so calling it on an instance passed the instance itself
        as `embedding`.
        """
        norm = torch.sum(embedding ** 2, dim=dim, keepdim=True)
        return torch.sum(torch.clamp(norm - 1.0, min=0.0))
    
        


def train(model, dataLoad, Hingleloss, pairs, learning_rate, num_epoch, batch_size):
    """Train a translational model with SGD on the margin hinge loss, then
    save the whole model to dataloader.model_save_path.

    Args:
        model: a TransE/TransH/TransR module (must expose model_normalize()).
        dataLoad: loader whose data_iter(pairs, batch_size) yields
            (correct_tuple, corrupt_tuple) batches.
        Hingleloss: loss callable (correct_score, corrupt_score, margin).
            NOTE(review): parameter shadows the class of the same name;
            kept as-is for backward compatibility with positional callers.
        pairs: full list of training triples (used for averaging the loss).
        learning_rate: SGD learning rate.
        num_epoch: number of passes over the data.
        batch_size: mini-batch size.
    """
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # Project entity embeddings onto the unit sphere before training;
    # reuses the model's own helper instead of duplicating the formula.
    # (Also removes the dead `loss_sum = torch.cuda.FloatTensor([0.0])`
    # line, which was immediately overwritten and required CUDA.)
    model.model_normalize()
    for e in range(num_epoch):
        loss_sum = 0.0
        for correct_tuple, corrupt_tuple in dataLoad.data_iter(pairs, batch_size):
            correct_score, corrput_score = model(correct_tuple, corrupt_tuple)
            loss = Hingleloss(correct_score, corrput_score, 1)
            model.zero_grad()
            loss.backward()
            optimizer.step()
            #model.batch_norm()
            loss_sum += loss.data.item()
        print("Epoch {}, average loss:{}".format(e, (loss_sum / len(pairs))))
    torch.save(model, dataloader.model_save_path)
    
def test(model_path, test_pairs):
    """Evaluate a trained model: print hit@10 and mean rank.

    Args:
        model_path: the loaded model object (despite the name, callers in
            this file pass a model instance, not a filesystem path).
        test_pairs: evaluation triples.
    """
    # Bug fix: the original referenced an undefined global `model`
    # instead of the `model_path` parameter, raising NameError.
    test_part = Evaluation.Evaluation(model_path, test_pairs)
    hit10_enlity, mean_rank_enlity = test_part.test()
    print('Hit10: {} Mean_rank: {}'.format(hit10_enlity, mean_rank_enlity))
    
    
#def test(enlity_embbeding,relation_embbeding,test_pairs):
    
    


if __name__=='__main__' :   
    # Load the training triples and the 1-to-1 test split.
    # (presumably entity/relationShips are id dictionaries and pairs the
    #  triple list — confirm against dataloader.get_pairs.)
    entity, relationShips, pairs = dataloader.get_pairs(dataloader.FB15K_BASE_PATH_train_PATH)
    test_enlity,test_relation,test_pairs=dataloader.get_pairs(dataloader.FB15K_BASE_PATH_1_1_PATH)
    
    # Build the model on GPU; a TransR alternative is kept commented out.
    model=TransE(len(entity),len(relationShips),1,200).cuda()
    #model=TransR(len(entity),len(relationShips),1,100,100).cuda()
    dataLoad = dataloader.DataLoad(entity, relationShips)
    test_data=dataloader.load_without_dic(test_pairs)
    #test_pairs=test_data_load.pairs2array(test_pairs)
    #test_pairs=test_data_load.get_index(test_pairs)
    # NOTE(review): instantiates Evaluation.Hingleloss rather than the
    # Hingleloss class defined in this file — confirm they match.
    Hingleloss=Evaluation.Hingleloss().cuda()
    torch.cuda.empty_cache()
    # Training is currently disabled; a previously saved model is
    # loaded and evaluated instead.
    #train(model, dataLoad, Hingleloss,pairs,0.1,5,1000)
    saved_model=torch.load(dataloader.model_save_path)
    test(saved_model,test_data)
    
    	