import copy
import random
import time

import numpy as np

# Module-level accumulators filled in by downloader().
entitySet = set()  # entity ids; set membership de-duplicates
relationSet = set()  # relation ids
tripleList = []  # [head, relation, tail] triples, in file order
num = 0  # NOTE(review): never read — `num` inside ComplEx.sample/train is a distinct local

def downloader(fileDir):
    """Load the training triples from ``<fileDir>//train.txt``.

    Each line is tab-separated: ``head\trelation\ttail`` (the raw labels,
    not integer ids). Unlike the original, local collections are used so
    repeated calls do not accumulate data in module-level globals.

    :param fileDir: directory containing ``train.txt``
    :return: (entitySet, relationSet, tripleList) where tripleList holds
             ``[head, relation, tail]`` lists in file order
    """
    entities = set()
    relations = set()
    triples = []
    filename = fileDir + "//train.txt"  # data layout: head --- relation --- tail
    with open(filename, encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split('\t')
            if len(parts) != 3:
                continue  # skip blank or malformed lines instead of crashing
            head, relation, tail = parts
            entities.add(head)
            entities.add(tail)
            relations.add(relation)
            triples.append([head, relation, tail])
    return entities, relations, triples

class ComplEx:
    """ComplEx knowledge-graph embedding (Trouillon et al., ICML 2016).

    Every entity and relation gets a complex embedding stored as a pair of
    real numpy vectors ``[real part, imaginary part]``. Training minimizes
    the logistic loss over positive triples and sampled corruptions using
    plain SGD on the eq.-11 score Re(<r, h, conj(t)>).
    """

    def __init__(self, entitySet, relationSet, tripleList, embeddingDim=50, lr=0.01, regularizationFactor=0.01, negativeRatio=2, η=2):
        """
        :param entitySet: iterable of entity ids
        :param relationSet: iterable of relation ids
        :param tripleList: list of [head, relation, tail] triples
        :param embeddingDim: dimension K of each complex embedding
        :param lr: SGD learning rate
        :param regularizationFactor: L2 penalty weight λ (kept for interface)
        :param negativeRatio: kept for interface compatibility (unused)
        :param η: corruption rounds applied to each positive triple
        """
        self.embeddingDim = embeddingDim
        self.entities = entitySet
        self.relations = relationSet
        self.tripleList = tripleList
        self.learningRate = lr
        self.regularizationFactor = regularizationFactor
        self.negativeRatio = negativeRatio
        self.loss = 0.0
        self.η = η

    def embInitialize(self):
        """Replace the id collections with dicts id -> [realVec, imagVec].

        Bug fix: the original allocated a ``(numIds, embeddingDim)`` matrix
        per id — a full embedding table for *every* entity/relation. Each id
        only needs one K-dimensional vector per part.
        """
        self.relations = {
            rel: [np.random.randn(self.embeddingDim).astype('f'),
                  np.random.randn(self.embeddingDim).astype('f')]
            for rel in self.relations
        }
        self.entities = {
            ent: [np.random.randn(self.embeddingDim).astype('f'),
                  np.random.randn(self.embeddingDim).astype('f')]
            for ent in self.entities
        }

    def sample(self, tripleList, batchSize):
        """Draw a batch of positives and pair each with one corrupted copy.

        :param tripleList: list of [head, relation, tail] triples
        :param batchSize: number of positive triples to draw
        :return: list of (positive, corrupted) triple pairs
        """
        Sbatch = random.sample(tripleList, batchSize)  # positives, no replacement
        Tbatch = []
        # random.choice needs a sequence; dict views are not indexable (TypeError on 3.11+)
        entityIds = list(self.entities.keys())
        for triple in Sbatch:
            Scorrupted = copy.deepcopy(triple)
            for _ in range(self.η):
                if np.random.random() < 0.5:
                    # corrupt the head entity (index 0)
                    Scorrupted[0] = random.choice(entityIds)
                    while Scorrupted[0] == triple[0]:  # resample if unchanged
                        Scorrupted[0] = random.choice(entityIds)
                else:
                    # corrupt the tail entity; triples are [head, relation, tail],
                    # so the tail is index 2 (the original wrote index 1 and
                    # silently replaced the relation with an entity id)
                    Scorrupted[2] = random.choice(entityIds)
                    while Scorrupted[2] == triple[2]:
                        Scorrupted[2] = random.choice(entityIds)
            if (triple, Scorrupted) not in Tbatch:  # lists aren't hashable, so no set
                Tbatch.append((triple, Scorrupted))
        return Tbatch

    def _score(self, head, relation, tail):
        """Scoring function, eq. 11 of the paper: Re(<r, h, conj(t)>)."""
        return np.sum(relation[0] * head[0] * tail[0]
                      + relation[0] * head[1] * tail[1]
                      + relation[1] * head[0] * tail[1]
                      - relation[1] * head[1] * tail[0])

    def updateTripleEmbedding(self, Tbatch):
        """One SGD step over the batch using the logistic loss.

        Accumulates ``-log σ(φ_pos) - log(1 - σ(φ_neg))`` into ``self.loss``
        and applies SGD updates in place. (The original deep-copied the
        embeddings, printed two probabilities, and wrote the copies back
        unchanged — no learning ever happened.)
        """
        for correctTriple, corruptedTriple in Tbatch:
            # Triples follow the [head, relation, tail] layout built by downloader.
            h = self.entities[correctTriple[0]]
            r = self.relations[correctTriple[1]]
            t = self.entities[correctTriple[2]]
            ch = self.entities[corruptedTriple[0]]
            ct = self.entities[corruptedTriple[2]]

            correctP = 1.0 / (1.0 + np.exp(-self._score(h, r, t)))
            corruptedP = 1.0 / (1.0 + np.exp(-self._score(ch, r, ct)))
            eps = 1e-12  # keep the logs finite when a sigmoid saturates
            self.loss += -np.log(correctP + eps) - np.log(1.0 - corruptedP + eps)

            lr = self.learningRate
            # dL/dφ = σ(φ) - label: (correctP - 1) for the positive, corruptedP for the negative.
            for head, tail, g in ((h, t, correctP - 1.0), (ch, ct, corruptedP)):
                # Partial derivatives of φ = Re(<r, head, conj(tail)>);
                # compute all of them before mutating any vector.
                gradHeadRe = r[0] * tail[0] + r[1] * tail[1]
                gradHeadIm = r[0] * tail[1] - r[1] * tail[0]
                gradTailRe = r[0] * head[0] - r[1] * head[1]
                gradTailIm = r[0] * head[1] + r[1] * head[0]
                gradRelRe = head[0] * tail[0] + head[1] * tail[1]
                gradRelIm = head[0] * tail[1] - head[1] * tail[0]
                head[0] -= lr * g * gradHeadRe
                head[1] -= lr * g * gradHeadIm
                tail[0] -= lr * g * gradTailRe
                tail[1] -= lr * g * gradTailIm
                r[0] -= lr * g * gradRelRe
                r[1] -= lr * g * gradRelIm

    def train(self, maxIter=400, batchSize=400):
        """Run SGD training.

        :param maxIter: number of epochs
        :param batchSize: positive triples per batch
        """
        Nbatch = len(self.tripleList) // batchSize  # whole batches per epoch
        print("batch size : ", batchSize)
        for epoch in range(maxIter):
            start = time.time()  # per-epoch wall-clock timing
            self.loss = 0.0
            for _ in range(Nbatch):
                Tbatch = self.sample(self.tripleList, batchSize)
                self.updateTripleEmbedding(Tbatch)
            # Average over processed positives. The original divided by a
            # local ``num`` that was always 0 → ZeroDivisionError.
            if Nbatch:
                self.loss /= Nbatch * batchSize
            end = time.time()
            print("epoch : ", epoch,
                  "cost time : %s" % (
                      round((end - start), 3)))
            print("running loss : ", self.loss)


if __name__ == '__main__':
    file_base = 'datasets/fb15k'
    entitySet, relationSet, tripleList = downloader(file_base)
    print("load file ...")

    # Complete load. entity : 14951 , relation_set : 1345 , triple : 483142
    print("Complete load. entity : %d , relation_set : %d , triple : %d" % (
    len(entitySet), len(relationSet), len(tripleList)))

    """
    Hyper-parameters λ, K, α0, η vary over {1, 2, 5, 10, 20, 50, 100, 200}
    in the paper. η is the number of corruptions per positive sample;
    maxIter corresponds to the number of epochs.
    """
    # Bug fix: the original bound the instance to the name ``ComplEx``,
    # shadowing the class itself.
    model = ComplEx(entitySet, relationSet, tripleList, embeddingDim=50, lr=0.01, regularizationFactor=0.01, η=2)
    model.embInitialize()
    model.train(maxIter=400, batchSize=400)