## Semantic matching
# import torch
import torch

class Simer:
    """Compute semantic similarity between two NLU results.

    Two strategies are provided:
      - ``cal_sim_by_sentence_vec``: cosine similarity of the precomputed
        sentence vectors.
      - ``cal_sim_by_word_vec``: build a sentence vector from word vectors
        (sum + standardize), then take the cosine similarity.
    """

    def __init__(self, nluer1, nluer2):
        # nluer1/nluer2 are project NLU objects; each is expected to expose
        # `.sentence_vec` (1-D numeric sequence) and `.bigwords` (objects
        # carrying a `.word_vec` attribute) — TODO confirm against NLUER.
        self.nluer1 = nluer1
        self.nluer2 = nluer2
        self.sim_by_sentence_vec = None  # cached result of cal_sim_by_sentence_vec()
        self.sim_by_word_vec = None      # cached result of cal_sim_by_word_vec()

    def cal_sim_all(self):
        """Compute and cache both similarity scores.

        Bug fix: the original assigned the bound methods themselves
        (missing ``()``), leaving method objects — not floats — in the
        cache fields. The methods are now actually called.
        """
        self.sim_by_sentence_vec = self.cal_sim_by_sentence_vec()
        self.sim_by_word_vec = self.cal_sim_by_word_vec()

    def cal_sim_by_sentence_vec(self):
        """Return the cosine similarity of the two precomputed sentence vectors.

        :return: similarity in [-1.0, 1.0] as a Python float.
        """
        return torch.cosine_similarity(
            torch.tensor(self.nluer1.sentence_vec, dtype=torch.double),
            torch.tensor(self.nluer2.sentence_vec, dtype=torch.double),
            dim=0,
        ).item()

    @staticmethod
    def _sentence_vec_from_words(bigwords):
        """Sum the word vectors into one sentence vector and standardize it.

        Standardizing with (x - mean) / (std + eps) gives every dimension
        zero mean and (roughly) unit variance, so no single dimension's
        scale dominates the subsequent similarity computation. The 1e-5
        epsilon guards against division by zero for constant vectors.
        """
        vecs = [word.word_vec for word in bigwords]
        sentence = torch.sum(torch.tensor(vecs, dtype=torch.double), dim=0)
        return (sentence - torch.mean(sentence, dim=0)) / (torch.std(sentence, dim=0) + 1e-5)

    def cal_sim_by_word_vec(self):
        """Build sentence vectors from the word vectors and compare them.

        For each side: element-wise *sum* of the word vectors (note: the
        original docstring claimed an average; the code sums), followed by
        (x - mean) / (std + eps) standardization. Returns the cosine
        similarity of the two standardized vectors.

        :return: similarity as a Python float.
        """
        sentence1 = self._sentence_vec_from_words(self.nluer1.bigwords)
        sentence2 = self._sentence_vec_from_words(self.nluer2.bigwords)
        return torch.cosine_similarity(sentence1, sentence2, dim=0).item()


if __name__ == '__main__':
    # No runnable entry point; the commented block below is a usage example
    # that depends on the project-local NLUER and Bigword classes, which are
    # not imported in this file.
    pass
    # nlu1 = NLUER('nlu1')
    # nlu2 = NLUER('nlu2')
    # nlu1.sentence_vec = [1, 3, 5, 3]
    # nlu2.sentence_vec = [1, 3, 5, 3]
    # big1 = Bigword('big1')
    # big1.word_vec = [1, 2, 3, 1, 1]
    # big2 = Bigword('big2')
    # big2.word_vec = [2, 2, 3, 2, 2]
    # nlu1.bigwords = [big1, big1]
    # nlu2.bigwords = [big1, big1]
    # sim = Simer(nlu1, nlu2)
    # # print(sim.cal_sim_by_sentence_vec())
    # print(sim.cal_sim_by_word_vec())
