import os
import sys

import faiss
from bert_serving.client import BertClient
import numpy as np
from datetime import datetime
from sklearn.preprocessing import normalize


class EntityIdentifier:
    """Entity-linking helper.

    Encodes mention strings with a BERT-as-a-service backend, then matches
    them against a dictionary of known entities through a Faiss
    inner-product index, re-ranking the candidates with character-overlap
    and edit-distance scores.
    """

    def __init__(self):
        """Connect to a locally running BertServer.

        Raises:
            Exception: if the BertClient cannot be created (e.g. the server
                is not running on 127.0.0.1:5555/5556).
        """
        try:
            t1 = datetime.now()
            # 127.0.0.1 is the local host; see the bert-serving-client API
            # for the meaning of port (push) and port_out (pull).
            self.bert_client = BertClient(ip='127.0.0.1', port=5555, port_out=5556)
            t2 = datetime.now()
            print('connect to BertServer consumed {0} seconds'.format((t2 - t1).seconds))
        except Exception as exc:
            # Chain the original error instead of silently discarding it.
            raise Exception("cannot create BertClient") from exc

    def bert_encode(self, entities):
        """Encode a list of entity strings into L2-normalized BERT vectors.

        :param entities: list of strings to encode
        :return: 2-D numpy array of shape (len(entities), dim), row-normalized
        """
        vectors = self.bert_client.encode(entities)
        # Fail fast if the server returned a different number of vectors
        # than requested (keeps the original abort-on-mismatch behavior).
        if len(vectors) != len(entities):
            sys.exit(1)
        print('vectors.shape: ', vectors.shape)
        # Normalize rows so that inner product in Faiss equals cosine similarity.
        vectors = normalize(vectors)

        return vectors

    def build_faissIP(self, dimension, vectors):
        """Build a flat inner-product Faiss index to speed up candidate filtering.

        :param dimension: vector dimensionality
        :param vectors: float32 array of shape (n, dimension), pre-normalized
        :return: populated faiss.IndexFlatIP
        """
        index_ip = faiss.IndexFlatIP(dimension)
        index_ip.add(vectors)
        return index_ip

    def close_bert(self):
        """Close the connection to the BertServer."""
        self.bert_client.close()

    def cosine_similarity(self, u, v):
        """Return the cosine similarity between vectors u and v.

        Arguments:
            u -- a word vector of shape (n,)
            v -- a word vector of shape (n,)
        """
        # Dot product between u and v.
        dot = np.dot(u, v.T)
        # L2 norms of u and v.
        norm_u = np.sqrt(np.sum(u ** 2))
        norm_v = np.sqrt(np.sum(v ** 2))
        cosine_similarity = dot / (norm_u * norm_v)
        return cosine_similarity

    def edit_distance(self, str1, str2):
        """Return a similarity-style score from the Levenshtein distance.

        Uses the classic (len1+1) x (len2+1) DP table where dp[i][j] is the
        edit distance between str2[:i] and str1[:j].  (The previous version
        sized the table len2 x len1 and never compared the first character
        of either string, under-counting the distance and crashing on empty
        strings.)

        :param str1: first string
        :param str2: second string
        :return: 0 if the strings are identical, otherwise 1 / distance
        """
        len1, len2 = len(str1), len(str2)
        # Build independent rows (a [[0]*n]*m literal would alias one row).
        dp = [[0] * (len1 + 1) for _ in range(len2 + 1)]
        # Base cases: transforming to/from the empty prefix.
        for j in range(len1 + 1):
            dp[0][j] = j  # insert j characters of str1
        for i in range(len2 + 1):
            dp[i][0] = i  # delete i characters of str2
        for i in range(1, len2 + 1):
            for j in range(1, len1 + 1):
                cost = 0 if str1[j - 1] == str2[i - 1] else 1
                dp[i][j] = min(dp[i - 1][j] + 1,          # delete last char of str2 prefix
                               dp[i][j - 1] + 1,          # insert last char of str1 prefix
                               dp[i - 1][j - 1] + cost)   # substitute (free if equal)
        distance = dp[len2][len1]
        if distance == 0:
            return 0
        return 1 / distance

    # Core method (vector matching accelerated with Faiss).
    def simCal(self, word, IndexFlatIP, topK, entities, threshold=0.65, overlap=0.5):
        """Compute the similarity between a word and the entity dictionary.

        First retrieves topK candidates by cosine similarity through the
        Faiss index, then re-ranks them with character-overlap and
        edit-distance scores.

        :param word: mention string to match
        :param IndexFlatIP: Faiss inner-product index over the entity vectors
        :param topK: number of candidates to retrieve from Faiss
        :param entities: list of entity strings, aligned with the index rows
        :param threshold: minimum combined score to keep a candidate
        :param overlap: minimum character-overlap ratio to keep a candidate
        :return: list of (entity, score) tuples sorted by score, descending
        """
        threshold = float(threshold)
        answer = normalize(self.bert_client.encode([word]))

        # D holds scores, e.g. [[409.0104  404.91092]]
        # I holds row indices (0-based), e.g. [[0 2 3 4 1 5]]
        D, I = IndexFlatIP.search(answer, topK)

        a = len(word)
        entity_scores = []

        for sim_score, index in zip(D[0], I[0]):
            entity = entities[index]
            # Character-overlap computation.
            sim_num = 0
            b = len(entity)  # candidate entity length
            c = len(set(entity + word))
            temp = []
            for w in word:
                if w in entity:
                    sim_num += 1
            score1 = sim_num / c  # overlap score
            temp.append(score1)
            # Cosine-similarity score from Faiss (inner product of unit vectors).
            score2 = sim_score
            temp.append(score2)
            # Edit-distance score.
            score3 = 1 - self.edit_distance(word, entity) / (a + b)
            if score3:
                temp.append(score3)
            score = sum(temp) / len(temp)

            if score >= threshold and sim_num / b > overlap:
                entity_scores.append((entity, score))

        entity_scores.sort(key=lambda k: k[1], reverse=True)
        return entity_scores

#
# if __name__ == '__main__':
#     # 暴力匹配模式
#     # targets = ['蓝鼓膜与胆固醇肉芽肿', '耳鼓膜穿孔', '脑积水','脑膜炎','牙齿感觉过敏症','脑积水']
#     # identifier = EntityIdentifier()
#     # temp = identifier.bert_client.encode(targets)
#     # dictionary = {}
#     # for i in range(len(targets)):
#     #     dictionary[targets[i]] = temp[i]
#     # while 1:
#     #     question = input("用户：") # question为单词
#     #     print(identifier.simCal(question,dictionary)) # [('脑瘫', array([0.81486887], dtype=float32))]
#     #     # print(simCal(question,targets,temp)[0][0])
#
#     identifier = EntityIdentifier()
#     # faiss匹配模式
#     choice = input('请选择匹配库：小集合（输入0）；完整领域词汇（输入1）：')
#     if int(choice) == 0:
#         targets = ['蓝鼓膜与胆固醇肉芽肿', '耳鼓膜穿孔', '脑积水', '脑膜炎', '牙齿感觉过敏症', '脑瘫', '睥翻粘睑', '皮脂腺痣', '眼睑皮脂腺癌', '先天性喉喘鸣', '锁合',
#                    '酸菜包', '白菜包', '干咸菜包子', '蘑菇肉丁包', '韭菜包子', '甲型肝炎抗体检测（抗-HAV）', '黄酒']
#         temp = identifier.bert_encode(targets)
#     else:
#         cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])
#         region_path = os.path.join(cur_dir, 'dict/region.txt')
#         targets = [i.strip() for i in open(region_path, encoding='UTF-8') if i.strip()]
#         # time_begin = datetime.now()
#         # temp = identifier.bert_encode(targets)
#         # time_end = datetime.now()
#         # print('region vectors constructed...and consumed {0} minutes'.format(
#         #     round((time_end - time_begin).seconds / 60, 2)))
#         # # 存入文件
#         # np.savetxt('region_vectors', temp)
#
#         time_begin = datetime.now()
#         # 读取文件
#         temp = np.loadtxt('../../region_vectors').astype(np.float32)
#         time_end = datetime.now()
#         print('region vectors constructed...and consumed {0} minutes'.format(
#             round((time_end - time_begin).seconds / 60, 2)))
#
#     # 将生成的向量存入文件
#     # np.savetxt('test_vector', temp)
#     # a = np.loadtxt('test_vector').astype(np.float32)
#     # print(type(a))
#     # print(type(temp))
#     # print(np.array_equal(temp,a))
#     # print(a.shape[1])
#     # faissIP = identifier.build_faissIP(a.shape[1], a)
#     faissIP = identifier.build_faissIP(temp.shape[1], temp)
#     print('faissIP finished...')
#     while 1:
#         word = input("用户：")  # question为单词
#         topK = int(input("候选实体数目："))
#         threshold = input("设置阈值：")
#         print(
#             identifier.simCal(word, faissIP, topK, targets, threshold))  # [('脑瘫', array([0.81486887], dtype=float32))]

# NOTE(review): call-graph generation is a development-time utility.  It must
# not run at import time — it needs pyan/IPython and a machine-specific
# absolute path, and it previously crashed any `import` of this module.
if __name__ == '__main__':
    import pyan
    from IPython.display import HTML

    # TODO: replace the hard-coded absolute path with a path relative to this file.
    HTML(pyan.create_callgraph(
        filenames=r"C:/CIKE/diploma project/Git/Back/kg-search-backend/QASystemOnMedicalKG/entity_extractor.py",
        format="svg"))