import gensim
import numpy as np
import time
import datetime
import jieba
import re

from GLOBALPATH import Global

# NOTE(review): module-level counter that is never read or written anywhere in
# this file — appears to be dead code, but kept in case external code imports it.
found = 0


class MatchAtc(object):
    '''
    Article similarity matching tool.

    Wraps a gensim KeyedVectors word-embedding model and scores sentence
    similarity after jieba word segmentation and punctuation normalization.
    '''

    # Translation table mapping common full-width (CJK) punctuation to ASCII
    # and unifying all quote styles to a double quote. Built once at class
    # creation so unify_txt does a single C-level pass per call.
    _PUNCT_TABLE = str.maketrans({
        '，': ',', '：': ':', '！': '!', '；': ';',
        '“': '"', '”': '"', '\'': '"', '‘': '"', '’': '"',
        '（': '(', '）': ')',
    })

    def __init__(self):
        # Load the pre-trained word-vector model (memory-mapped, read-only).
        self.wv_from_text = gensim.models.KeyedVectors.load(Global.word2vec_bin_path, mmap='r')

    def unify_txt(self, txt):
        '''
        Normalize punctuation and case so visually equivalent texts compare equal.

        :param txt: raw input string
        :return: normalized string (full-width punctuation mapped to ASCII,
                 all quotes unified to '"', lower-cased)
        '''
        return txt.translate(self._PUNCT_TABLE).lower()

    def n_similarity(self, src_wd_list, match_wd_list, zhanweifu=0):
        '''
        Similarity between two word lists.

        Out-of-vocabulary words reported by gensim are swapped for a numeric
        placeholder token in both lists and the comparison is retried.

        :param src_wd_list: source word list (mutated in place on OOV replacement)
        :param match_wd_list: candidate word list (mutated in place on OOV replacement)
        :param zhanweifu: placeholder counter used to generate unique substitute tokens
        :return: tuple (formatted percentage string, raw similarity float)
        :raises Exception: re-raised when the model error is not a recognizable
                 "Key '...' not present" message (the previous implementation
                 entered an infinite loop here).
        '''
        try:
            sim = self.wv_from_text.n_similarity(src_wd_list, match_wd_list)
            # Two sentences with the same words in a different order score 1.0;
            # nudge the score down so they are not treated as exact duplicates.
            if sim == 1.0 and ''.join(src_wd_list) != ''.join(match_wd_list):
                sim = sim - 0.005
            fmt_sim = '{}%'.format(round(sim * 100, 2))
            return fmt_sim, sim
        except Exception as e:
            # gensim raises KeyError("Key 'xxx' not present") for OOV words.
            match_num = re.search('"Key \'(.*)\' not present"', str(e))
            if match_num is None:
                # Unknown failure: propagate instead of spinning forever
                # (the old `while True` never terminated on this path).
                raise
            zhanweifu += 1
            exception_wd = match_num[1]
            replace_wd = str(zhanweifu)
            # Replace the unknown word with a numeric placeholder in both lists.
            if exception_wd in src_wd_list:
                src_wd_list.remove(exception_wd)
                src_wd_list.append(replace_wd)
            if exception_wd in match_wd_list:
                match_wd_list.remove(exception_wd)
                match_wd_list.append(replace_wd)
            print(src_wd_list)
            print(match_wd_list)
            print('-' * 40)
            return self.n_similarity(src_wd_list, match_wd_list, zhanweifu)

    def match_sentence(self, src_txt, match_txt):
        '''
        Match a single pair of sentences.

        :param src_txt: source sentence
        :param match_txt: candidate sentence
        :return: tuple (formatted percentage string, raw similarity float)
                 as produced by n_similarity
        '''
        src_txt = self.unify_txt(src_txt)
        match_txt = self.unify_txt(match_txt)

        # Segment both sentences into word lists before vector comparison.
        src_wd_list = jieba.lcut(src_txt)
        match_wd_list = jieba.lcut(match_txt)
        print(src_wd_list)
        print(match_wd_list)
        print('-' * 40)

        return self.n_similarity(src_wd_list, match_wd_list)

    def match_sentence_list(self, src_txt, match_txt_list):
        '''
        Match one source sentence against a list of candidates.

        :param src_txt: source sentence
        :param match_txt_list: iterable of candidate sentences
        :return: list of dicts {'txt': candidate, 'fmt_sim': str, 'sim': float}
        '''
        fmt_sim_list = []
        for match_txt in match_txt_list:
            fmt_sim, sim = self.match_sentence(src_txt, match_txt)
            fmt_sim_list.append({
                'txt': match_txt,
                'fmt_sim': fmt_sim,
                'sim': sim,
            })
        return fmt_sim_list

    def screening_match(self, fmt_sim_list):
        '''
        Post-process match results:
        1. collect texts whose similarity exceeds 99%
        2. collect the 10 highest-similarity texts
        (the original docstring said 5, but the code has always taken 10)

        :param fmt_sim_list: list of {'txt', 'fmt_sim', 'sim'} dicts; sorted in place
        :return: tuple (matches with sim > 0.99, top-10 matches by sim)
        '''
        # Sort by similarity, highest first (in place, matching original behavior).
        fmt_sim_list.sort(key=lambda item: item['sim'], reverse=True)
        # Slicing never raises, so no explicit length check is needed.
        match_no10_list = fmt_sim_list[:10]
        match_per99_list = [item for item in fmt_sim_list if item['sim'] > 0.99]

        return match_per99_list, match_no10_list



if __name__ == '__main__':

    src_txt = "悬垂线夹XTS-2只"
    match_txt = "MPP管（内径）200*16(元/米）"
    match_txt_list = [
        '电缆附件10kv户内电缆头冷缩3 * 95户内（套)',
        '电缆附件10kv户外电缆头冷缩3*70户外（套)',
        '电缆附件10kv户外电缆头冷缩3*500-630户外（套)',
        '电缆附件35kv中间接头冷缩3*70（套)',
        # BUG FIX: the trailing comma on the next line was missing, so this
        # entry and the MPP one were silently concatenated into one string.
        '电缆附件1KV中间连接热缩JSY-1/5*150-240（套)',
        'MPP管（内径）200*16(元/米）',
        '悬垂线夹XTS-2只',
        '悬垂线夹XTS-100只',
        '悬垂线夹XTS-10只',
        '悬垂线夹XTS-1只',
        '悬垂线夹XTS-20只',
        '悬垂线夹XTS-2个',
        '悬垂线夹xts-2个',
        '线夹悬垂XTS2只',
        '线夹悬垂XTS-2只'
    ]

    ma = MatchAtc()

    # Score the source text against every candidate, then split the results
    # into ">99% similarity" matches and the top-10 ranking.
    fmt_sim_list = ma.match_sentence_list(src_txt, match_txt_list)
    # Renamed from match_no5_list: it holds the top-10 list and is printed
    # under the "match_no10_list" label.
    match_per99_list, match_no10_list = ma.screening_match(fmt_sim_list)
    print("match_per99_list: {}".format(match_per99_list))
    print("match_no10_list: {}".format(match_no10_list))