#!/usr/bin/env python3
# coding: utf-8
# File: question_classifier.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-4

import operator
import os
import unittest
from datetime import datetime

import ahocorasick
import numpy as np
import paddlehub as hub

from entity_extractor import *
from entity_identifier import *


class QuestionClassifier:
    """Entity linking and question-type classification for medical questions.

    Pipeline per question (see :meth:`classify`):
      1. link mentions in the question to dictionary entities — hard
         Aho-Corasick string matching (currently disabled) or BERT-vector
         similarity search backed by a faiss inner-product index;
      2. classify the relation between the linked entities with an
         ERNIE-tiny sequence classifier;
      3. refine (or, for single-entity questions, replace) the model
         decision with keyword rules.
    """

    def __init__(self):
        cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])
        # Paths of the feature-word dictionaries.
        self.disease_path = os.path.join(cur_dir, 'dict/disease.txt')
        self.department_path = os.path.join(cur_dir, 'dict/department.txt')
        self.check_path = os.path.join(cur_dir, 'dict/check.txt')
        self.drug_path = os.path.join(cur_dir, 'dict/drug.txt')
        self.food_path = os.path.join(cur_dir, 'dict/food.txt')
        self.producer_path = os.path.join(cur_dir, 'dict/producer.txt')
        self.symptom_path = os.path.join(cur_dir, 'dict/symptom.txt')
        self.deny_path = os.path.join(cur_dir, 'dict/deny.txt')
        self.region_path = os.path.join(cur_dir, 'dict/region.txt')
        # Load the feature words. _load_words closes each file properly;
        # the original left nine file handles dangling.
        self.disease_wds = self._load_words(self.disease_path)
        self.department_wds = self._load_words(self.department_path)
        self.check_wds = self._load_words(self.check_path)
        self.drug_wds = self._load_words(self.drug_path)
        self.food_wds = self._load_words(self.food_path)
        self.producer_wds = self._load_words(self.producer_path)
        self.symptom_wds = self._load_words(self.symptom_path)
        self.region_words = self._load_words(self.region_path)  # full domain vocab (~43k words)
        self.deny_words = self._load_words(self.deny_path)
        # NOTE: the domain Aho-Corasick tree is built lazily inside
        # check_medical() — the hard-match path is disabled by default.

        # entity name -> list of entity categories
        self.wdtype_dict = self.build_wdtype_dict()

        # Question keywords per question type (exact duplicates removed —
        # membership tests are unaffected).
        self.desc_qwds = ['描述', '什么是', '讲下', '说下', '介绍']
        self.symptom_qwds = ['症状', '表征', '现象', '症候', '表现']
        self.cause_qwds = ['原因', '病因', '成因', '为什么', '怎么会', '怎样才', '咋样才', '怎样会', '如何会', '为啥', '为何', '如何才会', '怎么才会',
                           '会导致',
                           '会造成', '会引起']
        self.acompany_qwds = ['并发症', '并发', '一起发生', '一并发生', '一起出现', '一并出现', '一起产生', '一同产生', '一并产生', '一同发生', '一同出现',
                              '伴随发生', '伴随', '共现', '共发', '一并']
        self.food_qwds = ['饮食', '饮用', '吃', '食', '伙食', '膳食', '喝', '忌口', '补品', '保健品', '食谱', '菜谱', '食用', '食物',
                          '食品']
        self.drug_qwds = ['药', '药品', '用药', '胶囊', '口服液', '炎片']
        self.prevent_qwds = ['预防', '防范', '抵制', '抵御', '防止', '躲避', '逃避', '避开', '免得', '逃开', '避掉', '躲开', '躲掉', '绕开',
                             '怎样才能不', '怎么才能不', '咋样才能不', '咋才能不', '如何才能不',
                             '怎样才不', '怎么才不', '咋样才不', '咋才不', '如何才不',
                             '怎样才可以不', '怎么才可以不', '咋样才可以不', '咋才可以不', '如何可以不',
                             '怎样才可不', '怎么才可不', '咋样才可不', '咋才可不', '如何可不', '不', '可以少', '能少', '能够少', '减少', '降低', '小心', '杜绝']
        self.lasttime_qwds = ['周期', '多久', '多长时间', '多少时间', '几天', '几年', '多少天', '多少小时', '几个小时', '多少年']
        self.cureway_qwds = ['怎么治疗', '如何医', '怎么医', '怎么治', '咋治', '怎样治', '怎样医', '咋医治', '如何治', '疗法', '怎么办',
                             '咋办', '方式', '疗方法', '治方法', '方案']
        self.cureprob_qwds = ['希望大', '几率', '概率', '几成', '比例', '可能性', '能治', '可治', '可以治', '可以医',
                              '能够医', '能医', '医好', '治好', '治愈', '康复', '痊愈']
        self.easyget_qwds = ['易感人群', '容易感染', '易得', '易患', '易发人群', '啥人', '易感', '什么人', '哪些人', '哪类人', '什么病人', '什么患者', '感染',
                             '染上', '得上', '目标']
        self.check_qwds = ['检查', '检测', '检查项目', '查出', '测出', '试出', '检出', '确诊', '诊断', '查', '测', '检']
        self.belong_qwds = ['属于什么科', '属于', '什么科', '啥科', '哪个科', '科室', '哪里看', '挂什么', '挂号', '挂什么号', '挂啥', '主治科', '哪看']
        self.cure_qwds = ['治疗什么', '治啥', '治疗啥', '医治啥', '治愈啥', '主治啥', '主治什么', '治什么', '有什么用', '有何用', '用处', '用途',
                          '有什么好处', '有什么益处', '有何益处', '好处', '益处', '用来', '用来做啥', '用来作甚', '需要', '要', '适合什么人吃', '缓解', '舒缓',
                          '有益', '治疗药', '治疗的药', '有用', '可以吃', '可以用', '能吃', '能够吃', '能用', '能够用']
        self.department_qwds = ['看什么病', '看病', '看的病', '看的疾病', '看啥病', '治什么病', '治的病', '治的疾病', '治疗的疾病', '治疗的病', '治什么疾病',
                                '治疗什么疾病',
                                '主治的疾病']
        self.same_qwds = ['共同', '相同', '一样']
        self.producer_qwds = ['在售', '销售', '出售', '市面', '市场', '生产', '厂商', '厂家', '产品', '公司', '药店', '品牌', '牌子', '在卖']

        # Union of all relation-question keywords (desc_qwds deliberately
        # excluded: the description fallback in classify() tests against it).
        self.qwds_set = set(
            self.symptom_qwds + self.cause_qwds + self.acompany_qwds + self.food_qwds + self.drug_qwds + self.prevent_qwds +
            self.lasttime_qwds + self.cureway_qwds + self.cureprob_qwds + self.easyget_qwds + self.check_qwds + self.belong_qwds +
            self.cure_qwds + self.department_qwds + self.same_qwds + self.producer_qwds)

        # Similarity-matching helpers (segmentation + BERT encoding / faiss).
        self.extractor = EntityExtractor()
        self.identifier = EntityIdentifier()

        # All domain words share one embedding matrix; per-category encoding
        # was only used for experiments and has been removed.
        time_begin = datetime.now()
        # Option 1 — encode online:
        # region_vectors = self.identifier.bert_encode(list(self.region_words))
        # Option 2 — load precomputed vectors from disk:
        region_vectors = np.loadtxt('../../region_vectors').astype(np.float32)
        time_end = datetime.now()
        print('region vectors constructed...and consumed {0} minutes'.format(
            (time_end - time_begin).seconds / 60))

        # faiss inner-product index for fast vector matching.
        self.region_faissIP = self.identifier.build_faissIP(region_vectors.shape[1], region_vectors)

        # ERNIE-tiny relation-classification model (10-way).
        label_map = {0: 'is_symptom', 1: 'is_food', 2: 'is_belonging', 3: 'is_need_check', 4: 'is_recommend_drug',
                     5: 'intersection_symptom',
                     6: 'intersection_disease', 7: 'compare_disease_cured_prob', 8: 'compare_disease_lasttime',
                     9: 'other'}
        self.ernie = hub.Module(
            name='ernie_tiny',
            version='2.0.1',
            task='seq-cls',
            load_checkpoint='../../model/relation_identification/best_model/ten_classification/model.pdparams',
            label_map=label_map
        )

        print('model init finished ......')

    @staticmethod
    def _load_words(path):
        """Return one stripped, non-empty entry per line of the UTF-8 file at *path*."""
        with open(path, encoding='UTF-8') as f:
            return [line.strip() for line in f if line.strip()]

    def classify(self, question):
        """Entity linking + relation/question-type classification.

        :param question: raw user question string.
        :return: ``{'args': {entity: [categories]}, 'question_types': [...]}``,
                 or ``{}`` when no entity can be linked at all.
        """
        data = {}
        # Hard string matching is disabled: when a question mixes a fuzzy
        # mention with an exact one, combining hard match and vector match
        # misbehaves, so we go straight to similarity search.
        medical_dict = {}
        # medical_dict = self.check_medical(question)
        if not medical_dict:
            # No entity from hard matching — fall back to similarity search.
            time_find_sim_begin = datetime.now()
            medical_dict = self.find_sim_words(question)
            time_find_sim_end = datetime.now()
            print(
                'Query by similarity takes {0} seconds'.format((time_find_sim_end - time_find_sim_begin).seconds))
            if not medical_dict:
                return {}
        data['args'] = medical_dict

        # Collect every entity category mentioned in the question.
        types = []
        for type_ in medical_dict.values():
            types += type_

        question_types = []

        # ERNIE-tiny decides whether a known relation holds between the
        # linked entities. predict() returns e.g.
        # (['is_recommend_drug'], [[0.00017, 0.00545, ..., 0.00026]])
        results, probs = self.ernie.predict([[question]], max_seq_len=50, batch_size=1, use_gpu=False, return_prob=True)
        result = results[0]
        # Rule-assisted post-check of the model decision.
        if len(data['args']) == 1:
            # A single entity means a simple question; ignore the model label.
            print('只有一个实体')
            result = 'other'
        elif result != 'other':
            # Template rule: disambiguate 'is_symptom' vs 'is_accompany_with'
            # (two diseases in the question means accompaniment, not symptom).
            if result == 'is_symptom':
                if types.count('disease') == 2:
                    if len(types) > 2:
                        question_types.append(result)
                    result = 'is_accompany_with'
            question_types.append(result)

        # ---- simple (single-relation) questions, resolved by keyword rules ----
        if result == 'other':
            # symptoms of a disease
            if self.check_words(self.symptom_qwds, question) and ('disease' in types):
                question_type = 'disease_symptom'
                question_types.append(question_type)

            # disease from symptoms
            if self.check_words(self.symptom_qwds, question) and ('symptom' in types):
                if '同时' in question or ('既' in question and '又' in question):
                    # Compensates for the model's weakness on these two types.
                    question_type = 'intersection_disease'
                else:
                    question_type = 'symptom_disease'
                question_types.append(question_type)

            # cause
            if self.check_words(self.cause_qwds, question) and ('disease' in types):
                question_type = 'disease_cause'
                question_types.append(question_type)
            # complications
            if self.check_words(self.acompany_qwds, question) and ('disease' in types):
                question_type = 'disease_acompany'
                question_types.append(question_type)

            # recommended drugs
            if self.check_words(self.drug_qwds, question) and 'disease' in types:
                question_type = 'disease_drug'
                question_types.append(question_type)

            # which diseases a drug treats
            if self.check_words(self.cure_qwds, question) and 'drug' in types:
                question_type = 'drug_disease'
                question_types.append(question_type)

            # recommended / forbidden food
            if ('disease_drug' not in question_types) and self.check_words(self.food_qwds,
                                                                           question) and 'disease' in types:
                deny_status = self.check_words(self.deny_words, question)
                if deny_status:
                    question_type = 'disease_not_food'
                else:
                    question_type = 'disease_do_food'
                question_types.append(question_type)

            # disease from a known food
            # NOTE: 'and' binds tighter than 'or', so types == ['food'] alone
            # also triggers this branch (original behavior, kept as-is).
            if ('drug_disease' not in question_types) and self.check_words(
                    self.food_qwds + self.cure_qwds + ['有害', '有坏', '加重', '严重'],
                    question) and 'food' in types or types == ['food']:
                deny_status = self.check_words(self.deny_words, question)
                if deny_status:
                    question_type = 'food_not_disease'
                else:
                    question_type = 'food_do_disease'
                question_types.append(question_type)

            # checks required by a disease
            if self.check_words(self.check_qwds, question) and 'disease' in types:
                question_type = 'disease_check'
                question_types.append(question_type)

            # diseases diagnosed by a known check
            if self.check_words(self.check_qwds + self.cure_qwds, question) and 'check' in types:
                question_type = 'check_disease'
                question_types.append(question_type)

            # prevention
            if self.check_words(self.prevent_qwds, question) and 'disease' in types:
                question_type = 'disease_prevent'
                question_types.append(question_type)

            # treatment duration
            if self.check_words(self.lasttime_qwds, question) and 'disease' in types:
                question_type = 'disease_lasttime'
                question_types.append(question_type)

            # treatment methods
            if self.check_words(self.cureway_qwds, question) and 'disease' in types:
                question_type = 'disease_cureway'
                question_types.append(question_type)

            # cure probability
            if self.check_words(self.cureprob_qwds, question) and 'disease' in types:
                question_type = 'disease_cureprob'
                question_types.append(question_type)

            # susceptible population
            if self.check_words(self.easyget_qwds, question) and 'disease' in types:
                question_type = 'disease_easyget'
                question_types.append(question_type)

            # department a disease belongs to
            if self.check_words(self.belong_qwds, question) and 'disease' in types:
                question_type = 'disease_department'
                question_types.append(question_type)

            # diseases a department is responsible for
            if self.check_words(self.department_qwds + self.cure_qwds, question) and 'department' in types:
                question_type = 'department_disease'
                question_types.append(question_type)

            # drug producer / drugs on sale
            if self.check_words(self.producer_qwds, question) and 'drug' in types:
                question_type = 'drug_producer'
                question_types.append(question_type)

        # Fall back to the disease description when nothing matched, or when
        # the question only asks for a description.
        if (question_types == [] or (self.check_words(self.desc_qwds, question) and not self.check_words(self.qwds_set,
                                                                                                         question))) and 'disease' in types:
            question_types = ['disease_desc']

        # Last resort for symptom-only questions.
        if question_types == [] and 'symptom' in types:
            question_types = ['symptom_disease']

        # Merge all classification results into one result dict.
        print('types:', types)
        data['question_types'] = question_types

        return data

    def build_wdtype_dict(self):
        """Map every domain word to the list of entity categories it belongs to.

        Membership is tested against sets; the original scanned seven lists
        per word, O(n*m) over the ~43k-word domain vocabulary. Category order
        in each value list is unchanged.
        """
        category_sets = [
            ('disease', set(self.disease_wds)),
            ('department', set(self.department_wds)),
            ('check', set(self.check_wds)),
            ('drug', set(self.drug_wds)),
            ('food', set(self.food_wds)),
            ('symptom', set(self.symptom_wds)),
            ('producer', set(self.producer_wds)),
        ]
        wd_dict = dict()
        for wd in self.region_words:
            wd_dict[wd] = [label for label, words in category_sets if wd in words]
        return wd_dict

    def build_actree(self, wordlist):
        """Build an Aho-Corasick automaton over *wordlist* for fast filtering.

        Algorithm walkthrough: https://zhuanlan.zhihu.com/p/158767004
        """
        actree = ahocorasick.Automaton()
        for index, word in enumerate(wordlist):
            actree.add_word(word, (index, word))
        actree.make_automaton()
        return actree

    def check_medical(self, question):
        """Hard-match domain entities in *question* via the Aho-Corasick tree.

        :return: ``{entity: [categories]}`` for every maximal match; empty
                 when the question contains no unambiguous dictionary entity
                 (the caller then falls back to vector similarity).
        """
        # Fix: the tree used to be dereferenced without ever being built
        # (its construction in __init__ was commented out) — build it lazily
        # on first use so this path works when re-enabled.
        if not hasattr(self, 'region_tree'):
            self.region_tree = self.build_actree(list(self.region_words))
        region_wds = []
        # .iter() yields (end_index, (insert_order, original_value)) per hit.
        for hit in self.region_tree.iter(question):
            region_wds.append(hit[1][1])
        # Drop any match that is a proper substring of another match so
        # overlapping hits are not reported twice.
        stop_wds = [wd1 for wd1 in region_wds
                    for wd2 in region_wds if wd1 in wd2 and wd1 != wd2]
        final_wds = [wd for wd in region_wds if wd not in stop_wds]
        # Map each surviving word to its entity categories.
        final_dict = {wd: self.wdtype_dict.get(wd) for wd in final_wds}
        return final_dict

    def find_sim_words(self, question):
        """Link question words to domain entities by embedding similarity.

        :return: ``{entity: [categories]}`` keyed by the top-1 candidate of
                 each segmented word (first occurrence wins on duplicates).
        """
        # Segment the question; the relation keywords are handed to the
        # segmenter as user words.
        cut_words = self.extractor.participle(question, list(self.qwds_set))
        final_dict = {}
        topK = 20  # hoisted: loop-invariant candidate count
        for word in cut_words:
            candidates = self.identifier.simCal(word, self.region_faissIP, topK, list(self.region_words))
            print('--- 生成候选实体 ---')
            # Keep only the best-scoring candidate, and only the first time
            # it shows up across the question's words.
            if len(candidates) >= 1:
                if candidates[0][0] not in final_dict.keys():
                    final_dict[candidates[0][0]] = self.wdtype_dict.get(candidates[0][0])

        return final_dict

    def check_words(self, wds, sent):
        """Return True iff any keyword in *wds* occurs as a substring of *sent*."""
        return any(wd in sent for wd in wds)


# if __name__ == '__main__':
#     handler = QuestionClassifier()
#     while 1:
#         question = input('input an question:')
#         data = handler.classify(question)
#         print(data)

