import collections
import os
import re
import string

# import jieba
import ahocorasick

'''
    特定领域分词工具pkuseg
'''
import pkuseg
from queue import Queue


class EntityExtractor:
    """Extract candidate medical entity words from a user question.

    Segments the question with the pkuseg medical-domain model, merges
    adjacent noun-like tokens with a small rule system, then filters out
    stopwords and question feature words.
    """

    def __init__(self):
        # Directory containing this file; all resource paths are anchored here.
        cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])
        self.stopwords_path = os.path.join(cur_dir, 'data/stop_words.utf8')
        deny_path = os.path.join(cur_dir, 'dict/deny.txt')
        # Use context managers so file handles are closed deterministically.
        with open(deny_path, encoding='UTF-8') as f:
            deny_words = [i.strip() for i in f if i.strip()]
        with open(self.stopwords_path, 'r', encoding='utf8') as f:
            stop_words = [w.strip() for w in f if w.strip()]
        self.stopwords = list(set(stop_words + deny_words))
        # pkuseg auto-downloads the domain model on first use; postag=True
        # makes cut() return (word, pos_tag) tuples.
        # Fix: the user dict was a CWD-relative path while every other
        # resource used cur_dir — anchor it to cur_dir as well.
        self.seg = pkuseg.pkuseg(model_name='medicine',
                                 user_dict=os.path.join(cur_dir, 'dict/region.txt'),
                                 postag=True)
        self.actree = self.build_actree(self.stopwords)
        print('entity extractor init finished...')

    def participle(self, question, medical_words=None):
        """Segment *question* and return deduplicated candidate entity words.

        @param question: raw user question (str)
        @param medical_words: extra feature words to strip from the result;
                              defaults to an empty list
        @return: list of unique merged words, each of length >= 2, with
                 stopwords and feature words removed
        """
        # None sentinel instead of a mutable default argument.
        if medical_words is None:
            medical_words = []
        # '是' is tagged as a verb (v) by the segmenter and conflicts with
        # the merge rules below, so neutralize it first.
        question = question.replace('是', '，')

        characteristics_of_noun = medical_words + [
            '关系', '症状', '病症', '治愈', '治好', '主治', '关联', '病人', '患者', '病', '病情', '啥病', '效果', '好处', '坏处', '不良', '影响',
            '最近', '多大概率', '才能', '多久', '怎样', '怎么', '才可以', '可以', '能够', '在售', '售药', '在售药', "一般", "常见"
        ]
        interrogative = ['吗', '咩', '嘛', '哇', '哈', '吧', '诶', '欸', '唉', '哎', "啥", "什么", "肾摸", "肾么", "可能"]
        # Strip surrounding whitespace.
        question = question.strip()
        # Remove interrogative particles before segmentation.
        sentence = self.replace_feature(question, interrogative, '，')
        # Domain-model segmentation -> [(word, tag), ...]
        words_cut = self.seg.cut(sentence)
        # Second filtering pass over the segmented tokens.
        words_cut = self.cutwords_filter(words_cut, characteristics_of_noun)
        print('分词后再进行一轮过滤\n', words_cut)
        # Rule-based merging: rotate a deque terminated by a '[SEP]'
        # sentinel, gluing adjacent noun-like tokens into single nouns.
        q = collections.deque(words_cut + [('[SEP]', 'sep')])
        if len(q) <= 1:
            return []

        # Whether any merge rule fired during the current pass.
        ischange = False

        # Stop once a full pass produced no change and the sentinel is back
        # at position 1.  Tag reference:
        # https://github.com/lancopku/pkuseg-python/blob/master/tags.txt
        while not (q[1][0] == '[SEP]' and not ischange) and len(q) > 2:
            if q[1][1] == 'n':
                if q[0][1] in ['n', 'a', 'b', 'j', 'nz', 'vn', 't', 'nr']:
                    # Merge "<modifier/noun> <noun>" into one noun.
                    temp1 = q.popleft()
                    temp2 = q.popleft()
                    q.appendleft((temp1[0] + temp2[0], 'n'))
                    ischange = True
                else:
                    q.append(q.popleft())
            elif len(q) > 3 and q[0][1] == 'n' and q[1][1] == 'vn' and q[2][1] == 'n':
                # Merge "noun + verbal-noun + noun" into one noun.
                temp1 = q.popleft()
                temp2 = q.popleft()
                temp3 = q.popleft()
                q.appendleft((temp1[0] + temp2[0] + temp3[0], 'n'))
                ischange = True
            else:
                if q[1][0] == '[SEP]':
                    # Sentinel reached position 1: rotate it past and start
                    # a fresh pass with the change flag cleared.
                    q.extend([q.popleft(), q.popleft()])
                    ischange = False
                else:
                    q.append(q.popleft())
        if q[1][0] == '[SEP]':
            # Drop the sentinel, restoring sentence order.
            q.append(q.popleft())
            q.popleft()

        words_merge = [item[0] for item in q]

        # Build the blocked-word set once for O(1) membership tests instead
        # of scanning a freshly concatenated list for every word.
        blocked = set(self.stopwords).union(characteristics_of_noun)
        words = [w for w in words_merge if w not in blocked and len(w) >= 2]

        print("分词结果输出：\n", list(set(words)))
        return list(set(words))

    def replace_feature(self, sentence, feature_words, substitution):
        """Replace every occurrence of each feature word with *substitution*.

        str.replace is a no-op when the word is absent, so no pre-check
        is needed.
        """
        for word in feature_words:
            sentence = sentence.replace(word, substitution)

        return sentence

    def cutwords_filter(self, cut_words, block_words):
        '''
        Remove blocked words from the segmenter output.

        @param cut_words: segmenter output, e.g.
                          [('毛囊炎', 'nr'), ('的', 'u'), ('常见', 'a'), ('症状', 'n')]
        @param block_words: list of words to drop
        @return: filtered list of (word, tag) tuples, order preserved
        '''
        blocked = set(block_words)  # O(1) membership per token
        return [item for item in cut_words if item[0] not in blocked]

    def build_actree(self, wordlist):
        """Build an Aho-Corasick automaton over *wordlist* for fast
        multi-pattern substring matching.

        Algorithm overview: https://zhuanlan.zhihu.com/p/158767004
        """
        actree = ahocorasick.Automaton()
        for index, word in enumerate(wordlist):
            actree.add_word(word, (index, word))
        actree.make_automaton()
        return actree


if __name__ == '__main__':
    extractor = EntityExtractor()

    # Question-word vocabularies, one sub-list per intent category
    # (symptom, cause, complication, food, drug, prevention, duration,
    # cure method, cure probability, susceptibility, check, department,
    # usage, treated diseases, sameness).  Duplicates inside the lists
    # are harmless — everything is folded into a single set below.
    question_word_groups = [
        ['症状', '表征', '现象', '症候', '表现'],
        ['原因', '成因', '为什么', '怎么会', '怎样才', '咋样才', '怎样会', '如何会', '为啥', '为何', '如何才会', '怎么才会', '会导致',
         '会造成', '会引起'],
        ['并发症', '并发', '一起发生', '一并发生', '一起出现', '一并出现', '一同发生', '一同出现', '伴随发生', '伴随', '共现'],
        ['饮食', '饮用', '吃', '食', '伙食', '膳食', '喝', '菜', '忌口', '补品', '保健品', '食谱', '菜谱', '食用', '食物', '补品',
         '食品'],
        ['药', '药品', '用药', '胶囊', '口服液', '炎片'],
        ['预防', '防范', '抵制', '抵御', '防止', '躲避', '逃避', '避开', '免得', '逃开', '避开', '避掉', '躲开', '躲掉', '绕开',
         '怎样才能不', '怎么才能不', '咋样才能不', '咋才能不', '如何才能不',
         '怎样才不', '怎么才不', '咋样才不', '咋才不', '如何才不',
         '怎样才可以不', '怎么才可以不', '咋样才可以不', '咋才可以不', '如何可以不',
         '怎样才可不', '怎么才可不', '咋样才可不', '咋才可不', '如何可不'],
        ['周期', '多久', '多长时间', '多少时间', '几天', '几年', '多少天', '多少小时', '几个小时', '多少年'],
        ['怎么治疗', '如何医治', '怎么医治', '怎么治', '怎么医', '如何治', '医治方式', '疗法', '咋治', '怎么办', '咋办', '咋治'],
        ['多大概率能治好', '多大几率能治好', '治好希望大么', '几率', '几成', '比例', '可能性', '能治', '可治', '可以治', '可以医'],
        ['易感人群', '容易感染', '易发人群', '什么人', '哪些人', '感染', '染上', '得上'],
        ['检查', '检查项目', '查出', '测出', '试出', '确诊', '诊断'],
        ['属于什么科', '属于', '什么科', '啥科', '哪个科', '科室', '哪里看', '挂什么', '挂号', '挂什么号', '挂啥'],
        ['治疗什么', '治啥', '治疗啥', '医治啥', '治愈啥', '主治啥', '主治什么', '治什么', '有什么用', '有何用', '用处', '用途',
         '有什么好处', '有什么益处', '有何益处', '用来', '用来做啥', '用来作甚', '需要', '要'],
        ['看什么病', '主治什么病', '治疗什么病', '治的病', '治疗的病', '主治什么疾病', '治疗什么疾病', '主治的疾病', '治疗的疾病', '治啥病'],
        ['共同', '相同', '一样'],
    ]

    # Flatten every category into one deduplicated vocabulary.
    qwds_set = set()
    for group in question_word_groups:
        qwds_set.update(group)

    while True:
        question = input("输入：")

        print(extractor.participle(question, list(qwds_set)))
