import jieba
from pyltp import Segmentor, Postagger, Parser
from qa_engine.aiml import pedia_response
from qa_engine.handler.base import BaseHandler
from qa_engine.user import User
from qa_engine.util import read_config_path


class PattenHandler(BaseHandler):
    """
    Sentence-pattern handler.

    Extracts the dependency-parse structure of a question, flattens it into
    a relation-tag pattern string (e.g. ``SBVHEDVOB``), and matches that
    pattern against the configured AIML kernels.  Mainly used for
    encyclopedia-style question answering.

    NOTE(review): the class name looks like a typo for ``PatternHandler``;
    kept unchanged because external code registers handlers by this name.
    """

    def __init__(self, config: dict):
        super().__init__(config)
        self._register_pyltp_util()

    def handle(self, question: str, user: User = None, _position: str = ''):
        """
        Try to answer ``question`` by matching its syntactic pattern
        (handler interface).

        :param question: the question text
        :param user: the asking user (unused here; part of the handler API)
        :param _position: position hint (unused here; part of the handler API)
        :return: a single-element list ``[answer]`` if some AIML kernel
                 matches the pattern and produces an answer, otherwise ``None``
        """
        # Extract the grammatical components of the sentence:
        # tokens, pattern string, per-word child dicts, POS tags, HED index.
        words, pattern, arcs_dict, postags, hed_index = \
            self._get_sentence_pattern(question)
        # Ask each AIML kernel in turn; the first non-empty response wins
        # (remaining kernels are not consulted, matching the original logic
        # even when pedia_response then yields no answer).
        for aiml_kernel in self.aiml_kernels.values():
            aiml_respond = aiml_kernel.respond(pattern)
            if aiml_respond:  # non-empty string -> the pattern matched
                answer = pedia_response(
                    aiml_respond,
                    words, arcs_dict, postags, hed_index
                )
                if answer is not None:
                    return [answer]
                return None
        return None

    def _register_pyltp_util(self):
        """
        Load the pyltp models (segmentor, POS tagger, dependency parser)
        from the paths configured under ``config['pyltp-model-path']``.
        """
        self.segmentor = Segmentor()
        self.postagger = Postagger()
        self.parser = Parser()
        model_path = self.config['pyltp-model-path']
        # NOTE(review): the segmentor model is loaded but this class segments
        # with jieba (see _get_sentence_pattern); kept because external code
        # may rely on ``self.segmentor`` being initialised — confirm before
        # removing.
        self.segmentor.load(read_config_path(model_path['segmentor']))
        self.postagger.load(read_config_path(model_path['postagger']))
        self.parser.load(read_config_path(model_path['parser']))

    def _get_sentence_pattern(self, sentence):
        """
        Build the pattern template for ``sentence``.

        1. Segment the sentence with jieba.
        2. POS-tag every token.
        3. Run the dependency parse.

        The pattern is the concatenation of each word's dependency relation
        tag, with ``HED`` inserted at the head word's position.

        :param sentence: raw sentence text
        :return: tuple ``(words, pattern, arcs_dict, postags, hed_index)``
        """
        words = list(jieba.cut(sentence))
        postags = self.postagger.postag(words)
        arcs = self.parser.parse(words, postags)
        arcs_dict = self._build_sub_dicts(words, arcs)

        # Index of the head (HED) word; defaults to 0 if the parser
        # produced no HED relation.
        hed_index = 0
        for i, arc in enumerate(arcs):
            if arc.relation == 'HED':
                hed_index = i

        # Map each dependent word index to its relation tag once, instead of
        # re-scanning every sub-dict for every word (was accidentally O(n^2)).
        # Each arc index appears under exactly one head/relation, so this is
        # behaviour-preserving; setdefault keeps first-seen order regardless.
        relation_of = {}
        for sub_dict in arcs_dict:
            for relation, children in sub_dict.items():
                for child in children:
                    relation_of.setdefault(child, relation)

        pattern = ''
        for i in range(len(words)):
            if i == hed_index:
                # The HED word's head is ROOT (0), so it is in no sub-dict
                # and contributes only the 'HED' tag.
                pattern += 'HED'
            if i in relation_of:
                pattern += relation_of[i]
        return words, pattern, arcs_dict, postags, hed_index

    @staticmethod
    def _build_sub_dicts(words, arcs):
        """
        For every word, collect the indices of its dependency children,
        grouped by relation tag.

        :param words: segmented word list
        :param arcs: dependency arc list (``head`` is 1-based; 0 means ROOT)
        :return: list of ``{relation: [child_index, ...]}``, one per word
        """
        sub_dicts = []
        for idx in range(len(words)):
            sub_dict = dict()
            for arc_idx, arc in enumerate(arcs):
                # arc.head is 1-based: head == idx + 1 means this arc's
                # dependent word is a child of word ``idx``.
                if arc.head == idx + 1:
                    sub_dict.setdefault(arc.relation, []).append(arc_idx)
            sub_dicts.append(sub_dict)
        return sub_dicts
