#!/usr/bin/env python
# encoding: utf-8
'''
@author: Justin Ruan
@license: 
@contact: ruanjun@whut.edu.cn
@time: 2019-12-29
@desc:
'''

from pyhanlp import *
import util
import os, csv
import numpy as np
from vocabulary import Vocabulary


# Absolute path of the project root, resolved once at import time by the
# project-local `util` helper; used below to build data paths.
PROJECT_ROOT = util.get_project_root()


class Hanlp_Parser(object):
    """Segmentation and dependency parsing built on pyhanlp.

    Seeds HanLP's custom dictionary with the project vocabulary so that
    domain entities are kept as single tokens, then exposes neural-network
    dependency parsing plus a simple grouping of the resulting relations.
    """

    def __init__(self):
        # NOTE(review): the doubled slashes look like a Windows-path
        # workaround — consider os.path.join; kept as-is for compatibility.
        self.root_path = PROJECT_ROOT + "//data//"

        self.voc = Vocabulary()
        self.voc.load_vocabulary()
        # Both are expensive to build and are created lazily on first use
        # (see parse_dependency).
        self.segment = None
        self.parser = None

    def construst_segment(self):
        """Build a viterbi segmenter backed by the project vocabulary.

        All treatment/body/disease/check entities are inserted into HanLP's
        custom dictionary as nouns; sign entities are only inserted when the
        default segmentation would split them, with a POS tag chosen from
        the split result.

        :return: a HanLP segment object with the custom dictionary enforced
        """
        segment = HanLP.newSegment("viterbi")
        # `insert` overwrites entries already present in the dictionary,
        # whereas `add` would skip existing words.
        for entities in (self.voc.SET_TREATMENT, self.voc.SET_BODY,
                         self.voc.SET_DISEASE, self.voc.SET_CHECK):
            for word in entities:
                CustomDictionary.insert(word, 'n 1024')

        for entities in (self.voc.SET_SIGNS, self.voc.SET_SIGNS_AFF):
            for word in entities:
                seg_result = segment.seg(word)
                if len(seg_result) <= 1:
                    # Already a single token — nothing to fix.
                    continue

                labels_set = {p.nature.toString() for p in seg_result}
                # Numeral ('m'/'nx') + quantifier ('q') pieces -> tag as 'q'.
                if ('m' in labels_set or 'nx' in labels_set) and 'q' in labels_set:
                    CustomDictionary.insert(word, '{} 1024'.format('q'))
                # Adjective ('a') + noun ('n') pieces -> tag as 'n'.
                elif ('a' in labels_set) and 'n' in labels_set:
                    CustomDictionary.insert(word, '{} 1024'.format('n'))

        segment.enableCustomDictionaryForcing(True)  # force the custom dictionary

        return segment

    def parse_dependency(self, sentence, begin):
        """Segment *sentence* and run dependency parsing over it.

        :param sentence: text to parse
        :param begin: absolute character offset of the sentence within the
            full document; word offsets in the result are relative to it
        :return: dict mapping word ID -> (start, end, word, dependency
            relation, head word, head word ID), where start/end are
            absolute character offsets in the full document
        """
        # Lazily construct the (expensive) segmenter and parser once.
        if self.segment is None:
            self.segment = self.construst_segment()
        if self.parser is None:
            self.parser = JClass('com.hankcs.hanlp.dependency.nnparser.NeuralNetworkDependencyParser')

        # Word segmentation, then dependency parsing on the token list.
        seg_result = self.segment.seg(sentence)
        dp_result = self.parser.compute(seg_result)

        relations = {}
        for word in dp_result.iterator():
            w = word.LEMMA
            len_w = len(w)
            # Character positions are absolute coordinates in the document:
            # (start, end, word, relation, head word, head word ID).
            relations[word.ID] = (begin, begin + len_w, w, word.DEPREL, word.HEAD.LEMMA, word.HEAD.ID)

            begin += len_w

        return relations

    def relation_analysis(self, relations):
        """Split dependency relations into parallel and cascading groups.

        Cascading relations: ATT (attribute), SBV (subject-verb),
        CMP (complement), LAD (left adjunct), RAD (right adjunct),
        ADV (adverbial).  Parallel relations: COO (coordination).

        :param relations: dict as returned by parse_dependency
        :return: (parallel_relations, cascading_relations); each maps a word
            ID -> (start, end, word, head start, head end, head word)
        """
        parallel_relations, cascading_relations = {}, {}
        for index, row in relations.items():
            start, end, w, DEPREL, HEAD_LEMMA, HEAD_ID = row

            if DEPREL == "COO":
                parallel_relations[index] = (
                    start, end, w,
                    relations[HEAD_ID][0], relations[HEAD_ID][1], HEAD_LEMMA)
            elif DEPREL in ("ATT", "ADV", "SBV", "CMP", "LAD", "RAD"):
                cascading_relations[index] = (
                    start, end, w,
                    relations[HEAD_ID][0], relations[HEAD_ID][1], HEAD_LEMMA)

        return parallel_relations, cascading_relations


