import pkuseg
from biaffineDP.data import Dictionary, Corpus
from biaffineDP.eval import CONLL
from biaffineDP.predict import predict, predict_score
import numpy as np
from biaffineDP.model import make_model
import torch
from src import util
from vocabulary import Vocabulary
#from torchsummary import summary
from biaffineDP.model import BiAffineParser
from biaffineDP.train import my_train
from biaffineDP.data import PAD_INDEX
from biaffineDP.embedding import WordEmbedding, TagEmbedding, WordTagEmbedding, RecurrentCharEmbedding, ConvolutionalCharEmbedding
from biaffineDP.nn import MLP, BiAffine
from biaffineDP.encoder import RecurrentEncoder, ConvolutionalEncoder, NoEncoder
from biaffineDP.transformer import TransformerEncoder
from torch import nn


PROJECT_ROOT = util.get_project_root()

class Biaffine_Parser(object):
    """Chinese dependency parser: pkuseg segmentation/POS tagging + a
    pretrained BiAffine neural dependency parser.

    The pkuseg segmenter is biased towards the medical domain and seeded
    with a user dictionary built from the project vocabulary's entity sets.
    pkuseg POS tags are mapped onto the tag set the parser was trained on
    via ``pk2ha_dic``.
    """

    def __init__(self, char=False):
        """Load the vocabulary, dictionary, segmenter and parser model.

        :param char: forwarded to ``Dictionary`` — presumably switches to a
            character-level vocabulary; TODO confirm in ``biaffineDP.data``.
        """
        self.voc = Vocabulary()
        self.voc.load_vocabulary()

        # Map pkuseg POS tags to the parser's (LTP-style) tag set.
        # NOTE(review): the table historically contained 'rg ' with a trailing
        # space; 'rg' (no space) is included as well so that a plain 'rg' tag
        # from pkuseg does not raise KeyError in parse_dependency.
        self.pk2ha_dic = {'n': 'n', 'nr': 'nh', 'nr1': 'nh', 'nr2': 'nh', 'nrf': 'nh', 'nrj': 'nh', 'ns': 'ns',
                'nsf': 'ns', 'nt': 'ni', 'nz': 'nz', 'nl': 'n', 'ng': 'n', 't': 'nt', 'tg': 'nt', 's': 'nl', 'f': 'nd',
                'v': 'v', 'vd': 'n', 'vn': 'v', 'vshi': 'v', 'vyou': 'v', 'vf': 'v', 'vx': 'v', 'vi': 'n', 'vl': 'n',
                'vg': 'n', 'a': 'a', 'ad': 'd', 'an': 'a', 'ag': 'a', 'al': 'a', 'b': 'b', 'bl': 'b', 'z': 'n', 'r': 'r',
                'rr': 'r', 'rz': 'r', 'rzt': 'nt', 'rzs': 'nl', 'rzv': 'v', 'ry': 'r', 'ryt': 'nt', 'rys': 'nl', 'ryv': 'v',
                'rg ': 'r', 'rg': 'r', 'm': 'm', 'mq': 'm', 'q': 'q', 'qv': 'q', 'qt': 'q', 'd': 'd', 'p': 'p', 'pba': 'p', 'pbei': 'p',
                'c': 'c', 'cc': 'c', 'u': 'u', 'uzhe': 'u', 'ule': 'u', 'uguo': 'u', 'ude1': 'u', 'ude2': 'u', 'ude3': 'u',
                'usuo': 'u', 'udeng': 'u', 'uyy': 'u', 'udh': 'u', 'uls': 'u', 'uzhi': 'u', 'ulian': 'u', 'e': 'e', 'y': 'e',
                'o': 'o', 'h': 'h', 'k': 'k', 'x': 'nz', 'xe': 'nz', 'xs': 'nz', 'xm': 'nz', 'xu': 'nz', 'w': 'wp', 'wkz': 'wp',
                'wky': 'wp', 'wyz': 'wp', 'wyy': 'wp', 'wj': 'wp', 'ww': 'wp', 'wt': 'wp', 'wd': 'wp', 'wf': 'wp', 'wn': 'wp', 'wm': 'wp',
                'ws': 'wp', 'wp': 'wp', 'wb': 'wp', 'wh': 'wp', 'g': 'g', 'i': 'i', 'R': 'h', 'j': 'j', 'l': 'l'
            }

        model_path = "{}/models/biaffine_dp_model.pth".format(PROJECT_ROOT)
        vocab_path = "{}/data/dp_train/vocab".format(PROJECT_ROOT)

        self.dictionary = Dictionary(vocab_path, char=char)

        # Segmenter with POS tagging enabled, medical-domain model, and the
        # project entity terms as a user dictionary.
        self.segment = pkuseg.pkuseg(postag=True, model_name="medicine", user_dict=self.construst_segment())

        self.parser = self.load_model(model_path)

    def construst_segment(self):
        """Build the pkuseg user dictionary from the vocabulary entity sets.

        :return: flat list of entity terms (treatments, body parts, diseases,
            checks, signs, affirmed signs), in that order.
        """
        entity_sets = [self.voc.SET_TREATMENT, self.voc.SET_BODY, self.voc.SET_DISEASE,
                       self.voc.SET_CHECK, self.voc.SET_SIGNS, self.voc.SET_SIGNS_AFF]
        return [word for entities in entity_sets for word in entities]

    def parse_dependency(self, seg_result, sentence, begin):
        """Run dependency parsing over an already-segmented sentence.

        :param seg_result: iterable of ``(word, pkuseg_tag)`` pairs.
        :param sentence: the raw sentence text (kept for caller context).
        :param begin: character offset of the sentence start in the document.
        :return: ``(relations, score_matrix)`` where ``relations`` maps the
            1-based token index to
            ``(start, end, word, label, head_word, head_index)``.
        """
        words = ['<root>']
        tags = ['ROOT']
        # Index 2 is assumed to be the vocabulary id of the root placeholder
        # in both the word and tag dictionaries — TODO confirm.
        words_index = [2]
        tags_index = [2]

        for word, tag in seg_result:
            words.append(word)
            # KeyError on out-of-vocabulary words/tags — assumed covered by
            # the user dictionary and pk2ha_dic; NOTE(review): verify.
            words_index.append(self.dictionary.w2i[word])
            tag = self.pk2ha_dic[tag]
            tags.append(tag)
            tags_index.append(self.dictionary.t2i[tag])

        words = np.array(words)
        tags = np.array(tags)

        # Add a batch dimension and move the inputs onto the parser's device.
        # (Works on CPU-only machines too, unlike the previous hard-coded
        # torch.cuda.LongTensor.)
        device = next(self.parser.parameters()).device
        words_index = torch.tensor([words_index], dtype=torch.long, device=device)
        tags_index = torch.tensor([tags_index], dtype=torch.long, device=device)

        # Dependency prediction: head indices, arc labels, full score matrix.
        heads, labels_pred, score_matrix = predict_score(self.parser, words_index, tags_index)
        labels = [self.dictionary.i2l[i] for i in labels_pred]

        relations = {}
        # Skip position 0 (the <root> placeholder); token indices are 1-based.
        for index, (word, tag, head, label) in enumerate(
                zip(words[1:], tags[1:], heads[1:], labels[1:]), 1):
            len_w = len(word)
            # (start offset, end offset, word, relation label, head word, head id)
            relations[index] = (begin, begin + len_w, word, label, words[head], head)
            # CoNLL-style debug line.
            print(index, word, '_', tag, tag, '_', head, label, '_', '_', sep='\t', )
            begin += len_w

        return relations, score_matrix

    def relation_analysis(self, relations):
        """Split dependency relations into parallel and cascading groups.

        Cascading relations: ATT (attribute), SBV (subject-verb), CMP
        (complement), LAD (left adjunct), RAD (right adjunct), ADV (adverbial).
        Parallel relations: COO (coordination).

        :param relations: dict as produced by :meth:`parse_dependency`.
        :return: ``(parallel_relations, cascading_relations)``, each mapping a
            token index to ``(start, end, word, head_start, head_end, head_word)``.
        """
        parallel_relations, cascading_relations = {}, {}
        for index, row in relations.items():
            start, end, w, DEPREL, HEAD_LEMMA, HEAD_ID = row
            # NOTE(review): head id 0 (the root) is not a key in `relations`;
            # assumed the labels below never point at the root — confirm.
            if DEPREL == "COO":
                parallel_relations[index] = (start, end, w,
                                             relations[HEAD_ID][0], relations[HEAD_ID][1], HEAD_LEMMA)
            elif DEPREL in ["ATT", "ADV", "SBV", "CMP", "LAD", "RAD"]:
                cascading_relations[index] = (start, end, w,
                                              relations[HEAD_ID][0], relations[HEAD_ID][1], HEAD_LEMMA)

        return parallel_relations, cascading_relations

    def load_model(self, model_path):
        """Rebuild the BiAffine parser architecture and load trained weights.

        The hyper-parameters below must match the checkpoint exactly:
        24730-word vocabulary, 300-d embeddings, 3-layer 400-unit BiLSTM,
        26 relation labels.

        :param model_path: path to the state-dict checkpoint file.
        :return: the model in eval mode with gradients disabled, on GPU when
            available (CPU otherwise).
        """
        word_vocab_size = 24730
        word_emb_dim = 300
        emb_dropout = 0.3
        word_embedding = nn.Embedding(word_vocab_size, word_emb_dim, padding_idx=PAD_INDEX)
        embedding = WordEmbedding(word_embedding, emb_dropout)

        encoder = RecurrentEncoder(
            "LSTM", word_emb_dim, 400, 3, True, 0.3, bidirectional=True)
        encoder_dim = 2 * 400  # bidirectional: forward + backward states

        model = BiAffineParser(
            embedding,
            encoder,
            "rnn",
            encoder_dim,
            500,   # arc MLP size (assumed — confirm against training config)
            100,   # label MLP size
            0.3,   # MLP dropout
            26,    # number of dependency labels
            nn.CrossEntropyLoss
        )

        checkpoint = torch.load(model_path, map_location='cpu')
        model.load_state_dict(checkpoint)

        # Use the GPU when present, but fall back to CPU so inference still
        # works on CUDA-less machines (previously hard-coded "cuda:0").
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model.to(device)
        model.eval()

        # Inference only: freeze parameters to save memory.
        for param in model.parameters():
            param.requires_grad = False

        return model