import pkuseg
from data import Dictionary
from eval import CONLL
from predict import predict
import numpy as np
from model import make_model
import torch

def biaffine_DP_predict(sentence, vocab_path="vocab/train", char=False, model_path="checkpoints/model.pt"):
    """Segment and POS-tag ``sentence`` with pkuseg, run the biaffine
    dependency parser, and return the predicted head/label relations.

    Parameters
    ----------
    sentence : str
        Raw (Chinese) input sentence.
    vocab_path : str
        Directory passed to ``Dictionary`` to load the vocabulary.
    char : bool
        Forwarded to ``Dictionary`` (character-level vocabulary switch).
    model_path : str
        Path of the ``torch.save``-serialized parser model.

    Returns
    -------
    dict
        ``{token_index: (begin, end, word, relation, head_word, head_index)}``
        where ``begin``/``end`` are character offsets of the token within the
        segmented sentence.

    Side effects: overwrites ``predict.conll`` with the parse and prints
    progress to stdout.  Requires a CUDA device (model and tensors are moved
    to GPU).
    """
    # Map pkuseg POS tags onto the (LTP/HIT-style) tag set the parser was
    # trained with.  NOTE(review): '(rg )' with a trailing space looks like a
    # typo in the upstream table; 'rg' (without space) is added alongside it
    # so a real 'rg' tag no longer raises KeyError.  'l' was present in the
    # sibling POS_predict table but missing here; added for consistency.
    pk2ha_dic = {
        'n': 'n',
        'nr': 'nh',
        'nr1': 'nh',
        'nr2': 'nh',
        'nrf': 'nh',
        'nrj': 'nh',
        'ns': 'ns',
        'nsf': 'ns',
        'nt': 'ni',
        'nz': 'nz',
        'nl': 'n',
        'ng': 'n',
        't': 'nt',
        'tg': 'nt',
        's': 'nl',
        'f': 'nd',
        'v': 'v',
        'vd': 'n',
        'vn': 'n',
        'vshi': 'v',
        'vyou': 'v',
        'vf': 'v',
        'vx': 'v',
        'vi': 'n',
        'vl': 'n',
        'vg': 'n',
        'a': 'a',
        'ad': 'd',
        'an': 'a',
        'ag': 'a',
        'al': 'a',
        'b': 'b',
        'bl': 'b',
        'z': 'n',
        'r': 'r',
        'rr': 'r',
        'rz': 'r',
        'rzt': 'nt',
        'rzs': 'nl',
        'rzv': 'v',
        'ry': 'r',
        'ryt': 'nt',
        'rys': 'nl',
        'ryv': 'v',
        'rg ': 'r',
        'rg': 'r',
        'm': 'm',
        'mq': 'm',
        'q': 'q',
        'qv': 'q',
        'qt': 'q',
        'd': 'd',
        'p': 'p',
        'pba': 'p',
        'pbei': 'p',
        'c': 'c',
        'cc': 'c',
        'u': 'u',
        'uzhe': 'u',
        'ule': 'u',
        'uguo': 'u',
        'ude1': 'u',
        'ude2': 'u',
        'ude3': 'u',
        'usuo': 'u',
        'udeng': 'u',
        'uyy': 'u',
        'udh': 'u',
        'uls': 'u',
        'uzhi': 'u',
        'ulian': 'u',
        'e': 'e',
        'y': 'e',
        'o': 'o',
        'h': 'h',
        'k': 'k',
        'x': 'nz',
        'xe': 'nz',
        'xs': 'nz',
        'xm': 'nz',
        'xu': 'nz',
        'w': 'wp',
        'wkz': 'wp',
        'wky': 'wp',
        'wyz': 'wp',
        'wyy': 'wp',
        'wj': 'wp',
        'ww': 'wp',
        'wt': 'wp',
        'wd': 'wp',
        'wf': 'wp',
        'wn': 'wp',
        'wm': 'wp',
        'ws': 'wp',
        'wp': 'wp',
        'wb': 'wp',
        'wh': 'wp',
        'g': 'g',
        'i': 'i',
        'R': 'h',
        'j': 'j',
        'l': 'l',
    }

    dictionary = Dictionary(vocab_path, char=char)
    conll = CONLL(dictionary)
    model = torch.load(model_path).cuda()
    words = []
    tags = []
    heads = []
    labels = []
    words_index = []
    tags_index = []

    # Enable joint segmentation + POS tagging with the medicine-domain model.
    seg = pkuseg.pkuseg(postag=True, model_name="medicine")
    print(sentence)
    text = seg.cut(sentence)  # segment and POS-tag
    print(text)

    # Prepend the artificial root token expected by the parser; index 2 is
    # presumably the dictionary slot reserved for <root>/ROOT — TODO confirm
    # against the Dictionary implementation.
    words.append('<root>')
    tags.append('ROOT')
    words_index.append(2)
    tags_index.append(2)

    for word, tag in text:
        words.append(word)
        # NOTE(review): an out-of-vocabulary word raises KeyError here;
        # Dictionary may be expected to contain every word — verify.
        words_index.append(dictionary.w2i[word])
        tag = pk2ha_dic[tag]
        tags.append(tag)
        tags_index.append(dictionary.t2i[tag])

    # Batch dimension of 1: shape (1, sentence_length).
    words = np.array([words])
    tags = np.array([tags])
    words_index = torch.cuda.LongTensor(np.array([words_index]))
    tags_index = torch.cuda.LongTensor(np.array([tags_index]))
    heads_pred, labels_pred = predict(model, words_index, tags_index)

    heads.append(heads_pred)
    labels.append([dictionary.i2l[i] for i in labels_pred])

    # Dump the parse in CoNLL format; the leading <root> token is skipped.
    # (Fix: the original called f.close() inside this loop, which would crash
    # on a second iteration — the with-block already closes the file.)
    with open('predict.conll', 'w', encoding='UTF-8') as f:
        for word, tag, head, label in zip(words, tags, heads, labels):
            lines = zip(word[1:], tag[1:], head[1:], label[1:])
            for i, (w, t, h, l) in enumerate(lines, 1):
                print(i, w, '_', t, t, '_', h, l, '_', '_', sep='\t', file=f)

    begin = 0
    relations = {}
    for word, tag, head, label in zip(words, tags, heads, labels):
        lines = zip(word[1:], tag[1:], head[1:], label[1:])
        for i, (w, t, h, l) in enumerate(lines, 1):
            len_w = len(w)
            # (begin offset, end offset, word, relation, head word, head ID)
            relations[i] = (begin, begin + len_w, w, l, word[h], h)
            begin += len_w
    print(relations)
    return relations


def POS_predict(sentence):
    """Segment and POS-tag ``sentence`` with pkuseg and append the result to
    ``MedPOS.conll`` in CoNLL format (head and label columns left as '_').

    Parameters
    ----------
    sentence : str
        Raw (Chinese) input sentence.

    Returns
    -------
    None
        Output goes to the ``MedPOS.conll`` file (opened in append mode, one
        blank line after each sentence).
    """
    # Map pkuseg POS tags onto the (LTP/HIT-style) target tag set.
    # NOTE(review): 'rg ' with a trailing space looks like an upstream typo;
    # 'rg' (without space) is added alongside it so a real 'rg' tag no longer
    # raises KeyError.
    pk2ha_dic = {
        'n': 'n',
        'nr': 'nh',
        'nr1': 'nh',
        'nr2': 'nh',
        'nrf': 'nh',
        'nrj': 'nh',
        'ns': 'ns',
        'nsf': 'ns',
        'nt': 'ni',
        'nz': 'nz',
        'nl': 'n',
        'ng': 'n',
        't': 'nt',
        'tg': 'nt',
        's': 'nl',
        'f': 'nd',
        'v': 'v',
        'vd': 'n',
        'vn': 'n',
        'vshi': 'v',
        'vyou': 'v',
        'vf': 'v',
        'vx': 'v',
        'vi': 'n',
        'vl': 'n',
        'vg': 'n',
        'a': 'a',
        'ad': 'd',
        'an': 'a',
        'ag': 'a',
        'al': 'a',
        'b': 'b',
        'bl': 'b',
        'z': 'n',
        'r': 'r',
        'rr': 'r',
        'rz': 'r',
        'rzt': 'nt',
        'rzs': 'nl',
        'rzv': 'v',
        'ry': 'r',
        'ryt': 'nt',
        'rys': 'nl',
        'ryv': 'v',
        'rg ': 'r',
        'rg': 'r',
        'm': 'm',
        'mq': 'm',
        'q': 'q',
        'qv': 'q',
        'qt': 'q',
        'd': 'd',
        'p': 'p',
        'pba': 'p',
        'pbei': 'p',
        'c': 'c',
        'cc': 'c',
        'u': 'u',
        'uzhe': 'u',
        'ule': 'u',
        'uguo': 'u',
        'ude1': 'u',
        'ude2': 'u',
        'ude3': 'u',
        'usuo': 'u',
        'udeng': 'u',
        'uyy': 'u',
        'udh': 'u',
        'uls': 'u',
        'uzhi': 'u',
        'ulian': 'u',
        'e': 'e',
        'y': 'e',
        'o': 'o',
        'h': 'h',
        'k': 'k',
        'x': 'nz',
        'xe': 'nz',
        'xs': 'nz',
        'xm': 'nz',
        'xu': 'nz',
        'w': 'wp',
        'wkz': 'wp',
        'wky': 'wp',
        'wyz': 'wp',
        'wyy': 'wp',
        'wj': 'wp',
        'ww': 'wp',
        'wt': 'wp',
        'wd': 'wp',
        'wf': 'wp',
        'wn': 'wp',
        'wm': 'wp',
        'ws': 'wp',
        'wp': 'wp',
        'wb': 'wp',
        'wh': 'wp',
        'g': 'g',
        'i': 'i',
        'R': 'h',
        'j': 'j',
        'l': 'l',
    }

    # Enable joint segmentation + POS tagging with the medicine-domain model.
    seg = pkuseg.pkuseg(postag=True, model_name="medicine")
    text = seg.cut(sentence)  # segment and POS-tag

    words = [word for word, _ in text]
    tags = [pk2ha_dic[tag] for _, tag in text]

    # Append the tagged sentence in CoNLL format.  The head and label columns
    # are constant '_' placeholders, so the original numpy-array wrapping and
    # padding loops were redundant and have been removed; the output bytes
    # are unchanged.
    with open('MedPOS.conll', 'a', encoding='UTF-8') as f:
        for i, (w, t) in enumerate(zip(words, tags), 1):
            print(i, w, '_', t, t, '_', '_', '_', '_', '_', sep='\t', file=f)
        f.write('\n')



if __name__ == "__main__":
    # Batch-tag a slice of the breast-pathology corpus: 0-based lines
    # 5001..9999 (printed 1-based as i+1), appending each parse to
    # MedPOS.conll via POS_predict.
    corpus_path = 'E:/workspace/PatholNLP-graduation experiment/data/乳腺病理诊断数据'
    with open(corpus_path, 'r', encoding='utf-8') as f:
        sentences = f.readlines()
    for i in range(5001, 10000):
        sent = sentences[i]
        # NOTE(review): sent still carries its trailing newline from
        # readlines(); presumably pkuseg tolerates it — confirm.
        print(sent, i + 1)
        # Normalize ASCII parentheses to their full-width equivalents.
        sent = sent.replace('(', '（').replace(')', '）')
        POS_predict(sent)