import torch 
import unicodedata
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert, load_model_params, load_recent_model
import codecs
import collections

# Paths to the pretrained RoBERTa-wwm vocabulary and weights.
vocab_path = "./roberta_wwm_vocab.txt"
model_path = "./roberta_wwm_pytorch_model.bin"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# BIO tag set for the medical NER task: index 0 is "O", followed by one
# B-* and one I-* tag per entity type. The order must match the model
# this script loads — do not reorder.
target = ["O", "B-DRUG", "B-DRUG_INGREDIENT", "B-DISEASE", "B-SYMPTOM", "B-SYNDROME", "B-DISEASE_GROUP", 
        "B-FOOD", "B-FOOD_GROUP", "B-PERSON_GROUP", "B-DRUG_GROUP", "B-DRUG_DOSAGE", "B-DRUG_TASTE",
         "B-DRUG_EFFICACY", "I-DRUG", "I-DRUG_INGREDIENT", "I-DISEASE", "I-SYMPTOM", "I-SYNDROME", "I-DISEASE_GROUP", 
        "I-FOOD", "I-FOOD_GROUP", "I-PERSON_GROUP", "I-DRUG_GROUP", "I-DRUG_DOSAGE", "I-DRUG_TASTE",
         "I-DRUG_EFFICACY"]

# NOTE: "simplfied" (sic) is the keyword spelling the bert_seq2seq API
# uses — do not "fix" it here.
model = load_bert(vocab_path, model_name="roberta", model_class="sequence_labeling_crf", target_size=len(target), simplfied=True)
model.to(device)
# Load the fine-tuned NER + CRF checkpoint on top of the base model.
load_recent_model(model, "./bert_ner_model_crf.bin", device=device)

# Token -> id vocabulary, built with the same simplified-vocab option
# as the model above so the ids line up.
word2idx = load_chinese_base_vocab(vocab_path, simplfied=True)

def viterbi_decode(nodes, trans):
    """Viterbi decoding over CRF emission and transition scores.

    Args:
        nodes: emission scores, shape (seq_len, target_size).
        trans: transition scores, shape (target_size, target_size),
            where trans[i, j] is the score of moving from tag i to tag j.

    Returns:
        1-D tensor of length seq_len holding the best tag index per step.
    """
    seq_len, target_size = nodes.shape
    # Clone before masking: `nodes[0]` is a view, and the in-place
    # subtraction below would otherwise clobber the caller's tensor.
    scores = nodes[0].clone()
    scores[1:] -= 100000  # force the first tag to be "O" (index 0)
    labels = torch.arange(0, target_size).view(1, -1)
    path = labels
    for step in range(1, seq_len):
        # M[i, j] = best score ending in tag i, plus transition i -> j,
        # plus the emission score for tag j at this step.
        M = scores.view(-1, 1) + trans + nodes[step].view(1, -1)
        # For each current tag j, keep the best predecessor's score/index.
        scores, ids = M.max(0)
        # Re-gather the surviving partial paths and append this step's tags.
        path = torch.cat((path[:, ids], labels), dim=0)
    # Column of the overall best final tag is the full best path.
    return path[:, scores.argmax()]

def ner_print(model, test_data, text, index):
    """Tag each sentence in `test_data`, print the entities found, and
    append brat-style annotations to ./out/{index}.ann.

    Args:
        model: sequence-labeling model with a CRF head.
        test_data: list of sentence strings to tag.
        text: raw document text, used to locate entity character offsets.
        index: numeric document id; names the output .ann file.
    """
    print("开始预测:" + str(index))
    model.eval()
    tokenizer = Tokenizer(word2idx)
    # Learned CRF transition matrix, needed for Viterbi decoding.
    trans = model.state_dict()["crf_layer.trans"]
    res_list = []
    for each_text in test_data:
        decode = []
        text_encode, text_ids = tokenizer.encode(each_text)
        text_tensor = torch.tensor(text_encode, device=device).view(1, -1)
        out = model(text_tensor).squeeze(0)  # emission scores ("nodes")
        labels = viterbi_decode(out, trans)
        for l in labels:
            if l > 0:
                decode.append(target[l.item()])
            else:
                decode.append("O")

        res = collections.OrderedDict()
        count = 0
        # `flag` holds the entity type currently being assembled; 0 means
        # "not inside an entity". It must be (re)initialized per text —
        # the previous version left it undefined, raising NameError when
        # the first label was not "O", and let it leak across texts.
        flag = 0
        for i, each_entity in enumerate(decode):
            if each_entity != "O":
                if flag != each_entity[2:]:
                    # A new entity starts here.
                    count += 1
                    # i - 1 compensates for the leading [CLS] token in
                    # the encoded sequence.
                    cur_text = each_text[i - 1]
                    res[each_entity[2:] + "##" + str(count)] = cur_text
                    flag = each_entity[2:]
                else:
                    # Same entity type continues: extend the current span.
                    res[each_entity[2:] + "##" + str(count)] += each_text[i - 1]
            else:
                flag = 0

        print(res)
        res_list.append(res)

    # Build the corresponding .ann (brat annotation) file.
    T = 1
    with codecs.open("./out/" + str(index) + ".ann", "a+") as f:
        for res_item in res_list:
            if res_item == {}:
                continue
            for k, v in res_item.items():
                new_k = k.split("##")[0]
                lens = len(v)
                try:
                    i = text.index(v)
                except ValueError:
                    # Entity string not present in the raw text; skip it.
                    continue
                # Mask out the matched span so a repeated mention maps to
                # the next occurrence instead of the same offset again.
                text = text[:i] + "%" * lens + text[i + lens:]
                f.write("T" + str(T) + "\t")
                f.write(new_k + " ")
                f.write(str(i) + " " + str(i + lens) + "\t")
                f.write(v + "\n")
                T += 1

def predict(path: str, index: int):
    """Predict entities for a single text file and emit its .ann file."""
    with open(path, encoding="utf-8") as f:
        raw_text = f.read()

    # Collapse runs of whitespace into Chinese commas between the pieces.
    text = "，".join(raw_text.split())
    # Normalize other separators to the Chinese comma as well.
    for sep in ("、", "？", "?"):
        text = text.replace(sep, "，")

    # Split the cleaned text into sentences at "。", keeping only CJK
    # characters plus a small set of punctuation.
    sents = []
    current = ""
    for char in text:
        if not Tokenizer._is_cjk_character(char) and char not in ("。", "，", "（", "）"):
            continue
        if char == "。":
            # Full stop closes the current sentence.
            sents.append(current)
            current = ""
        else:
            current += char
    if current != "":
        sents.append(current)

    print(sents)
    ner_print(model, sents, raw_text, index)

    


if __name__ == "__main__":
    # Predict every test document (ids 1000..1499), one .ann file each.
    for doc_id in range(1000, 1500):
        predict(f"./test_data/{doc_id}.txt", doc_id)