import transformers as ppb
import torch
import numpy as np
import re
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence

# Entity label scheme (BIO tagging):
#   ROCK        - rock types
#   MINERAL     - minerals
#   ORE_DEPOSIT - economically important elements or minerals
#   STRAT       - stratigraphic units
label_list = ['O', 'X', 'B-ROCK', 'I-ROCK', 'B-MINERAL', 'I-MINERAL', 'B-TIMESCALE', 'I-TIMESCALE', 'B-STRAT',
              'I-STRAT', 'B-LOCATION', 'I-LOCATION', 'B-ORE_DEPOSIT', 'I-ORE_DEPOSIT']
# Map each label string to its integer class id (the index the model predicts).
label_map = {label: i for i, label in enumerate(label_list)}

# Load the pretrained BERT tokenizer and the fine-tuned NER model.
tokenizer_class, pretrained_weights = (ppb.BertTokenizer, 'bert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
# NOTE(security): torch.load unpickles arbitrary objects -- only load model
# files from a trusted source.
model = torch.load('_model.pkl')


def run_ner(text):
    """Run the NER model on raw text and return one label per subword token.

    Args:
        text: input string; tokenized with the module-level BERT tokenizer.

    Returns:
        List of label strings (keys of ``label_map``), one per wordpiece
        token of ``text``, in order.
    """
    tokens = tokenizer.tokenize(text)
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    tokens_tensor = torch.tensor(token_ids)

    # Attention mask: 1 at every non-pad (id != 0) position, 0 elsewhere.
    masks_tensor = (tokens_tensor != 0).long()

    # Run on whatever device the model lives on instead of hard-coding
    # .cuda(), so inference also works on CPU-only machines.
    device = next(model.parameters()).device
    with torch.no_grad():  # inference only -- no gradients needed
        outputs = model(input_ids=tokens_tensor.unsqueeze(0).to(device),
                        attention_mask=masks_tensor.unsqueeze(0).to(device))
    logits = outputs[0]

    # Pick the highest-scoring class id for each token position.
    predict = []
    for logit in logits:
        predict.extend(np.argmax(logit.detach().cpu().numpy(), axis=1))

    # Invert label_map to recover label strings from class ids.
    inverse_map = {idx: label for label, idx in label_map.items()}
    return [inverse_map[i] for i in predict]


def cut_sentences(content):
    """Split *content* into sentences on terminal punctuation.

    A sentence boundary is a terminator character (English or Chinese)
    that is NOT immediately followed by another terminator, so runs such
    as "..." or "?!" stay within one sentence. Any trailing text without
    a terminator is returned as the final sentence.
    """
    # Sentence-ending characters, covering both English and Chinese.
    terminators = ['?', '!', '.', '？', '！', '。', '…', ';', '\n']

    sentences = []
    buffer = ''
    last_index = len(content) - 1
    for position, ch in enumerate(content):
        buffer += ch

        if position == last_index:
            # Reached the end of the text: flush whatever is buffered.
            sentences.append(buffer)
            break

        if ch in terminators and content[position + 1] not in terminators:
            # Terminator not followed by another terminator -> sentence ends.
            sentences.append(buffer)
            buffer = ''

    return sentences


def sentence2list(sentence):
    """Split a sentence into word tokens, isolating punctuation marks.

    The sentence is split on spaces; within each word, every punctuation
    character becomes its own list entry while the surrounding characters
    stay grouped together. Empty fragments are dropped.
    """
    punctuation = [':', ',', '(', ')', '.', '-', ';', '!', '?', '\n', '\'', '\"']
    tokens = []
    for raw_word in sentence.strip().split(" "):
        raw_word = raw_word.strip()
        if not raw_word:
            continue
        current = ""
        for ch in raw_word:
            if ch in punctuation:
                # Flush any accumulated characters, then emit the mark itself.
                if current:
                    tokens.append(current)
                    current = ""
                tokens.append(ch)
            else:
                current += ch
        if current:
            tokens.append(current)
    return tokens


# Read the input document, cut it into sentences, and print one
# "word: predicted-label" line per word.
with open('11.txt', 'r', encoding='UTF-8') as f:
    paragraph = f.read()
    sentences = cut_sentences(paragraph)
    for i, sentence in enumerate(sentences):
        # Word-level tokens with punctuation separated out.
        word_list = sentence2list(sentence)
        # Wordpiece sub-tokens per word; the cumulative lengths give the
        # index of each word's FIRST subword within the flat subword list.
        subwords = list(map(tokenizer.tokenize, word_list))
        subword_lengths = list(map(len, subwords))
        token_start_idxs = np.cumsum([0] + subword_lengths[:-1])
        t1 = tokenizer.tokenize(sentence)
        # print(word_list)
        # print("-------------")
        # One predicted label per subword token of the full sentence.
        tmp = run_ner(sentence)
        predictions = []
        # Keep only the prediction at each word's first subword so labels
        # line up 1:1 with word_list.
        # NOTE(review): this assumes tokenizing word-by-word produces the
        # same subword sequence as tokenizing the whole sentence -- the
        # commented-out length check below suggests mismatches were seen;
        # confirm before relying on the alignment.
        for j in token_start_idxs:
            predictions.append(tmp[j])
        # print(len(word_list))
        # print(len(predictions))
        # if len(word_list) != len(predictions):
        #     print(word_list)
        #     print(t1)
        # print("-----------------")
        zipped = zip(word_list, predictions)
        for word, prediction in zipped:
            print(word + ": " + prediction)
        print("--------------")