#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90
# bert4torch==0.5.1
# 伪批量推理

import os
import time
import torch

from typing import List, Tuple, Dict

import torch.nn as nn
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel

maxlen = 64
batch_size = 200
labels = ["BRAND", "MODEL", "NAME", "COLOR", "SPECS", "UNIT"]

# BIO tag set: 'O' first, then a B-/I- pair per entity label, in label order.
categories = ['O'] + [f"{prefix}-{label}" for label in labels for prefix in ("B", "I")]
# Forward and reverse lookups between tag id and tag name.
categories_id2label = dict(enumerate(categories))
categories_label2id = {label: idx for idx, label in enumerate(categories)}
"""
categories_id2label = {0: 'O',
 1: 'B-BRAND',
 2: 'I-BRAND',
 3: 'B-MODEL',
 4: 'I-MODEL',
 5: 'B-NAME',
 6: 'I-NAME',
 7: 'B-COLOR',
 8: 'I-COLOR',
 9: 'B-SPECS',
 10: 'I-SPECS',
 11: 'B-UNIT',
 12: 'I-UNIT'}
 
categories_label2id = {'O': 0,
 'B-BRAND': 1,
 'I-BRAND': 2,
 'B-MODEL': 3,
 'I-MODEL': 4,
 'B-NAME': 5,
 'I-NAME': 6,
 'B-COLOR': 7,
 'I-COLOR': 8,
 'B-SPECS': 9,
 'I-SPECS': 10,
 'B-UNIT': 11,
 'I-UNIT': 12}
 
 
"""

# Pick GPU 0 when available, otherwise fall back to CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Local directory holding the pretrained chinese-roberta-wwm-ext model files.
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# Build the tokenizer
dict_path = os.path.join(pretrained_model, "vocab.txt")
config_path = os.path.join(pretrained_model, "config.json")
# NOTE(review): checkpoint_path is defined but never used below — the model
# builds from config_path only and weights are loaded from ./model/best_model.pt.
checkpoint_path = os.path.join(pretrained_model, "pytorch_model.bin")

# Vocab-file tokenizer; lower-cases input to match the uncased vocab.
tokenizer = Tokenizer(dict_path, do_lower_case=True)


# 定义bert上的模型结构
# Model structure on top of BERT
class Model(BaseModel):
    """BERT encoder + linear emission layer + CRF decoder for NER inference."""

    def __init__(self):
        super().__init__()
        # Backbone without segment embeddings (segment_vocab_size=0).
        self.bert = build_transformer_model(config_path=config_path, segment_vocab_size=0)
        # Emission scores over the full tag set (includes leading/trailing special tokens).
        self.fc = nn.Linear(768, len(categories))
        self.crf = CRF(len(categories))
        # Inference-only script: start the module in eval mode.
        self.eval()

    def forward(self, token_ids):
        """Return (emission_score [btz, seq_len, tag_size], attention_mask [btz, seq_len])."""
        hidden = self.bert([token_ids])       # [btz, seq_len, hdsz]
        emissions = self.fc(hidden)           # [btz, seq_len, tag_size]
        mask = token_ids.gt(0).long()         # 1 on non-padding positions
        return emissions, mask

    def predict(self, token_ids):
        """CRF-decode the best tag path for one padded tensor of token ids."""
        with torch.no_grad():
            emissions, mask = self.forward(token_ids)
            return self.crf.decode(emissions, mask)  # [btz, seq_len]

    def predict_batch(self, token_list):
        """Pseudo-batch inference: decode each token tensor in the list one by one."""
        return [self.predict(token_ids) for token_ids in token_list]


# Instantiate the inference model and move it to the selected device.
model = Model().to(device)


def trans_entity2tuple(scores):
    '''Convert a decoded tag tensor into (sample_id, start, end, entity_type) tuples.

    Args:
        scores: [btz, seq_len] tensor of tag ids (output of CRF decoding).
    Returns:
        List of tuples (sample_id, start_token, end_token, entity_type).
    '''
    batch_entity_ids = list()
    for sample_id, sample in enumerate(scores):
        # Open/closed spans as [sample_id, start, end, type]; an empty list
        # marks a broken continuation so a stray I- tag cannot reattach.
        spans = []
        for pos, tag_id in enumerate(sample):
            tag = categories_id2label[tag_id.item()]
            if tag.startswith('B-'):
                # Start a new entity at this token.
                spans.append([sample_id, pos, pos, tag[2:]])
            elif not spans:
                continue
            elif spans[-1] and tag.startswith('I-') and tag[2:] == spans[-1][3]:
                # Same-type continuation: extend the current entity's end.
                spans[-1][2] = pos
            elif spans[-1]:
                # 'O' or mismatched tag right after an open entity: close it.
                spans.append([])
        # Keep only real (non-marker) spans, as immutable tuples.
        batch_entity_ids.extend(tuple(span) for span in spans if span)
    return batch_entity_ids


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one decoded entity (its label and decoded text) to predict_result.

    All tag ids in temp_label_list must map to the same entity type once the
    B-/I- prefixes are stripped; otherwise an AssertionError is raised.
    """
    tag_names = [categories[idx] for idx in temp_label_list]
    tag_names = [tag.replace("B-", "").replace("I-", "") for tag in tag_names]
    assert len(set(tag_names)) == 1, f"get more than 1 label in predict result:{set(tag_names)}"

    entity = {"label": tag_names[0], "text": tokenizer.decode(temp_token_list)}
    predict_result.append(entity)
    return predict_result


def fill_outputs_batch(text_list: List[str], entity_lists: List[List[Tuple[int, int, int, str]]], token_offsets: List[List[List[List[int]]]]):
    """Map token-level entity spans back to character-level spans.

    Args:
        text_list: raw input texts (spaces previously replaced by '_').
        entity_lists: per-text lists of (sample_id, start_tok, end_tok, label).
        token_offsets: per-text offset structure; token_offsets[i][0][j] is the
            list of character indices covered by token j.
    Returns:
        Per-text lists of dicts with 'start', 'end', 'label', 'text'.
    """
    results = []
    for text, entity_list, token_offset in zip(text_list, entity_lists, token_offsets):
        _text = text.replace("_", " ")  # restore the spaces masked before tokenization
        offsets = token_offset[0]
        result = []
        for entity in entity_list:
            start = entity[1]
            # The text was encoded twice (encode(text, text)), so offsets cover two
            # copies; drop spans that fall in the second half (floor division, was int(/2)).
            if start >= len(offsets) // 2:
                continue
            end = entity[2] + 1
            label = entity[3]
            label_ids = []
            # Clamp to the available offsets to avoid IndexError on truncated inputs.
            for idx in range(start, min(end, len(offsets))):
                label_ids.extend(offsets[idx])

            if label_ids:
                one = dict()
                one["start"] = min(label_ids)
                one["end"] = max(label_ids)
                one["label"] = label
                one["text"] = "".join(_text[x] for x in label_ids)
                result.append(one)
        results.append(result)
    return results


def inference_ner_batch(text_list: List[str]):
    """Run pseudo-batched NER inference over a list of raw texts.

    Each text is encoded separately, decoded through the BERT+CRF model one
    tensor at a time, and the token-level spans are mapped back to
    character-level entities.
    """
    token_tensors, offsets = [], []
    for text in text_list:
        # encode(text, text): token ids at index 0, char offsets at index 2.
        encoded = tokenizer.encode(text, text, maxlen=maxlen, return_offsets=True, return_tensors="pt")
        token_tensors.append(encoded[0])
        offsets.append(encoded[2])

    decoded_paths = model.predict_batch(token_tensors)
    span_tuples = [trans_entity2tuple(paths) for paths in decoded_paths]
    return fill_outputs_batch(text_list, span_tuples, offsets)


if __name__ == '__main__':
    # Two sample product titles; spaces are replaced by '_' so that character
    # positions stay stable through tokenization (fill_outputs_batch maps '_'
    # back to ' ' when rendering entity text).
    text = "得力0010订书钉10#(1000枚/盒)10盒/包(单位:包)"
    text2 = "恒夫曼 M14/35mm 内六角螺栓 DIN912 12.9级  全牙 黑色"
    text = text.replace(" ", "_")
    text2 = text2.replace(" ", "_")
    print("text=", text)
    # Load the fine-tuned NER weights (not the raw pretrained checkpoint).
    model.load_weights("./model/best_model.pt")

    model.eval()
    # Brief pause before timing the inference call.
    time.sleep(1)
    start = time.time()
    result = inference_ner_batch([text, text2])
    end = time.time()
    print("use time", end - start)
    # 0.12297940254211426
    print(result)
    # Expected output:
    # [[{'start': 0, 'end': 1, 'label': 'BRAND', 'text': '得力'}, {'start': 19, 'end': 19, 'label': 'UNIT', 'text': '盒'},
    #   {'start': 25, 'end': 25, 'label': 'UNIT', 'text': '包'}, {'start': 30, 'end': 30, 'label': 'UNIT', 'text': '包'}],
    # [{'start': 0, 'end': 2, 'label': 'BRAND', 'text': '恒夫曼'}, {'start': 4, 'end': 11, 'label': 'MODEL', 'text': 'M14/35mm'}]]
