#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90
# bert4torch==0.5.1
# 完成单条推理

import os
import time
import torch

from typing import List, Tuple, Dict

import torch.nn as nn
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel


# Sequence-length / batching hyper-parameters.
maxlen = 64
batch_size = 200

# Raw entity types; each expands to a B-/I- tag pair under the BIO scheme.
labels = ["BRAND", "MODEL", "NAME", "COLOR", "SPECS", "UNIT"]

# BIO tag inventory: 'O' first, then B-/I- pairs in label order.
categories = ['O'] + [f"{prefix}-{label}" for label in labels for prefix in ("B", "I")]
categories_id2label = dict(enumerate(categories))
categories_label2id = {tag: idx for idx, tag in enumerate(categories)}
"""
categories_id2label = {0: 'O',
 1: 'B-BRAND',
 2: 'I-BRAND',
 3: 'B-MODEL',
 4: 'I-MODEL',
 5: 'B-NAME',
 6: 'I-NAME',
 7: 'B-COLOR',
 8: 'I-COLOR',
 9: 'B-SPECS',
 10: 'I-SPECS',
 11: 'B-UNIT',
 12: 'I-UNIT'}
 
categories_label2id = {'O': 0,
 'B-BRAND': 1,
 'I-BRAND': 2,
 'B-MODEL': 3,
 'I-MODEL': 4,
 'B-NAME': 5,
 'I-NAME': 6,
 'B-COLOR': 7,
 'I-COLOR': 8,
 'B-SPECS': 9,
 'I-SPECS': 10,
 'B-UNIT': 11,
 'I-UNIT': 12}
 
 
"""

# Prefer the first GPU when one is present; otherwise run on CPU.
has_cuda = torch.cuda.is_available()
device = 'cuda:0' if has_cuda else 'cpu'

# Pin all RNG seeds for reproducible runs.
seed_everything(42)


# 加载数据集
# Dataset loader for a BIO-tagged corpus: one "char TAG" pair per line,
# sentences separated by blank lines.
class MyDataset(ListDataset):
    @staticmethod
    def load_data(filename):
        """Parse a BIO-tagged corpus file.

        Args:
            filename: path to a utf-8 text file where each non-empty line is
                "char TAG" and sentences are separated by blank lines.

        Returns:
            List of examples, each ``[text, [start, end, label], ...]`` where
            ``start``/``end`` are inclusive character offsets into ``text``.
        """
        examples = []
        with open(filename, encoding='utf-8') as fh:
            content = fh.read()
        for block in content.split('\n\n'):
            if not block:
                continue
            example = ['']
            for idx, line in enumerate(block.split('\n')):
                try:
                    char, tag = line.strip().split(' ')
                except ValueError:
                    # Malformed line (wrong number of fields, e.g. the char
                    # itself is whitespace): substitute a placeholder char
                    # and treat the position as outside any entity.
                    char, tag = "_", "O"
                example[0] += char
                if tag[0] == 'B':
                    # Open a new entity span [start, end, label].
                    example.append([idx, idx, tag[2:]])
                elif tag[0] == 'I' and len(example) > 1:
                    # Extend the most recent entity span. An 'I' tag with no
                    # preceding entity is silently ignored, matching the
                    # original try/except-pass behavior.
                    example[-1][1] = idx
            examples.append(example)
        return examples


# Local pretrained Chinese RoBERTa checkpoint directory.
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"

# Derive the vocab / config / weights paths from the checkpoint directory.
dict_path, config_path, checkpoint_path = (
    os.path.join(pretrained_model, fname)
    for fname in ("vocab.txt", "config.json", "pytorch_model.bin")
)

# Word-piece tokenizer built from the checkpoint vocabulary.
tokenizer = Tokenizer(dict_path, do_lower_case=True)

# 定义bert上的模型结构
# BERT encoder + linear emission layer + CRF decoding head.
class Model(BaseModel):
    def __init__(self):
        super().__init__()
        # Encoder without segment embeddings (segment_vocab_size=0).
        self.bert = build_transformer_model(config_path=config_path, segment_vocab_size=0)
        # Project hidden states to per-tag emission scores (all BIO tags).
        self.fc = nn.Linear(768, len(categories))
        self.crf = CRF(len(categories))

    def forward(self, token_ids):
        """Return (emission_score [btz, seq_len, tag_size], attention_mask)."""
        hidden = self.bert([token_ids])  # [btz, seq_len, hdsz]
        scores = self.fc(hidden)
        # Non-zero token ids mark real (non-padding) positions.
        mask = token_ids.gt(0).long()
        return scores, mask

    def predict(self, token_ids):
        """Decode the best tag path for a batch of token ids. [btz, seq_len]"""
        self.eval()
        with torch.no_grad():
            scores, mask = self.forward(token_ids)
            return self.crf.decode(scores, mask)


model = Model().to(device)


def trans_entity2tuple(scores, id2label=None):
    '''Convert decoded tag-id tensors to (sample_id, start, end, label) tuples.

    Args:
        scores: [btz, seq_len] tensor-like of tag ids; each element must
            expose ``.item()``.
        id2label: optional {tag_id: tag_name} mapping; defaults to the
            module-level ``categories_id2label``.

    Returns:
        List of ``(sample_id, start_token, end_token, label)`` tuples,
        one per decoded entity (token offsets are inclusive).
    '''
    if id2label is None:
        id2label = categories_id2label
    batch_entity_ids = []
    for sample_id, one_samp in enumerate(scores):
        entity_ids = []
        for pos, item in enumerate(one_samp):
            tag = id2label[item.item()]
            if tag.startswith('B-'):
                # Open a new entity span.
                entity_ids.append([sample_id, pos, pos, tag[2:]])
            elif not entity_ids:
                continue
            elif entity_ids[-1] and tag.startswith('I-') and tag[2:] == entity_ids[-1][-1]:
                # Matching I- tag: extend the current entity to this position.
                entity_ids[-1][-2] = pos
            elif entity_ids[-1]:
                # Any other tag closes the current entity; the empty sentinel
                # prevents a stray later 'I-' from re-opening it.
                entity_ids.append([])
        # NOTE: distinct loop variable here — the original reused `i` and
        # shadowed the sample index.
        batch_entity_ids.extend(tuple(ent) for ent in entity_ids if ent)
    return batch_entity_ids


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one ``{'label', 'text'}`` entry decoded from id lists.

    All label ids are expected to map to the same entity type (after the
    B-/I- prefix is stripped); an AssertionError is raised otherwise.
    Mutates and returns ``predict_result``.
    """
    label_list = [categories[i].replace("B-", "").replace("I-", "") for i in temp_label_list]
    assert len(set(label_list)) == 1, f"get more than 1 label in predict result:{set(label_list)}"
    predict_result.append({
        "label": label_list[0],
        "text": tokenizer.decode(temp_token_list),
    })
    return predict_result


def fill_outputs(text: str, entity_list: List[Tuple[int, int, int, str]], token_offsets: List[List[List[int]]]):
    """Map token-level entity spans back to character-level spans of ``text``.

    Args:
        text: original input text with spaces masked as '_'.
        entity_list: ``(sample_id, start_token, end_token, label)`` tuples
            (token offsets inclusive).
        token_offsets: per-sample lists of character-offset lists, one per
            token; only the first sample is used. The caller encodes the text
            twice (as a text pair), so entities starting in the second copy
            are dropped.

    Returns:
        List of ``{'start', 'end', 'label', 'text'}`` dicts with character
        offsets into ``text``.
    """
    _text = text.replace("_", " ")  # restore the spaces masked before encoding
    token_offset = token_offsets[0]
    half = len(token_offset) // 2  # boundary of the first copy of the text
    result = []
    for entity in entity_list:
        start, end_inclusive, label = entity[1], entity[2], entity[3]
        if start >= half:
            # Entity lies in the duplicated second copy of the text; skip.
            continue
        char_ids = []
        for idx in range(start, end_inclusive + 1):
            char_ids.extend(token_offset[idx])
        if char_ids:
            result.append({
                "start": min(char_ids),
                "end": max(char_ids),
                "label": label,
                "text": "".join(_text[x] for x in char_ids),
            })
    return result


def inference_ner(text: str):
    """Run single-text NER: encode, CRF-decode, map spans back to characters."""
    # Encode as a (text, text) pair so character offsets are returned.
    inputs = tokenizer.encode(text, text, maxlen=maxlen, return_offsets=True, return_tensors="pt")
    print("inputs=", inputs)

    decoded = model.predict(inputs[0])
    token_entities = trans_entity2tuple(decoded)
    print("entity_list=", token_entities)

    char_entities = fill_outputs(text, token_entities, inputs[2])
    print("entity_list=", char_entities)
    return char_entities


if __name__ == '__main__':
    # NOTE(review): the original script assigned a stationery example first and
    # immediately overwrote it; that dead assignment has been removed.
    text = "恒夫曼 M14/35mm 内六角螺栓 DIN912 12.9级  全牙 黑色"
    # Mask spaces as '_' so they survive tokenization; fill_outputs restores them.
    text = text.replace(" ", "_")
    print("text=", text)

    model.load_weights("./model/best_model.pt")
    model.eval()

    time.sleep(1)  # settle before timing the single inference
    start = time.time()
    result = inference_ner(text)
    end = time.time()
    print("use time", end - start)
    # Observed: ~0.068s
    print(result)
    # Expected:
    # [{'start': 0, 'end': 2, 'label': 'BRAND', 'text': '恒夫曼'}, {'start': 4, 'end': 11, 'label': 'MODEL', 'text': 'M14/35mm'}]
