#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90

import os
import time
import numpy as np
import torch

import torch.nn as nn
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm

# Sequence-length / batching hyper-parameters.
maxlen = 128
batch_size = 200

# Entity types for this NER task.
labels = ["BRAND", "MODEL", "NAME", "COLOR", "SPECS", "UNIT"]

# BIO tag set: 'O' plus a B-/I- pair for every entity type, in `labels` order.
categories = ['O']
categories += [f"{prefix}{label}" for label in labels for prefix in ("B-", "I-")]
categories_id2label = dict(enumerate(categories))
categories_label2id = {tag: idx for idx, tag in enumerate(categories)}
"""
categories_id2label = {0: 'O',
 1: 'B-BRAND',
 2: 'I-BRAND',
 3: 'B-MODEL',
 4: 'I-MODEL',
 5: 'B-NAME',
 6: 'I-NAME',
 7: 'B-COLOR',
 8: 'I-COLOR',
 9: 'B-SPECS',
 10: 'I-SPECS',
 11: 'B-UNIT',
 12: 'I-UNIT'}
 
categories_label2id = {'O': 0,
 'B-BRAND': 1,
 'I-BRAND': 2,
 'B-MODEL': 3,
 'I-MODEL': 4,
 'B-NAME': 5,
 'I-NAME': 6,
 'B-COLOR': 7,
 'I-COLOR': 8,
 'B-SPECS': 9,
 'I-SPECS': 10,
 'B-UNIT': 11,
 'I-UNIT': 12}
 
 
"""

# BERT base — example checkpoint paths, kept for reference.
# config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
# checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
# dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
# Run on the first GPU when available, otherwise fall back to CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Fix the random seed for reproducibility.
seed_everything(42)


# Dataset loader
class MyDataset(ListDataset):
    """Load a char-level BIO corpus into span annotations.

    The corpus format is: one "char tag" pair per line, sentences separated
    by blank lines. Each returned sample is a list
        [text, [start, end, entity_type], ...]
    where start/end are inclusive character indices into ``text``.
    """

    @staticmethod
    def load_data(filename):
        D = []
        with open(filename, encoding='utf-8') as f:
            raw = f.read()
        for sentence in raw.split('\n\n'):
            if not sentence:
                continue
            d = ['']
            for i, line in enumerate(sentence.split('\n')):
                try:
                    char, flag = line.strip().split(' ')
                except ValueError:
                    # Malformed line (not exactly "char tag"): keep index
                    # alignment with a placeholder char tagged 'O'.
                    char, flag = '_', 'O'
                d[0] += char
                if flag[0] == 'B':
                    # Open a new entity span: [start, end, type].
                    d.append([i, i, flag[2:]])
                elif flag[0] == 'I':
                    try:
                        d[-1][1] = i  # extend the most recent span's end
                    except TypeError:
                        # 'I' with no preceding 'B': d[-1] is the text string,
                        # which rejects item assignment — skip the stray tag.
                        pass
            D.append(d)
        return D


# Local pretrained checkpoint directory (Chinese RoBERTa-wwm-ext).
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# Build the tokenizer from the checkpoint's vocabulary.
# dict_path = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext/vocab.txt"
# config_path = r'G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext/config.json'
# checkpoint_path = r'G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext/pytorch_model.bin'
dict_path = os.path.join(pretrained_model, "vocab.txt")
config_path = os.path.join(pretrained_model, "config.json")
checkpoint_path = os.path.join(pretrained_model, "pytorch_model.bin")

tokenizer = Tokenizer(dict_path, do_lower_case=True)


# tokenizer = BertTokenizer(dict_path, do_lower_case=True)

# bert_model = BertModel.from_pretrained(pretrained_model)
# bert_config = BertConfig.from_pretrained(pretrained_model)
# tokenizer = BertTokenizer.from_pretrained(pretrained_model)

def collate_fn(batch):
    """Convert raw samples into padded tensors of token ids and BIO label ids.

    Each sample is ``[text, [start, end, entity_type], ...]`` with inclusive
    character-level spans; the spans are remapped onto token positions.
    """
    token_id_seqs, label_seqs = [], []
    for sample in batch:
        text = sample[0]
        tokens = tokenizer.tokenize(text, maxlen=maxlen)
        mapping = tokenizer.rematch(text, tokens)
        # char position -> token index, for span heads and tails respectively
        head_map = {span[0]: idx for idx, span in enumerate(mapping) if span}
        tail_map = {span[-1]: idx for idx, span in enumerate(mapping) if span}
        ids = tokenizer.tokens_to_ids(tokens)
        tag_ids = np.zeros(len(ids))
        for start, end, ent_type in sample[1:]:
            # Skip entities whose boundaries fall outside the (possibly
            # truncated) token sequence.
            if start in head_map and end in tail_map:
                tok_start, tok_end = head_map[start], tail_map[end]
                tag_ids[tok_start] = categories_label2id['B-' + ent_type]
                tag_ids[tok_start + 1:tok_end + 1] = categories_label2id['I-' + ent_type]
        token_id_seqs.append(ids)
        label_seqs.append(tag_ids)
    token_tensor = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    label_tensor = torch.tensor(sequence_padding(label_seqs), dtype=torch.long, device=device)
    return token_tensor, label_tensor


# Model structure on top of BERT
class Model(BaseModel):
    """BERT backbone feeding a per-token emission layer, decoded with a CRF."""

    def __init__(self):
        super().__init__()
        # segment_vocab_size=0: the encoder is fed token ids only, no segments.
        self.bert = build_transformer_model(config_path=config_path, segment_vocab_size=0)
        # NOTE(review): hidden size 768 is hard-coded — assumes a BERT-base
        # sized checkpoint; confirm against the config file.
        self.fc = nn.Linear(768, len(categories))  # includes head/tail positions
        self.crf = CRF(len(categories))

    def forward(self, token_ids):
        hidden = self.bert([token_ids])  # [btz, seq_len, hdsz]
        emissions = self.fc(hidden)      # [btz, seq_len, tag_size]
        mask = token_ids.gt(0).long()    # 1 for real tokens, 0 for padding
        return emissions, mask

    def predict_one(self, token_ids):
        """Viterbi-decode the best tag path for a batch of token ids."""
        self.eval()
        with torch.no_grad():
            emissions, mask = self.forward(token_ids)
            best_path = self.crf.decode(emissions, mask)  # [btz, seq_len]
        return best_path


model = Model().to(device)


class Loss(nn.Module):
    """CRF negative log-likelihood wrapper around the global model's CRF layer."""

    def forward(self, outputs, labels):
        # `outputs` is the (emission_score, attention_mask) pair from Model.forward.
        emission_score, attention_mask = outputs
        return model.crf(emission_score, attention_mask, labels)


def acc(y_pred, y_true):
    """Token-level accuracy between emission argmax and gold labels.

    Args:
        y_pred: tuple/list whose first element is the emission score tensor
            of shape [btz, seq_len, tag_size] (as returned by Model.forward).
        y_true: gold label ids, shape [btz, seq_len].

    Returns:
        dict with a single 'acc' float entry.
    """
    emissions = y_pred[0]
    pred_ids = torch.argmax(emissions, dim=-1)
    # Renamed from `acc` so the local no longer shadows this function's name.
    accuracy = torch.sum(pred_ids.eq(y_true)).item() / y_true.numel()
    return {'acc': accuracy}


# 支持多种自定义metrics = ['accuracy', acc, {acc: acc}]均可
# model.compile(loss=Loss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), metrics=acc)


def trans_entity2tuple(scores):
    '''Convert decoded tag-id sequences into a set of
    (sample_id, start, end, entity_type) tuples for metric computation.

    Args:
        scores: [btz, seq_len] tensor of tag ids (CRF decode output).
    '''
    batch_entity_ids = set()
    for sample_id, tag_seq in enumerate(scores):
        entity_ids = []
        for pos, tag_id in enumerate(tag_seq):
            tag = categories_id2label[tag_id.item()]
            if tag.startswith('B-'):
                # Open a new entity: [sample, start, end, type].
                entity_ids.append([sample_id, pos, pos, tag[2:]])
            elif len(entity_ids) == 0:
                continue
            elif entity_ids[-1] and tag.startswith('I-') and tag[2:] == entity_ids[-1][-1]:
                # Same-type continuation: extend the current entity's end.
                entity_ids[-1][-2] = pos
            elif entity_ids[-1]:
                # Entity terminated ('O' or type-mismatched 'I-'): close it
                # with an empty sentinel so later tags cannot extend it.
                entity_ids.append([])

        # Fixed: the original reused the outer loop variable `i` here,
        # shadowing the sample index.
        for entity in entity_ids:
            if entity:
                batch_entity_ids.add(tuple(entity))
    return batch_entity_ids


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one decoded entity as ``{"label", "text"}`` to predict_result.

    Args:
        predict_result: accumulator list; mutated in place and also returned.
        temp_token_list: token ids belonging to a single entity.
        temp_label_list: tag ids (indices into ``categories``) for those tokens.

    Raises:
        ValueError: if the tokens map to more than one entity type.
    """
    tag_names = [categories[i] for i in temp_label_list]
    entity_types = {tag.replace("B-", "").replace("I-", "") for tag in tag_names}
    # Explicit raise instead of `assert`, so the check survives `python -O`.
    if len(entity_types) != 1:
        raise ValueError(f"get more than 1 label in predict result:{entity_types}")
    predict_result.append({
        "label": entity_types.pop(),
        "text": tokenizer.decode(temp_token_list),
    })
    return predict_result


def inference_ner(text: str):
    """Run NER over ``text`` and return a list of ``{"label", "text"}`` entities.

    Tag-id convention (see ``categories``): 0 is 'O', odd ids are 'B-*',
    even non-zero ids are 'I-*'.
    """
    predict_result = []
    tokens = tokenizer.tokenize(text, maxlen=maxlen)
    token_ids = tokenizer.tokens_to_ids(tokens)
    predict_input = torch.tensor(sequence_padding([token_ids]), dtype=torch.long, device=device)

    # NOTE(review): the model consumes `inputs[0]` from this encode() call,
    # while the token ids decoded below come from `predict_input` above —
    # presumably the two sequences are identical; verify, and confirm whether
    # `second_texts=text` (encoding the same text twice) is intentional.
    inputs = tokenizer.encode(text, second_texts=text, return_offsets=True, return_tensors="pt")
    predict_output = model.predict_one(inputs[0])
    batch_token_ids_list = predict_input.int().tolist()
    predict_label_list = predict_output.int().tolist()

    for token_ids, predict_label in zip(batch_token_ids_list, predict_label_list):
        temp_token_list = []
        temp_label_list = []
        for temp_token, temp_label in zip(token_ids, predict_label):
            if temp_label == 0:
                # 'O' tag: flush any entity collected so far.
                if temp_token_list:
                    fill_predict_result(predict_result, temp_token_list, temp_label_list)
                    temp_token_list = []
                    temp_label_list = []
                continue
            if temp_label % 2 == 1:
                # 'B-*' tag: flush the previous entity, then start a new one.
                # (Fixed: the original nested the same flush check twice.)
                if temp_token_list:
                    fill_predict_result(predict_result, temp_token_list, temp_label_list)
                    temp_token_list = []
                    temp_label_list = []
                temp_token_list.append(temp_token)
                temp_label_list.append(temp_label)
            else:
                # 'I-*' tag: continue the current entity.
                temp_token_list.append(temp_token)
                temp_label_list.append(temp_label)
        # Flush a trailing entity that ran to the end of the sequence.
        if temp_token_list:
            fill_predict_result(predict_result, temp_token_list, temp_label_list)
    return predict_result


if __name__ == '__main__':
    # Spaces were mapped to '_' during training, so mirror that at inference.
    text = "得力0010订书钉10#(1000枚/盒)10盒/包(单位:包)".replace(" ", "_")
    # print("text=", text)

    model.load_weights("./model/best_model.pt")
    model.eval()

    time.sleep(1)
    tic = time.time()
    entities = inference_ner(text)
    toc = time.time()
    # print("use time", toc - tic)
    print(entities)
