#! -*- coding:utf-8 -*-
# BERT + CRF for named entity recognition
# Original reference dataset: http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1] token_level: 97.06; entity_level: 95.90 (reported on the people-daily corpus)

import os
import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm

# Use the 'spawn' start method for multiprocessing.  Guarded because
# set_start_method raises RuntimeError when the context has already been
# set (e.g. when this module is re-imported inside a spawned DataLoader
# worker process).
try:
    torch.multiprocessing.set_start_method('spawn')
except RuntimeError:
    pass

maxlen = 128       # max sequence length (characters) per sample
batch_size = 128
num_workers = 0    # DataLoader worker processes (0 = load in main process)


is_train = True    # True: train on the train split; False: use the test split as "train"
# is_train = False
labels = ["BRAND"]  # entity types; BIO tags are derived from these below

# Build the BIO tag inventory: 'O' plus B-/I- tags for every entity type.
categories = ['O']
for label in labels:
    categories.append("B-" + label)
    categories.append("I-" + label)
categories_id2label = {i: k for i, k in enumerate(categories)}
categories_label2id = {k: i for i, k in enumerate(categories)}

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print("using device:", device)
# Fix all random seeds for reproducibility
seed_everything(42)

print(categories)


# ['O', 'B-BRAND', 'I-BRAND', 'B-MODEL', 'I-MODEL', 'B-NAME', 'I-NAME', 'B-COLOR', 'I-COLOR', 'B-SPECS', 'I-SPECS', 'B-UNIT', 'I-UNIT']

# 加载数据集
# Dataset loader
class MyDataset(ListDataset):
    @staticmethod
    def load_data(filename):
        """Parse a BIO-tagged file into samples.

        The file has one "char tag" pair per line, sentences separated by
        blank lines.  Each returned sample is a list
        ``[text, [start, end, label], ...]`` with inclusive start/end
        character indices into ``text``.
        """
        D = []
        with open(filename, encoding='utf-8') as f:
            for block in f.read().split('\n\n'):
                if not block:
                    continue
                d = ['']
                for i, c in enumerate(block.split('\n')):
                    try:
                        char, flag = c.strip().split(' ')
                    except ValueError:
                        # BUGFIX: the original `except: pass` fell through and
                        # reused the previous line's char/flag (or raised
                        # NameError on the first line) — skip malformed lines.
                        continue
                    d[0] += char
                    if flag[0] == 'B':
                        # open a new entity: [start, end, label]
                        d.append([i, i, flag[2:]])
                    elif flag[0] == 'I' and len(d) > 1:
                        # extend the current entity; an 'I' with no preceding
                        # 'B' (d holds only the text) is ignored explicitly
                        # instead of via a swallowed TypeError.
                        d[-1][1] = i
                D.append(d)
        return D


# BERT base checkpoint (local copy of hfl/chinese-roberta-wwm-ext)
pretrained_model = r"/mnt/pretrained_models/hfl-chinese-roberta-wwm-ext/"
dict_path = pretrained_model + "vocab.txt"
config_path = pretrained_model + 'config.json'
checkpoint_path = pretrained_model + 'pytorch_model.bin'

# Build the tokenizer (case is preserved: do_lower_case=False)
tokenizer = Tokenizer(dict_path, do_lower_case=False)


def collate_fn(batch):
    """Turn raw samples into padded (token_ids, label_ids) long tensors.

    Each sample is [text, [start, end, label], ...]; entity character
    spans are remapped onto token positions and encoded as BIO tag ids.
    """
    all_token_ids, all_label_ids = [], []
    for sample in batch:
        text = sample[0]
        tokens = tokenizer.tokenize(text, maxlen=maxlen)
        mapping = tokenizer.rematch(text, tokens)
        start_mapping = {span[0]: idx for idx, span in enumerate(mapping) if span}
        end_mapping = {span[-1]: idx for idx, span in enumerate(mapping) if span}
        token_ids = tokenizer.tokens_to_ids(tokens)
        label_ids = np.zeros(len(token_ids))
        for start, end, tag in sample[1:]:
            # keep only entities whose boundaries survived tokenization
            if start in start_mapping and end in end_mapping:
                tok_start = start_mapping[start]
                tok_end = end_mapping[end]
                label_ids[tok_start] = categories_label2id['B-' + tag]
                label_ids[tok_start + 1:tok_end + 1] = categories_label2id['I-' + tag]
        all_token_ids.append(token_ids)
        all_label_ids.append(label_ids)
    token_tensor = torch.tensor(sequence_padding(all_token_ids), dtype=torch.long, device=device)
    label_tensor = torch.tensor(sequence_padding(all_label_ids), dtype=torch.long, device=device)
    return token_tensor, label_tensor


# 处理数据批次
def collate_fn2(batch):
    """Collate for a span/pointer-style head: one (maxlen x maxlen) 0/1
    matrix per category, where labels[c, s, e] == 1 marks an entity of
    category c spanning tokens s..e.

    NOTE(review): currently unused by this script (the DataLoaders use
    collate_fn).  BUGFIX: the original built the batch lists but never
    returned them, so any caller got None.
    """
    batch_token_ids, batch_labels = [], []
    for bb in batch:
        text = bb[0]
        text_labels = bb[1:]
        text = text.lower()
        # character-level tokens with explicit [CLS]/[SEP]
        tokens = ['[CLS]'] + list(str(text)[:maxlen]) + ['[SEP]']
        # map tokens back to character positions of the original text
        mapping = tokenizer.rematch(str(text), tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}

        token_ids = tokenizer.tokens_to_ids(tokens)

        # all-zero label tensor: [num_categories, maxlen, maxlen]
        labels = np.zeros((len(categories_label2id), maxlen, maxlen))

        for start, end, label in text_labels:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                label = categories_label2id[label]
                labels[label, start, end] = 1
            else:
                # entity boundaries lost to truncation/tokenization
                print(text_labels)

        batch_token_ids.append(token_ids)  # length already limited above
        batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])

    # BUGFIX: pad and return the batch (the original fell off the end).
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels, seq_dims=3), dtype=torch.long, device=device)
    return batch_token_ids, batch_labels


print("start load data")
# Build the dataloaders.  When is_train is False, the "train" loader also
# reads the test split (evaluation-only run).
if is_train:
    _train_path = '/root/train_about/bert4torch_train/datas/train_bio_brand.txt'
else:
    _train_path = '/root/train_about/bert4torch_train/datas/test_bio_brand.txt'
train_dataloader = DataLoader(MyDataset(_train_path), batch_size=batch_size,
                              shuffle=True, collate_fn=collate_fn, num_workers=num_workers)
valid_dataloader = DataLoader(MyDataset('/root/train_about/bert4torch_train/datas/test_bio_brand.txt'),
                              batch_size=batch_size, collate_fn=collate_fn, num_workers=num_workers)

print("end load data")


# 定义bert上的模型结构
# Model definition: BERT encoder -> linear emission layer -> CRF
class Model(BaseModel):
    def __init__(self):
        super().__init__()
        self.bert = build_transformer_model(config_path=config_path,
                                            checkpoint_path=checkpoint_path,
                                            segment_vocab_size=0)
        self.fc = nn.Linear(768, len(categories))  # one logit per BIO tag
        self.crf = CRF(len(categories))

    def forward(self, token_ids):
        """Return (emission_score, attention_mask) for a batch of token ids."""
        hidden = self.bert([token_ids])   # [btz, seq_len, hdsz]
        emissions = self.fc(hidden)       # [btz, seq_len, tag_size]
        mask = token_ids.gt(0).long()     # 1 on non-pad positions
        return emissions, mask

    def predict(self, token_ids):
        """Decode the best tag path with the CRF (switches to eval mode)."""
        self.eval()
        with torch.no_grad():
            emissions, mask = self.forward(token_ids)
            best_path = self.crf.decode(emissions, mask)  # [btz, seq_len]
        return best_path


# Instantiate the model and move it to the selected device
model = Model().to(device)


class Loss(nn.Module):
    """CRF negative log-likelihood; `outputs` is (emission_score, attention_mask)."""

    def forward(self, outputs, labels):
        emission_score, attention_mask = outputs
        return model.crf(emission_score, attention_mask, labels)


def acc(y_pred, y_true):
    """Token-level accuracy over ALL positions (padding included).

    y_pred is the model output tuple; y_pred[0] holds the emission scores.
    """
    emissions = y_pred[0]
    pred_tags = torch.argmax(emissions, dim=-1)
    correct = pred_tags.eq(y_true).sum().item()
    return {'acc': correct / y_true.numel()}


# Several custom-metric forms are supported: metrics = ['accuracy', acc, {acc: acc}] all work
model.compile(loss=Loss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), metrics=acc, bar="tqdm")


def evaluate(data):
    """Run the model over `data` and compute token- and entity-level metrics.

    Returns (f1, precision, recall, f2, precision2, recall2): the first
    triple is token-level, the second entity-level.
    """
    # smoothed counters: correct / predicted / gold
    tok_correct = tok_pred = tok_gold = 1e-10
    ent_correct = ent_pred = ent_gold = 1e-10
    for token_ids, label in tqdm(data):
        scores = model.predict(token_ids)  # [btz, seq_len]
        mask = label.gt(0)  # positions carrying a non-O gold tag

        # token level
        tok_correct += (scores.eq(label) * mask).sum().item()
        tok_pred += scores.gt(0).sum().item()
        tok_gold += label.gt(0).sum().item()

        # entity level
        pred_entities = trans_entity2tuple(scores)
        gold_entities = trans_entity2tuple(label)
        ent_correct += len(pred_entities & gold_entities)
        ent_pred += len(pred_entities)
        ent_gold += len(gold_entities)
    f1 = 2 * tok_correct / (tok_pred + tok_gold)
    precision, recall = tok_correct / tok_pred, tok_correct / tok_gold
    f2 = 2 * ent_correct / (ent_pred + ent_gold)
    precision2, recall2 = ent_correct / ent_pred, ent_correct / ent_gold
    return f1, precision, recall, f2, precision2, recall2


def trans_entity2tuple(scores, id2label=None):
    '''Convert a [btz, seq_len] tensor of tag ids into a set of
    (sample_id, start, end, entity_type) tuples for metric computation.

    :param scores: 2D tensor of tag ids (predictions or gold labels)
    :param id2label: optional {id: tag_string} mapping; defaults to the
        module-level categories_id2label (backward compatible).
    '''
    if id2label is None:
        id2label = categories_id2label
    batch_entity_ids = set()
    # NOTE: renamed the sample index from `i` — the original reused `i` for
    # the inner entity loop, shadowing the sample index.
    for sample_id, one_samp in enumerate(scores):
        entity_ids = []
        for j, item in enumerate(one_samp):
            flag_tag = id2label[item.item()]
            if flag_tag.startswith('B-'):  # open a new entity
                entity_ids.append([sample_id, j, j, flag_tag[2:]])
            elif len(entity_ids) == 0:
                continue
            elif (len(entity_ids[-1]) > 0) and flag_tag.startswith('I-') and (flag_tag[2:] == entity_ids[-1][-1]):
                # continuation of the open entity: extend its end index
                entity_ids[-1][-2] = j
            elif len(entity_ids[-1]) > 0:
                # any other tag closes the open entity
                entity_ids.append([])

        for entity in entity_ids:
            if entity:
                batch_entity_ids.add(tuple(entity))
    return batch_entity_ids


class Evaluator(Callback):
    """Per-epoch evaluation and checkpointing.

    Saves a checkpoint every epoch and additionally keeps the weights with
    the best entity-level F1 in ./model/best_model.pt.
    """

    def __init__(self):
        super(Evaluator, self).__init__()
        self.best_val_f1 = 0.  # best entity-level F1 seen so far

    def on_epoch_end(self, steps, epoch, logs=None):
        f1, precision, recall, f2, precision2, recall2 = evaluate(valid_dataloader)
        # Race-free replacement for the original exists()+mkdir() pair.
        os.makedirs("./model", exist_ok=True)
        # Weights moved to CPU before saving — presumably so checkpoints load
        # on CPU-only machines; TODO(review) confirm this is intentional.
        model.to("cpu")
        if f2 > self.best_val_f1:
            self.best_val_f1 = f2
            model.save_weights('./model/best_model.pt')
        model.save_weights(f"./model/epoch_{epoch}_steps_{steps}_f2_{f2:.5f}.pt")
        model.to(device)
        print(f'[val-token  level] f1: {f1:.5f}, p: {precision:.5f} r: {recall:.5f}')
        print(
            f'[val-entity level] f1: {f2:.5f}, p: {precision2:.5f} r: {recall2:.5f} best_f1: {self.best_val_f1:.5f}\n')


if __name__ == '__main__':

    evaluator = Evaluator()
    # steps_per_epoch=None -> iterate the full train dataloader each epoch
    model.fit(train_dataloader, epochs=20, steps_per_epoch=None, callbacks=[evaluator], bar="tqdm")