# encoding: utf-8

import numpy as np
import pandas as pd
from tqdm import tqdm

import datasets
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import DataCollatorForTokenClassification

from transformers import BertTokenizer

from torchkeras import KerasModel

# Local checkpoint of hfl/chinese-roberta-wwm-ext (BERT-compatible).
model_name = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
tokenizer = BertTokenizer.from_pretrained(model_name)
print(tokenizer)

datadir = "./data/"

train_path = datadir + "train.json"
val_path = datadir + "dev.json"

# CLUENER-style JSON-lines files: one {"text": ..., "label": {...}} per line.
dftrain = pd.read_json(train_path, lines=True)
# BUG FIX: the validation frame previously re-read train_path, so the model
# was "validated" on its own training data; read the dev split instead.
dfval = pd.read_json(val_path, lines=True)

# Entity types of the CLUENER task; labels follow the BIO scheme.
entities = ['address', 'book', 'company', 'game', 'government', 'movie',
            'name', 'organization', 'position', 'scene']

label_names = ['O'] + ['B-' + x for x in entities] + ['I-' + x for x in entities]

# id 0 is 'O'; B-* and I-* follow, so "entity token" <=> label id > 0.
id2label = {i: label for i, label in enumerate(label_names)}
label2id = {v: k for k, v in id2label.items()}

# Inspect one sample to sanity-check the data format.
text = dftrain["text"][43]
label = dftrain["label"][43]

print(text)
print(label)

tokenized_input = tokenizer(text)
print(tokenized_input)
print(tokenized_input["input_ids"])

tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
print("tokens=", tokens)
def get_char_label(text, label):
    """Convert span-style entity labels into a character-level BIO tag list.

    Args:
        text: the raw sentence.
        label: dict mapping entity type -> {entity string -> [[start, end], ...]}
               where ``end`` is an inclusive character index (CLUENER format).

    Returns:
        A list of BIO tags ('O' / 'B-<type>' / 'I-<type>'), one per character
        of ``text``.
    """
    char_label = ['O'] * len(text)
    for tp, dic in label.items():
        for word, idxs in dic.items():
            # BUG FIX: the original only used idxs[0], silently dropping every
            # additional occurrence of the same entity string in the sentence.
            for idx_start, idx_end in idxs:
                char_label[idx_start] = 'B-' + tp
                char_label[idx_start + 1:idx_end + 1] = ['I-' + tp] * (idx_end - idx_start)
    return char_label


# char_label = get_char_label(text, label)
# for char, char_tp in zip(text, char_label):
#     print(char + '\t' + char_tp)


def get_token_label(text, char_label, tokenizer):
    """Align character-level BIO tags with the tokens produced for ``text``.

    Walks tokens and characters in lockstep:
      * a single-character token (e.g. a CJK character) copies its char tag;
      * special tokens such as [CLS]/[SEP] get 'O' and consume no characters;
      * [UNK] takes the current char tag, then characters are skipped until
        the first character of the NEXT token is found (re-synchronisation);
      * a multi-character token (subword, '##' stripped) consumes one char
        tag per covered character.

    Returns:
        A list of tags, one per token (checked by the final assert).
    """
    tokenized_input = tokenizer(text)
    tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])

    iter_tokens = iter(tokens)
    iter_char_label = iter(char_label)
    # lower-cased to match the wordpiece tokens -- assumes the checkpoint's
    # vocab is lowercase; TODO confirm for this model
    iter_text = iter(text.lower())

    token_labels = []

    t = next(iter_tokens)
    char = next(iter_text)
    char_tp = next(iter_char_label)

    while True:
        # Single-character token (e.g. a Chinese character): copy its tag.
        if len(t) == 1:
            assert t == char
            token_labels.append(char_tp)
            try:
                char = next(iter_text)
                char_tp = next(iter_char_label)
            except StopIteration:
                pass

        # Special tokens added by the tokenizer, e.g. [CLS]/[SEP],
        # excluding [UNK] which is handled separately below.
        elif t in tokenizer.special_tokens_map.values() and t != '[UNK]':
            token_labels.append('O')


        elif t == '[UNK]':
            token_labels.append(char_tp)
            # Re-align: skip characters until we find the first character
            # of the next token.
            try:
                t = next(iter_tokens)
            except StopIteration:
                break

            if t not in tokenizer.special_tokens_map.values():
                while char != t[0]:
                    try:
                        char = next(iter_text)
                        char_tp = next(iter_char_label)
                    except StopIteration:
                        pass
            continue

        # Other tokens longer than one character, e.g. English subwords.
        else:
            t_label = char_tp
            t = t.replace('##', '')  # strip the subword continuation marker '##'
            for c in t:
                assert c == char or char not in tokenizer.vocab
                # NOTE(review): this keeps overwriting while the RUNNING tag
                # is not 'O', so the token ends up with its LAST character's
                # tag (a token starting at 'B-x' may come out 'I-x').
                # Confirm whether `if char_tp != 'O'` was intended instead.
                if t_label != 'O':
                    t_label = char_tp
                try:
                    char = next(iter_text)
                    char_tp = next(iter_char_label)
                except StopIteration:
                    pass
            token_labels.append(t_label)

        try:
            t = next(iter_tokens)
        except StopIteration:
            break

    assert len(token_labels) == len(tokens)
    return token_labels


# token_labels = get_token_label(text, char_label, tokenizer)
#
# for t, t_label in zip(tokens, token_labels):
#     print(t, '\t', t_label)


def make_sample(text, label, tokenizer):
    """Build one training sample: tokenizer encodings plus a 'labels' key
    holding one integer label id per token."""
    encoded = tokenizer(text)
    char_tags = get_char_label(text, label)
    token_tags = get_token_label(text, char_tags, tokenizer)
    encoded['labels'] = [label2id[tag] for tag in token_tags]
    return encoded


train_samples = [make_sample(t, lb, tokenizer)
                 for t, lb in tqdm(list(zip(dftrain['text'], dftrain['label'])))]
val_samples = [make_sample(t, lb, tokenizer)
               for t, lb in tqdm(list(zip(dfval['text'], dfval['label'])))]

ds_train = datasets.Dataset.from_list(train_samples)
ds_val = datasets.Dataset.from_list(val_samples)

# Dynamically pads input_ids / attention_mask / labels to the batch maximum.
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
dl_train = DataLoader(ds_train, batch_size=8, collate_fn=data_collator)
dl_val = DataLoader(ds_val, batch_size=8, collate_fn=data_collator)

# Grab a single batch for the smoke test below.
batch = next(iter(dl_train))

from transformers import BertForTokenClassification

net = BertForTokenClassification.from_pretrained(
    model_name,
    id2label=id2label,
    label2id=label2id,
)

# Freeze the BERT encoder; only the token-classification head stays trainable.
net.bert.requires_grad_(False)

print(net.config.num_labels)

# Smoke test: forward one batch through the model.
out = net(**batch)
print(out.loss)
print(out.logits.shape)

class StepRunner:
    """A single train/eval step plugged into torchkeras' KerasModel.

    Produces the model loss plus token-level precision/recall/F1, where a
    "positive" token is any non-'O' tag (label id > 0). Padding positions
    created by DataCollatorForTokenClassification carry label -100 and are
    excluded from the metrics.
    """

    def __init__(self, net, loss_fn, accelerator, stage="train", metrics_dict=None,
                 optimizer=None, lr_scheduler=None
                 ):
        self.net, self.loss_fn, self.metrics_dict, self.stage = net, loss_fn, metrics_dict, stage
        self.optimizer, self.lr_scheduler = optimizer, lr_scheduler
        self.accelerator = accelerator
        if self.stage == 'train':
            self.net.train()
        else:
            self.net.eval()

    def __call__(self, batch):

        out = self.net(**batch)

        # loss is computed inside the HF model from batch['labels']
        loss = out.loss

        # predicted label id per token: shape (batch, seq_len)
        preds = (out.logits).argmax(axis=2)

        # backward + optimizer step (train stage only)
        if self.optimizer is not None and self.stage == "train":
            self.accelerator.backward(loss)
            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
            self.optimizer.zero_grad()

        all_loss = self.accelerator.gather(loss).sum()

        labels = batch['labels']

        # Valid (non-padding) positions: the collator pads labels with -100.
        valid = labels >= 0
        one = torch.tensor(1.0, device=preds.device)

        # precision & recall over entity (non-'O') tokens.
        # BUG FIX: the original precision denominator counted predictions made
        # at padding positions (labels == -100), deflating precision.
        precision = (((preds > 0) & valid & (preds == labels)).sum()) / (
            torch.maximum(((preds > 0) & valid).sum(), one))
        recall = (((labels > 0) & (preds == labels)).sum()) / (
            torch.maximum((labels > 0).sum(), one))

        all_precision = self.accelerator.gather(precision).mean()
        all_recall = self.accelerator.gather(recall).mean()

        # BUG FIX: guard against division by zero with a tiny epsilon instead
        # of clamping the denominator to 1.0, which distorted F1 whenever
        # precision + recall < 1.
        f1 = 2 * all_precision * all_recall / torch.maximum(
            all_recall + all_precision, torch.tensor(1e-8).to(labels.device))

        # per-step scalars logged by torchkeras
        step_losses = {self.stage + "_loss": all_loss.item(),
                       self.stage + '_precision': all_precision.item(),
                       self.stage + '_recall': all_recall.item(),
                       self.stage + '_f1': f1.item()
                       }

        # metrics
        step_metrics = {}

        if self.stage == "train":
            if self.optimizer is not None:
                # current learning rate, for logging
                step_metrics['lr'] = self.optimizer.state_dict()['param_groups'][0]['lr']
            else:
                step_metrics['lr'] = 0.0
        return step_losses, step_metrics


# Replace torchkeras' default step logic with our NER StepRunner.
KerasModel.StepRunner = StepRunner

optimizer = torch.optim.AdamW(net.parameters(), lr=3e-5)

# loss_fn=None: the HF model computes its own loss inside StepRunner.
keras_model = KerasModel(net, loss_fn=None, optimizer=optimizer)

# Train with early stopping on validation F1.
fit_config = dict(
    train_data=dl_train,
    val_data=dl_val,
    ckpt_path='bert_ner.pt',
    epochs=50,
    patience=5,
    monitor="val_f1",
    mode="max",
    plot=True,
    wandb=False,
    quiet=True,
)
keras_model.fit(**fit_config)
