# encoding: utf-8

import numpy as np
import pandas as pd
from tqdm import tqdm

import datasets
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import DataCollatorForTokenClassification
from transformers import BertForTokenClassification
from transformers import BertTokenizer

from torchkeras import KerasModel

# model_name = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# Local checkpoint directory (presumably hfl/chinese-roberta-wwm-ext, which is
# BERT-compatible and therefore loadable with BertTokenizer).
model_name = r"E:/nlp_about/pretrained_model/hfl_chinese_roberta_wwm_ext"
tokenizer = BertTokenizer.from_pretrained(model_name)
# print(tokenizer)
# train_path = r'E:\nlp_about\bert4torch_train\data\train2.txt'
# NOTE(review): val_path points at train2.txt, which the commented-out line
# above suggests was once used for training — confirm it is disjoint from
# train.txt, otherwise val_f1 overestimates generalization.
train_path = r'E:\nlp_about\bert4torch_train\data\train.txt'
val_path = r'E:\nlp_about\bert4torch_train\data\train2.txt'

batch_size = 128
# Entity types annotated in the corpus (e-commerce product attributes).
entities = ["BRAND", "MODEL", "NAME", "COLOR", "SPECS", "UNIT"]

# BIO tagging scheme: "O" first (id 0), then B-/I- tags per entity type,
# in entity order, B before I.
label_names = ["O"] + [
    f"{prefix}-{entity}" for entity in entities for prefix in ("B", "I")
]

# Bidirectional id <-> label mappings consumed by the model config.
id2label = dict(enumerate(label_names))
label2id = {name: idx for idx, name in id2label.items()}


class MyDataset(Dataset):
    """Span-style NER dataset.

    Each item is ``[text, [start, end, entity_type], ...]`` where start/end
    are inclusive character indices into ``text``.
    """

    def __init__(self, file_path):
        # :param file_path: blank-line-separated "char flag" BIO file.
        self.data = self.load_data(file_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

    @staticmethod
    def load_data(file_path):
        """Parse a BIO-annotated file into span samples.

        Samples are separated by blank lines; each line within a sample is
        "char flag" (e.g. ``美 B-BRAND``).

        :param file_path: path to the annotation file (UTF-8)
        :return: list of ``[text, [start, end, type], ...]`` samples
        """
        samples = []
        with open(file_path, encoding='utf-8') as f:
            content = f.read()
        for block in content.split('\n\n'):
            if not block:
                continue
            d = ['']
            for i, raw in enumerate(block.split('\n')):
                if not raw:
                    # Stray empty line (e.g. from a trailing newline): skip it.
                    continue
                try:
                    char, flag = raw.strip().split(' ')
                except ValueError:
                    # Malformed line (e.g. the annotated char is itself a
                    # space, yielding "  O"): substitute "_"/"O" so character
                    # indices stay aligned — same policy as read_txt_file.
                    # The original `pass` reused the previous line's char/flag
                    # (or raised NameError on the first line).
                    char, flag = '_', 'O'
                d[0] += char
                if flag[0] == 'B':
                    # Open a new entity span [start, end, type], end inclusive.
                    d.append([i, i, flag[2:]])
                elif flag[0] == 'I' and len(d) > 1:
                    # Extend the most recent span.  An I-tag with no open span
                    # is ignored (the original silently swallowed the failed
                    # assignment onto the text string).
                    d[-1][1] = i
            samples.append(d)
        return samples


def read_txt_file(file_path: str) -> list:
    """Read a CoNLL-style "char flag" corpus and align labels to BERT tokens.

    Samples are separated by blank lines; each sample line is one "char flag"
    pair (BIO scheme).  The per-character labels are re-mapped onto the
    wordpiece tokens produced by the tokenizer, handling added special tokens
    ([CLS]/[SEP]), [UNK], and multi-character subword tokens.

    :param file_path: path to the whitespace-separated BIO annotation file
    :return: [{"input_ids": token_ids, "labels": labels}]
    """
    result = []
    with open(file_path, "r", encoding="utf-8") as f:
        f = f.read()
        for lines in tqdm(f.split('\n\n'), desc="读取数据"):
            if not lines:
                continue
            text = ""
            label = []
            for line in lines.split("\n"):
                try:
                    char, flag = line.strip().split(' ')
                except Exception as e:
                    # print(f">{line}<")
                    # Some lines may be a bare space character ("  O"), which
                    # breaks the split; substitute an underscore so text and
                    # label stay the same length.
                    char = "_"
                    flag = "O"
                text += char
                label.append(flag)
            # NOTE(review): padding=True is a no-op for a single sequence;
            # real padding happens later in DataCollatorForTokenClassification.
            sample = tokenizer(text, max_length=128, padding=True, truncation=True)
            tokens = tokenizer.convert_ids_to_tokens(sample["input_ids"])

            # Walk tokens, raw (lower-cased) chars and char-level labels in
            # lockstep to produce exactly one label per wordpiece token.
            iter_tokens = iter(tokens)
            iter_char_label = iter(label)
            iter_text = iter(text.lower())

            token_labels = []

            t = next(iter_tokens)
            char = next(iter_text)
            char_tp = next(iter_char_label)

            while True:
                # Single-character token (e.g. a CJK char): copy the label of
                # the corresponding raw character directly.
                if len(t) == 1:
                    assert t == char
                    token_labels.append(char_tp)
                    try:
                        char = next(iter_text)
                        char_tp = next(iter_char_label)
                    except StopIteration:
                        # Text exhausted; remaining tokens are specials.
                        pass

                # Added special tokens such as [CLS]/[SEP] (excluding [UNK]):
                # always labelled "O".
                elif t in tokenizer.special_tokens_map.values() and t != '[UNK]':
                    token_labels.append('O')


                elif t == '[UNK]':
                    # [UNK] swallows an unknown span: give it the current
                    # char's label, then advance through raw chars until we
                    # re-sync with the first char of the *next* token.
                    token_labels.append(char_tp)
                    try:
                        t = next(iter_tokens)
                    except StopIteration:
                        break

                    if t not in tokenizer.special_tokens_map.values():
                        # NOTE(review): if the char stream runs out before it
                        # matches t[0], the bare `pass` turns this into an
                        # infinite loop — confirm inputs can never hit that.
                        while char != t[0]:
                            try:
                                char = next(iter_text)
                                char_tp = next(iter_char_label)
                            except StopIteration:
                                pass
                    continue

                # Multi-character token (e.g. an English subword): consume one
                # raw char per token char; the token keeps the last non-"O"
                # char label encountered.
                else:
                    t_label = char_tp
                    t = t.replace('##', '')  # strip the subword marker '##'
                    for c in t:
                        assert c == char or char not in tokenizer.vocab
                        if t_label != 'O':
                            t_label = char_tp
                        try:
                            char = next(iter_text)
                            char_tp = next(iter_char_label)
                        except StopIteration:
                            pass
                    token_labels.append(t_label)

                try:
                    t = next(iter_tokens)
                except StopIteration:
                    break

            # Every token must have received exactly one label.
            assert len(token_labels) == len(tokens)
            sample["labels"] = [label2id[x] for x in token_labels]
            # sample["input_ids"] = fill_length(sample["input_ids"], 128)
            """
            {'input_ids': [101, 5401, 3723, 3975, 142, 3362, 5108, 3581, 142, 122, 119, 8132, 8178, 120, 4486, 142, 8110, 4486, 120, 5056, 142, 3146, 5056, 7218, 1545, 102], 
            'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 
            'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 
            'labels': [1, 2, 2, 0, 0, 0, 0, 0, 9, 10, 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 11, 0, 0, 11, 0, 0]}
            """
            result.append(sample)
            # NOTE(review): dead resets — text/label are re-initialized at
            # the top of the loop anyway.
            text = ""
            label = []
    return result

# Build tokenized datasets for training and validation.
train_samples = read_txt_file(train_path)
val_samples = read_txt_file(val_path)

ds_train = datasets.Dataset.from_list(train_samples)
ds_val = datasets.Dataset.from_list(val_samples)

# Dynamically pads input_ids/attention_mask per batch and pads labels with
# -100 so padded positions are ignored by the token-classification loss.
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
# shuffle=True: reshuffle training batches every epoch.  The original left
# shuffling off, so each epoch saw identical, file-ordered batches.
dl_train = DataLoader(ds_train, batch_size=batch_size, shuffle=True, collate_fn=data_collator)
dl_val = DataLoader(ds_val, batch_size=batch_size, collate_fn=data_collator)

# Token-classification head on top of the pretrained encoder; the label
# mappings are stored in the model config for later inference.
net = BertForTokenClassification.from_pretrained(
    model_name,
    id2label=id2label,
    label2id=label2id,
)


class StepRunner:
    """Custom torchkeras step: run one batch, return loss and P/R/F1.

    Metrics are token-level and only count positions whose predicted/true
    label id is non-zero (id 0 is "O" in this file's label scheme).
    """

    def __init__(self, net, loss_fn, accelerator, stage="train", metrics_dict=None,
                 optimizer=None, lr_scheduler=None
                 ):
        self.net = net
        self.loss_fn = loss_fn
        self.metrics_dict = metrics_dict
        self.stage = stage
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.accelerator = accelerator
        # Put the module in the mode matching this stage.
        getattr(self.net, "train" if self.stage == "train" else "eval")()

    def __call__(self, batch):
        outputs = self.net(**batch)

        # The model computes its own loss when `labels` is in the batch.
        loss = outputs.loss

        # Hard predictions: argmax over the class dimension.
        preds = outputs.logits.argmax(axis=2)

        # Backward + optimizer step, training stage only.
        if self.stage == "train" and self.optimizer is not None:
            self.accelerator.backward(loss)
            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
            self.optimizer.zero_grad()

        all_loss = self.accelerator.gather(loss).sum()

        labels = batch['labels']

        # Token-level precision/recall over non-"O" positions; the
        # torch.maximum(..., 1.0) guards against division by zero.
        predicted_entity = preds > 0
        true_entity = labels > 0
        correct = preds == labels
        floor_p = torch.tensor(1.0).to(preds.device)
        floor_l = torch.tensor(1.0).to(labels.device)
        precision = (predicted_entity & correct).sum() / torch.maximum(
            predicted_entity.sum(), floor_p)
        recall = (true_entity & correct).sum() / torch.maximum(
            true_entity.sum(), floor_l)

        all_precision = self.accelerator.gather(precision).mean()
        all_recall = self.accelerator.gather(recall).mean()

        f1 = 2 * all_precision * all_recall / torch.maximum(
            all_recall + all_precision, torch.tensor(1.0).to(labels.device))

        # Per-step scalars reported by torchkeras.
        step_losses = {
            f"{self.stage}_loss": all_loss.item(),
            f"{self.stage}_precision": all_precision.item(),
            f"{self.stage}_recall": all_recall.item(),
            f"{self.stage}_f1": f1.item(),
        }

        step_metrics = {}
        if self.stage == "train":
            lr = 0.0
            if self.optimizer is not None:
                lr = self.optimizer.state_dict()['param_groups'][0]['lr']
            step_metrics['lr'] = lr
        return step_losses, step_metrics


# Replace torchkeras' default step logic with the NER-aware StepRunner above.
KerasModel.StepRunner = StepRunner

optimizer = torch.optim.AdamW(net.parameters(), lr=3e-5)

# loss_fn=None: the loss is produced by the model itself inside StepRunner.
keras_model = KerasModel(net, loss_fn=None, optimizer=optimizer)

keras_model.fit(
    train_data=dl_train,
    val_data=dl_val,
    ckpt_path='ecom_bert_ner.pt',  # best-checkpoint path
    epochs=50,
    patience=5,        # stop after 5 epochs without val_f1 improvement
    monitor="val_f1",  # model selection / early stopping on validation F1
    mode="max",
    plot=True,
    wandb=False,
    quiet=True
)

# Persist the fine-tuned model and tokenizer in Hugging Face format so they
# can be reloaded with from_pretrained("ner_roberta") for inference.
net.save_pretrained("ner_roberta")
tokenizer.save_pretrained("ner_roberta")
