import os

import time

from typing import List, Tuple, Callable
from tqdm import tqdm

import pickle
import unicodedata
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from transformers import BertModel, BertConfig, BertTokenizer

from lsptrain.nlp_ner.model import NERDataSet, Model
from lsptrain.utils.common import release_cache

# Module-level training state, populated by init().  Kept global so that
# collate_fn (handed to DataLoader without extra arguments) can reach the
# tokenizer, device and label vocabulary at collation time.
max_length = 128  # maximum token sequence length; overwritten by init()
tokenizer = None  # BertTokenizer instance, set by init()
train_device = None  # torch.device selected by init() (CUDA or CPU fallback)
categories = ["O"]  # BIO tag vocabulary: "O" plus "B-"/"I-" per entity label
categories_label2id = {}  # tag name -> integer id, built by init()
categories_id2label = {}  # integer id -> tag name, built by init()
bert_model = None  # pretrained BertModel, loaded by init()
bert_config = None  # BertConfig matching bert_model, loaded by init()


def init(new_max_length, _pretrained_model, _device, _labels):
    """Initialize module-level state shared by the training pipeline.

    Loads the pretrained BERT model/config/tokenizer, selects the training
    device, and (re)builds the BIO tag vocabulary from ``_labels``.

    Args:
        new_max_length: maximum sequence length (stored in ``max_length``).
        _pretrained_model: HuggingFace model name or local checkpoint path.
        _device: device string (e.g. "cuda:0"); falls back to CPU when CUDA
            is unavailable.
        _labels: entity label names; each yields a "B-<label>" and an
            "I-<label>" tag in ``categories``.
    """
    global max_length, tokenizer, train_device, categories, categories_label2id, categories_id2label
    global bert_model, bert_config
    max_length = new_max_length
    bert_model = BertModel.from_pretrained(_pretrained_model)
    bert_config = BertConfig.from_pretrained(_pretrained_model)

    tokenizer = BertTokenizer.from_pretrained(_pretrained_model, do_lower_case=False)

    if torch.cuda.is_available():
        train_device = torch.device(_device)
    else:
        train_device = torch.device("cpu")
    print("init using device:", train_device)

    # BUG FIX: previously init() only appended, so calling it twice (e.g. two
    # training runs in one process) accumulated duplicate B-/I- tags.  Reset
    # in place (slice assignment) so aliases of the global list stay valid.
    categories[:] = ["O"]
    for _lab in _labels:
        categories.append("B-" + _lab)
        categories.append("I-" + _lab)
    categories_id2label = {i: k for i, k in enumerate(categories)}
    categories_label2id = {k: i for i, k in enumerate(categories)}


def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
    """Pad a batch of sequences to a common length.

    Lists/ndarrays are truncated/padded with ``value`` along the first
    ``seq_dims`` axes ('post' pads on the right, 'pre' on the left) and
    stacked into one ndarray.  Torch tensors are delegated to
    ``pad_sequence`` (right-padding only, batch-first).
    """
    first = inputs[0]

    if isinstance(first, (np.ndarray, list)):
        # Resolve the target length(s): per-axis max when unspecified,
        # wrapped in a list when given as a bare scalar.
        if length is None:
            length = np.max([np.shape(item)[:seq_dims] for item in inputs], axis=0)
        elif not hasattr(length, '__getitem__'):
            length = [length]

        trim = tuple(np.s_[:length[d]] for d in range(seq_dims))
        if len(trim) == 1:
            trim = trim[0]

        pad_spec = [(0, 0)] * len(np.shape(first))
        padded = []
        for item in inputs:
            item = item[trim]  # truncate to the target length first
            for d in range(seq_dims):
                gap = length[d] - np.shape(item)[d]
                if mode in {'post', 'right'}:
                    pad_spec[d] = (0, gap)
                elif mode in {'pre', 'left'}:
                    pad_spec[d] = (gap, 0)
                else:
                    raise ValueError('"mode" argument must be "post/right" or "pre/left".')
            padded.append(np.pad(item, pad_spec, 'constant', constant_values=value))
        return np.array(padded)

    if isinstance(first, torch.Tensor):
        assert mode in {'post', 'right'}, '"mode" argument must be "post/right" when element is torch.Tensor'
        if length is not None:
            inputs = [t[:length] for t in inputs]
        return pad_sequence(inputs, padding_value=value, batch_first=True)

    raise ValueError('"input" argument must be tensor/list/ndarray.')


def load_data(train_path: str, encoding: str, batch_size: int, collate_fn: Callable, split_str: str = " ",
              test_size: float = 0.1) -> Tuple[DataLoader, DataLoader]:
    """Load a BIO-format NER corpus and split it into train/test DataLoaders.

    The file contains blank-line-separated samples whose lines are
    "<char><split_str><tag>".  Each parsed sample has the shape
    ``[text, [start, end, label], ...]`` with character-level span indices.

    Args:
        train_path: path to the BIO-format txt corpus.
        encoding: file encoding used to read ``train_path``.
        batch_size: DataLoader batch size.
        collate_fn: batch collation callable (tokenizes and pads).
        split_str: separator between character and tag on each line.
        test_size: fraction held out for testing; out-of-range values fall
            back to 0.1.

    Returns:
        Tuple of (train_dataloader, test_dataloader).
    """
    samples = []
    with open(train_path, "r", encoding=encoding) as f:
        data_source = f.read()
    for block in tqdm(data_source.split("\n\n"), desc="loading data"):
        if not block:
            continue

        d = [""]
        for i, line in enumerate(block.split("\n")):
            char, flag = line.strip().split(split_str)
            d[0] += char
            if flag[0] == "B":
                d.append([i, i, flag[2:]])
            elif flag[0] == "I" and len(d) > 1:
                # Extend the current entity span.  The len(d) > 1 guard fixes
                # a crash on a stray "I-" tag with no preceding "B-": d[-1]
                # would then be the text string and item assignment raised
                # TypeError.
                d[-1][1] = i

        samples.append(d)

    if test_size < 0 or test_size > 1:
        test_size = 0.1
    test_idx = int(len(samples) * test_size)
    train_datas = samples[test_idx:]
    test_datas = samples[:test_idx]

    train_dataset = NERDataSet(train_datas)
    test_dataset = NERDataSet(test_datas)

    # BUG FIX: training data was loaded with shuffle=False (the intended
    # shuffle=True call sat commented out above it); unshuffled epochs bias
    # SGD updates toward the corpus ordering.
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)
    return train_dataloader, test_dataloader


def _is_special(ch):
    """判断是不是有特殊含义的符号
    """
    return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')


def _is_control(ch):
    """控制类字符判断
    """
    return unicodedata.category(ch) in ('Cc', 'Cf')


def stem(token):
    """Return *token* with a leading WordPiece "##" continuation marker removed."""
    return token[2:] if token.startswith('##') else token


def rematch_tokenizer(text, tokens):
    """Map each token back to the character offsets it covers in *text*.

    Returns a list the same length as *tokens* whose i-th entry is the list
    of character indices (into the original *text*) covered by tokens[i].
    Special tokens such as "[CLS]" map to an empty list, as do tokens that
    cannot be located in the normalized text.
    """
    # Normalize: drop NUL, U+FFFD and control characters, while recording a
    # char_mapping from normalized positions back to original positions.
    normalized_text, char_mapping = '', []
    for i, ch in enumerate(text):
        ch = ''.join([
            c for c in ch
            if not (ord(c) == 0 or ord(c) == 0xfffd or _is_control(c))
        ])
        normalized_text += ch
        char_mapping.extend([i] * len(ch))

    text, token_mapping, offset = normalized_text, [], 0
    for token in tokens:
        if _is_special(token):
            token_mapping.append([])
        else:
            token = stem(token)
            try:
                start = text[offset:].index(token) + offset
            except ValueError:
                # BUG FIX: the previous broad `except Exception: pass` dropped
                # the entry entirely, so token_mapping fell out of alignment
                # with *tokens* and downstream index maps (collate_fn) pointed
                # at the wrong tokens.  Record an empty mapping instead.
                token_mapping.append([])
                continue
            end = start + len(token)
            token_mapping.append(char_mapping[start:end])
            offset = end

    return token_mapping


def collate_fn(batch):
    """Collate NER samples into padded (token_ids, labels) tensors.

    Each sample ``d`` is ``[text, [start, end, label], ...]`` as produced by
    ``load_data``; spans are character indices into ``d[0]``.  Uses the
    module-level ``tokenizer``, ``categories_label2id`` and ``train_device``
    set up by ``init()``.

    NOTE(review): ``tokenizer.encode(tokens)`` typically prepends/appends
    special tokens ([CLS]/[SEP]), while ``start_mapping``/``end_mapping``
    index into the raw ``tokens`` list — the label positions therefore look
    shifted by one relative to ``token_ids``; confirm against the model's
    expected input layout.
    NOTE(review): sequences are not truncated to the module-level
    ``max_length`` here — verify whether that is intended.
    """
    batch_token_ids, batch_labels = [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0])  # noqa
        # Token index <-> character offset maps; tokens with an empty mapping
        # (special/unlocatable tokens) are excluded from the lookup dicts.
        mapping = rematch_tokenizer(d[0], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        # token_ids = tokenizer.tokens_to_ids(tokens)
        token_ids = tokenizer.encode(tokens)  # noqa
        labels = np.zeros(len(token_ids))
        for start, end, label in d[1:]:
            # Only label entities whose span boundaries survived tokenization.
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                labels[start] = categories_label2id['B-' + label]
                labels[start + 1:end + 1] = categories_label2id['I-' + label]
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=train_device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=train_device)
    return batch_token_ids, batch_labels


def get_optimizer(model: Model, select_optimizer: str = 'adam', lr: float = 0.0001):
    """Build an optimizer over *model*'s parameters.

    ``select_optimizer`` chooses SGD (case-insensitive "sgd"); any other
    value falls back to Adam.  Both use weight_decay=1e-4.
    """
    if select_optimizer.lower() == "sgd":
        return torch.optim.SGD(model.parameters(), lr=lr, weight_decay=1e-4)
    return torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)


def train(train_path: str, labels: List[str],
          test_size: float = 0.1, encoding: str = "utf-8",
          pretrained_model: str = "hfl/chinese-roberta-wwm-ext",
          learning_rate: float = 0.0001, optimizer: str = "adam",
          batch_size: int = 64, save_dir: str = "checkpoint",
          save_best: bool = True, num_epochs: int = 25,
          device: str = "cuda:0", job_type: str = "txt_ner",
          split_str: str = " ", replace_str: str = "_", max_length: int = 128
          ):
    """Train a BERT+CRF NER model on a BIO-format corpus.

    Args:
        train_path: path to the training txt file; one "<char> <tag>" per
            line, samples separated by blank lines (BIO format).
        labels: entity labels to learn; must be non-empty.
        test_size: held-out test fraction, default 0.1.
        encoding: corpus file encoding, default utf-8.
        pretrained_model: pretrained model name, default
            'hfl/chinese-roberta-wwm-ext'.
        learning_rate: learning rate, default 0.0001.
        optimizer: 'adam' (default) or 'sgd'.
        batch_size: training batch size, default 64.
        save_dir: checkpoint directory; created if missing.
        save_best: when True, keep only the best-accuracy pickle; otherwise
            save one pickle per epoch.
        num_epochs: number of epochs, default 25.
        device: training device; falls back to CPU when CUDA is unavailable.
        job_type: filename prefix for saved artifacts, default 'txt_ner'.
        split_str: separator between char and tag in the corpus, default ' '.
        replace_str: documented as the replacement for spaces in the corpus;
            NOTE(review): currently unused by the implementation — confirm
            whether space replacement was ever wired up.
        max_length: maximum sequence length, default 128.
    """
    print("start train")
    assert len(labels) > 0, "labels cannot be empty"
    init(max_length, pretrained_model, device, labels)

    train_dataloader, test_dataloader = load_data(train_path=train_path, encoding=encoding, batch_size=batch_size,
                                                  collate_fn=collate_fn, test_size=test_size, split_str=split_str)
    print("using device:", train_device)

    model = Model(bert_model, bert_config, categories)  # noqa
    model.to(train_device)

    optimizer = get_optimizer(model, select_optimizer=optimizer, lr=learning_rate)
    print("开始训练")
    best_accu = 0

    # makedirs(exist_ok=True): supports nested save_dir paths and avoids the
    # exists()/mkdir() check-then-act race of the previous code.
    os.makedirs(save_dir, exist_ok=True)

    for epoch in range(num_epochs):
        start = time.time()
        loss_sum = 0.0
        accu = 0.0
        model.train()
        for ids, label in tqdm(train_dataloader, desc="training"):
            outputs = model(ids)
            emissions = outputs[0]
            mask = outputs[1]
            loss = model.crf(emissions, mask, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_sum += loss.item()
            # Token-level accuracy from the raw emissions (argmax over tags).
            y_pred = torch.argmax(emissions, dim=-1)
            accu += torch.sum(y_pred.eq(label)).item() / label.numel()

        test_loss_sum = 0.0
        test_accu = 0
        model.eval()
        # Evaluation needs no autograd graph; no_grad saves memory and time.
        with torch.no_grad():
            for ids, label in tqdm(test_dataloader, desc="testing"):
                outputs = model(ids)
                emissions = outputs[0]
                mask = outputs[1]
                loss = model.crf(emissions, mask, label)
                test_loss_sum += loss.item()
                y_pred = torch.argmax(emissions, dim=-1)
                test_accu += torch.sum(y_pred.eq(label)).item() / label.numel()

        accuracy = test_accu / len(test_dataloader)

        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            epoch, loss_sum / len(train_dataloader), accu / len(train_dataloader), test_loss_sum / len(test_dataloader),
            accuracy), int(time.time() - start))

        # Serialize from CPU so the pickle loads on CPU-only hosts; the label
        # vocabulary is bundled so inference can decode predictions.
        model.to(torch.device("cpu"))
        pickle_dict = {
            'stat_dict': model.state_dict(),
            'categories': categories,
            'categories_id2label': categories_id2label,
            'categories_label2id': categories_label2id,
        }

        if save_best:
            if best_accu < accuracy:
                # BUG FIX: best_accu was never updated (the update lived only
                # in commented-out code), so every epoch passed this check and
                # "save best" degenerated into "save last".
                best_accu = accuracy
                save_path = os.path.join(save_dir, f"{job_type}_data.pickle")
                with open(save_path, "wb") as f:
                    pickle.dump(pickle_dict, f)
        else:
            save_path = os.path.join(save_dir, f"{job_type}_model_{epoch}_{accuracy}.pickle")
            with open(save_path, "wb") as f:
                pickle.dump(pickle_dict, f)

        model.to(train_device)

# if __name__ == '__main__':
#     release_cache()
#     pretrained_model = r"D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext"
#     # train_path = r"D:\codes\nlp_about\lsptrain\example\nlp_ner\train.txt"
#     # train_path = r"D:\BaiduNetdiskDownload\bio_data\ner_train.txt"
#     train_path = r"C:\Users\Administrator\Downloads\china-people-daily-ner-corpus\china-people-daily-ner-corpus\renminribao.txt"
#     # labels = ["BRAND", "MODEL", "NAME"]
#     labels = ["LOC", "ORG", "PER"]
#     train(train_path, labels, pretrained_model=pretrained_model, batch_size=16, save_best=False, max_length=max_length)
