import logging
import time

import paddle
from paddlenlp.transformers import BertForMaskedLM, BertTokenizer, CosineDecayWithWarmup
from paddle.io import DataLoader
import os
import numpy as np
from typing import Optional
from tqdm import tqdm

from PromptDataSet import MyDataSet

# Root logger: INFO level, timestamps rendered in UTC.
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # set the print/record level
formatter = logging.Formatter('%(asctime)s %(filename)s %(funcName)s [line:%(lineno)d] %(levelname)s %(message)s')
formatter.converter = time.gmtime

# Persist log records to train.log (no console handler is added here).
fh = logging.FileHandler("train.log", encoding='utf8')
fh.setFormatter(formatter)
logger.addHandler(fh)

# Data and checkpoint directories, resolved relative to the working directory.
data_dir = os.path.join(os.getcwd(), 'paddle_data')
save_dir = os.path.join(os.getcwd(), 'save_model')

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")

# Verbalizer tokens, one per line in fou_shi.txt (presumably the two answer
# tokens '有'/'无' used below — TODO confirm file contents/order), mapped to
# their vocabulary ids. dev() selects logits at exactly these ids.
with open(os.path.join(data_dir, "fou_shi.txt"), encoding='utf-8', mode='r') as f:
    class_list = f.read().splitlines()
class_list = [tokenizer.convert_tokens_to_ids(item) for item in class_list]


def collate_fn(batch, mask_position=7):
    """Collate (text, label) pairs into model-ready tensors for prompt MLM.

    Builds a masked-LM target tensor that is -100 (the ignore index)
    everywhere except at ``mask_position``, where it holds the vocabulary id
    of '有' (label 1) or '无' (label 0).

    Args:
        batch: sequence of (text, label) pairs; label may be int or str.
        mask_position: index of the [MASK] slot inside the tokenized prompt
            (default 7, matching the original hard-coded template position).

    Returns:
        dict with the raw texts, ``input_ids``, ``attention`` mask,
        ``token_type_ids``, the integer labels, and the MLM target tensor
        ``label_model``.
    """
    # Plain comprehensions instead of the former numpy round-trip, which
    # stringified labels via np.asarray only to map them back through int().
    text_list = [item[0] for item in batch]
    label_all = [int(item[1]) for item in batch]
    output = tokenizer(text=text_list, padding=True, return_attention_mask=True, return_tensors="pd")
    # -100 is ignored by the MLM loss; only the verbalizer slot carries a target.
    label_model = paddle.full_like(output['input_ids'], -100)
    # Hoist the loop-invariant token-id lookups out of the loop.
    positive_id = tokenizer.convert_tokens_to_ids('有')
    negative_id = tokenizer.convert_tokens_to_ids('无')
    for index, label in enumerate(label_all):
        # Direct 2-D assignment replaces the old copy-row/assign-back dance.
        label_model[index, mask_position] = positive_id if label == 1 else negative_id
    return {
        "original_text": text_list,
        "input_ids": output['input_ids'],
        "attention": output['attention_mask'],
        "token_type_ids": output['token_type_ids'],
        "label": label_all,
        "label_model": label_model
    }


def train(model, dataLoader: DataLoader, optimizer: paddle.optimizer.AdamW,
          lr_scheduler: Optional[CosineDecayWithWarmup] = None):
    """Run one training epoch and return the summed loss over all batches.

    Args:
        model: masked-LM model returning (loss, logits) when labels are given.
        dataLoader: yields dicts produced by collate_fn.
        optimizer: AdamW optimizer bound to the model's parameters.
        lr_scheduler: optional warmup/decay scheduler, stepped once per batch.

    Returns:
        The accumulated (un-averaged) loss across the epoch.
    """
    running_loss = 0.0
    model.train()
    for sample in tqdm(dataLoader):
        # Forward pass; label_model carries -100 everywhere but the mask slot.
        loss, _ = model(
            input_ids=sample['input_ids'],
            token_type_ids=sample['token_type_ids'],
            attention_mask=sample['attention'],
            labels=sample['label_model'],
        )
        running_loss += loss.item()
        # Clear stale grads before backprop, then update parameters.
        optimizer.clear_gradients()
        loss.backward()
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
    return running_loss


@paddle.no_grad()
def dev(model: BertForMaskedLM, dev_data_loader: DataLoader):
    """Evaluate the model on the dev set.

    Args:
        model: masked-LM model returning (loss, logits) when labels are given.
        dev_data_loader: yields dicts produced by collate_fn.

    Returns:
        (accuracy, mean_loss) — sample-level accuracy and the dev loss
        averaged over batches.  The original returned only the LAST batch's
        loss, which made the logged "test-set loss" an arbitrary sample;
        this now reports the mean.  Returns (0.0, -1) for an empty loader
        instead of raising ZeroDivisionError.
    """
    total = 0
    correct = 0
    loss_sum = 0.0
    num_batches = 0
    model.eval()
    # Loop-invariant: vocabulary ids of the verbalizer tokens, built once.
    class_index = paddle.to_tensor(class_list)
    for item in tqdm(dev_data_loader):
        input_ids = item['input_ids']
        total += len(input_ids)
        num_batches += 1
        loss, logits = model(input_ids=input_ids, token_type_ids=item['token_type_ids'],
                             attention_mask=item['attention'],
                             labels=item['label_model'])  # logits: [batch, seq_len, vocab]
        loss_sum += loss.item()
        # Logits at the [MASK] slot — position 7, the same slot collate_fn labels.
        mask_probability = logits[:, 7]  # [batch, vocab_size]
        # Restrict to the two verbalizer tokens and pick the argmax class.
        last_probability = paddle.index_select(mask_probability, index=class_index,
                                               axis=1)  # [batch, 2]
        predict = paddle.argmax(last_probability, axis=1)
        correct += (paddle.to_tensor(item['label']) == predict).sum().item()
    if total == 0:
        # Empty loader: no samples, mirror the original's -1 "no loss" sentinel.
        return 0.0, -1
    return correct / total, loss_sum / num_batches


def save(model, optimizer, checkpoint):
    """Persist model weights, optimizer state, and the checkpoint dict.

    Args:
        model: the trained model whose state_dict is written to model.pdparams.
        optimizer: optimizer whose state_dict is written to adamw.pdopt.
        checkpoint: plain dict of bookkeeping values (epoch, loss, ...).
    """
    # Nothing else in this file creates save_dir; without this the first
    # paddle.save would fail when the directory is missing.
    os.makedirs(save_dir, exist_ok=True)
    # Save the layer parameters.
    paddle.save(model.state_dict(), os.path.join(save_dir, "model.pdparams"))
    # Save the optimizer state.
    paddle.save(optimizer.state_dict(), os.path.join(save_dir, "adamw.pdopt"))
    paddle.save(checkpoint, os.path.join(save_dir, "checkpoint.pkl"))


if __name__ == '__main__':
    # Model and optimizer: local pretrained checkpoint, plain AdamW at 5e-5.
    model = BertForMaskedLM.from_pretrained("model_hub/bert-base-chinese")
    adamW = paddle.optimizer.AdamW(parameters=model.parameters(), learning_rate=5e-5)

    # Datasets and loaders share the same collate_fn and batch size.
    train_data_set = MyDataSet("train", 'old')
    dev_data_set = MyDataSet("dev", 'old')
    train_dataLoader = DataLoader(train_data_set, shuffle=True, batch_size=256, collate_fn=collate_fn)
    dev_dataLoader = DataLoader(dev_data_set, shuffle=True, batch_size=256, collate_fn=collate_fn)

    best = 0
    final_checkpoint = {}
    for epoch_idx in range(50):
        round_no = epoch_idx + 1
        logging.info(f"第{round_no}轮训练开始了！")
        epoch_loss = train(model, train_dataLoader, adamW)
        logging.info(f"第{round_no}轮训练的loss为:{epoch_loss}")
        logging.info(f"开始第{round_no}次测评")
        accuracy, dev_loss = dev(model, dev_dataLoader)
        logging.info(f"第{round_no}轮训练的准确率为:{accuracy}")
        logging.info(f"第{round_no}轮训练在测试集上的loss为:{dev_loss}")
        final_checkpoint["epoch"] = round_no
        final_checkpoint["loss"] = epoch_loss
        # Keep only the best-accuracy checkpoint on disk.
        if accuracy > best:
            logging.info("准备保存模型")
            save(model, optimizer=adamW, checkpoint=final_checkpoint)
            best = accuracy
            logging.info("保存成功")
