#! -*- coding: utf-8 -*-
"""
@Author: AI
@Create Time: 20240625
@Info: 模型训练
"""
import os
import time
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers import get_linear_schedule_with_warmup
from arguments import args
from collate_fn import collate
from procession import UIEDataSet
from metric import evaluate

from datetime import datetime

now = datetime.now()

# Directory name for this run's saved model, timestamped so runs don't collide.
save_dir = "model_" + now.strftime("%Y%m%d_%H%M%S")

pretrained_model = args.bash_path + '/model/uie-base-zh/'
pretrain_model_path = args.bash_path + '/model/uie-base-zh/pytorch_model.bin'
# Load the pre-trained UIE model (a fully pickled nn.Module; structure: model.UIE()).
# map_location='cpu' makes loading work even when the checkpoint was saved on a GPU
# and this machine is CPU-only; .to() then moves it to the configured device.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
model = torch.load(pretrain_model_path, map_location='cpu')
model.to(args.device)
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)  # ERNIE 3.0 tokenizer

# Load training / evaluation data.
# train_data_path = '../data/ner_train.txt'
# eval_data_path = '../data/ner_eval.txt'

train_data_path = '../data/all.jsonl'
eval_data_path = '../data/all_test.jsonl'

train_data = UIEDataSet(train_data_path, tokenizer, is_train=True)
# NOTE(review): the eval set is also built with is_train=True — presumably so
# evaluate() gets the gold start/end labels; confirm against UIEDataSet's contract.
eval_data = UIEDataSet(eval_data_path, tokenizer, is_train=True)

train_dataloader = DataLoader(train_data, shuffle=True, batch_size=args.batch_size, collate_fn=collate)
eval_dataloader = DataLoader(eval_data, batch_size=args.batch_size, collate_fn=collate)

# Build the optimizer: the standard two-group setup in which bias and
# LayerNorm weights are exempt from weight decay.
NO_DECAY_KEYWORDS = ("bias", "LayerNorm.weight")

decay_params, exempt_params = [], []
for param_name, param in model.named_parameters():
    if any(kw in param_name for kw in NO_DECAY_KEYWORDS):
        exempt_params.append(param)
    else:
        decay_params.append(param)

optimizer = torch.optim.AdamW(
    [
        {"params": decay_params, "weight_decay": args.weight_decay},
        {"params": exempt_params, "weight_decay": 0.0},
    ],
    lr=args.learning_rate,
)

# Derive the total number of optimisation steps so the scheduler can warm up
# and then decay the learning rate linearly over the whole run.
steps_per_epoch = len(train_dataloader)
max_train_steps = args.num_train_epochs * steps_per_epoch
warm_steps = int(args.warmup_ratio * max_train_steps)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=warm_steps,
    num_training_steps=max_train_steps,
)

# ---- Training loop ----
loss_list = []          # losses accumulated since the last log line
tic_train = time.time()
criterion = torch.nn.BCELoss()  # the model emits sigmoid probabilities, not logits
global_step, best_f1 = 0, 0
for epoch in range(1, args.num_train_epochs + 1):
    # evaluate() below may leave the model in eval mode; re-enable dropout etc.
    # at the start of every epoch.
    model.train()
    for batch in train_dataloader:
        start_prob, end_prob = model(input_ids=batch['input_ids'].to(args.device),
                                     token_type_ids=batch['token_type_ids'].to(args.device),
                                     attention_mask=batch['attention_mask'].to(args.device))
        start_ids = batch['start_label'].to(torch.float32).to(args.device)
        end_ids = batch['end_label'].to(torch.float32).to(args.device)
        # Average the BCE losses of the span-start and span-end heads.
        loss_start = criterion(start_prob, start_ids)
        loss_end = criterion(end_prob, end_ids)
        loss = (loss_start + loss_end) / 2.0
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        loss_list.append(loss.item())

        global_step += 1
        if global_step % 100 == 0:
            time_diff = time.time() - tic_train
            loss_avg = sum(loss_list) / len(loss_list)
            print("global step %d, epoch: %d, loss: %.5f, speed: %.2f step/s"
                  % (global_step, epoch, loss_avg, 100 / time_diff))
            # Reset the window so the next log line reports the average of the
            # *last* 100 steps instead of a cumulative average over the whole run
            # (matching the per-100-step speed figure).
            loss_list = []
            tic_train = time.time()

    # Evaluate at the end of each epoch; keep only the best checkpoint by F1.
    precision, recall, f1 = evaluate(model, eval_dataloader)
    print("Evaluation precision: %.5f, recall: %.5f, F1: %.5f" % (precision, recall, f1))
    if f1 > best_f1:
        print(
            f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}"
        )
        best_f1 = f1
        cur_save_dir = os.path.join(args.save_dir, save_dir)
        os.makedirs(cur_save_dir, exist_ok=True)  # no exists/makedirs race
        # Save the whole pickled model plus the tokenizer files alongside it.
        torch.save(model, os.path.join(cur_save_dir, 'pytorch_model.bin'))
        tokenizer.save_pretrained(cur_save_dir)
        print(f"ner模型保存在： {cur_save_dir}")
