import numpy as np

import dataloader
import create_model
import parameter_config
import torch.optim as optim
import os
from datetime import datetime
from tqdm import tqdm
import torch
import transformers
import functions_tools
# Module-level configuration object shared by train()/val_epoch() and the
# __main__ entry point below (paths, device, LR, epochs, etc.).
params = parameter_config.ParameterConfig()

def train_epoch(model, train_dataloader, optimizer, scheduler, epoch, params):
    """Train the model for one epoch and return the mean training loss.

    :param model: model to train (HuggingFace-style: forward returns .loss / .logits)
    :param train_dataloader: training loader yielding (inputs, labels) tensor pairs
    :param optimizer: optimizer
    :param scheduler: learning-rate warmup scheduler
    :param epoch: current epoch index (0-based)
    :param params: configuration object
    :return: mean loss over the epoch
    """
    model.train()
    device = params.device
    ignore_index = params.ignore_index
    epoch_start_time = datetime.now()
    total_loss = 0
    epoch_correct_num, epoch_total_num = 0, 0

    for batch_idx, (inputs, labels) in enumerate(tqdm(train_dataloader)):
        inputs, labels = inputs.to(device), labels.to(device)

        outputs = model(inputs, labels=labels)
        logits = outputs.logits
        loss = outputs.loss

        batch_correct_num, batch_total_num = functions_tools.calculate_acc(
            logits, labels, ignore_index=ignore_index)
        batch_acc = batch_correct_num / batch_total_num

        epoch_correct_num += batch_correct_num
        # BUG FIX: accumulate the batch token total, not the correct count --
        # the original added batch_correct_num here, making epoch accuracy
        # always exactly 1.0.
        epoch_total_num += batch_total_num
        total_loss += loss.item()

        # Scale the loss so gradients average correctly across accumulated batches.
        if params.gradient_accumulation_steps > 1:
            loss = loss / params.gradient_accumulation_steps
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), params.max_grad_norm)

        # Step optimizer/scheduler only every gradient_accumulation_steps batches.
        if (batch_idx + 1) % params.gradient_accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

        if (batch_idx + 1) % params.loss_step == 0:
            print(
                "batch {} of epoch {}, loss {}, batch_acc {}, lr {}".format(
                    batch_idx + 1,
                    epoch + 1,
                    # un-scale so the printed loss is comparable across configs
                    loss.item() * params.gradient_accumulation_steps,
                    batch_acc,
                    # get_lr() is deprecated for reading the current LR;
                    # get_last_lr() is the documented accessor
                    scheduler.get_last_lr()
                )
            )

    epoch_mean_loss = total_loss / len(train_dataloader)
    epoch_mean_acc = epoch_correct_num / epoch_total_num

    print("epoch {} : loss {}, predict_acc {}".format(epoch + 1, epoch_mean_loss, epoch_mean_acc))

    # BUG FIX: epoch is 0-based, so the final epoch is params.epochs - 1;
    # the original comparison `epoch == params.epochs` could never be true
    # and the last epoch's checkpoint was silently skipped.
    if epoch % 10 == 0 or epoch == params.epochs - 1:
        print("保存模型epoch {}".format(epoch + 1))
        model_path = os.path.join(params.save_model_path, 'GPT2_epoch{}'.format(epoch + 1))
        if not os.path.exists(model_path):
            os.makedirs(model_path)

        model.save_pretrained(model_path)

    # BUG FIX: report completion/timing for every epoch -- originally these
    # prints were nested inside the checkpoint branch above.
    print(" epoch {} finished".format(epoch + 1))
    epoch_end_time = datetime.now()
    print(" epoch {} finished in {}".format(epoch + 1, epoch_end_time - epoch_start_time))
    return epoch_mean_loss

def val_epoch(model, val_dataloader, epoch, params):
    """Run one validation pass (no gradients) and return the mean loss.

    :param model: model to evaluate (forward returns an object with .loss)
    :param val_dataloader: validation loader yielding (inputs, labels) pairs
    :param epoch: current epoch index (0-based)
    :param params: configuration object providing .device
    :return: mean loss over the validation loader
    """
    model.eval()
    device = params.device
    start_time = datetime.now()
    loss_sum = 0

    with torch.no_grad():
        for step, (batch_inputs, batch_labels) in enumerate(tqdm(val_dataloader)):
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            batch_loss = model.forward(batch_inputs, labels=batch_labels).loss
            loss_sum += batch_loss.item()

        mean_loss = loss_sum / len(val_dataloader)

        print("验证轮次：{}， 损失：{}".format(epoch + 1, mean_loss))

        print(" epoch {} finished in {}".format(epoch + 1, datetime.now() - start_time))

        return mean_loss

def train(params, train_dataloader, val_dataloader, model):
    """Full training driver: train and validate each epoch, checkpointing
    whenever the validation loss improves on the best seen so far.

    :param params: configuration object (lr, eps, epochs, warmup, save path, ...)
    :param train_dataloader: training set loader
    :param val_dataloader: validation set loader
    :param model: model to train
    :return: None (best model is persisted via save_pretrained)
    """
    optimizer = optim.AdamW(model.parameters(), lr=params.lr, eps=params.eps)
    # Total optimizer steps = batches per epoch / accumulation steps, per epoch.
    total_steps = (len(train_dataloader) // params.gradient_accumulation_steps) * params.epochs
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=params.warmup_steps,
        num_training_steps=total_steps,
    )

    print("----------------------开始训练--------------------")

    train_losses = []
    val_losses = []
    best_val_loss = np.inf

    for epoch in range(params.epochs):
        print('训练-------------------------')
        train_losses.append(
            train_epoch(model=model, train_dataloader=train_dataloader,
                        optimizer=optimizer, scheduler=scheduler,
                        epoch=epoch, params=params)
        )

        print('验证---------------------------')
        current_val_loss = val_epoch(model=model, val_dataloader=val_dataloader,
                                     epoch=epoch, params=params)
        val_losses.append(current_val_loss)

        # Keep a single "best" checkpoint directory, overwritten on improvement.
        if current_val_loss < best_val_loss:
            best_val_loss = current_val_loss
            print('当前轮次{}为最好的模型'.format(epoch + 1))
            best_dir = os.path.join(params.save_model_path, 'best_GPT2_epoch')
            if not os.path.exists(best_dir):
                os.makedirs(best_dir)
            model.save_pretrained(best_dir)


if __name__ == '__main__':
    # Script entry point: load the train/validation loaders from the configured
    # paths, build the model, move it to the configured device, and run the
    # full training loop.
    train_dataloader, val_dataloader = dataloader.get_dataloader(params.train_path, params.valid_path)
    print("数据加载成功")
    model = create_model.get_model()
    print('模型对象创建成功')
    model = model.to(params.device)
    print('模型移动到指定该设备上')
    train(params, train_dataloader, val_dataloader, model)
