import torch
import tqdm

import data_handler
import config

# Module-level logger obtained from the project's config module.
logger = config.get_logger()

if __name__ == '__main__':
    # Training entry point: build model/optimizer/data from data_handler,
    # run config.EPOCHS epochs, and save a checkpoint after each epoch.
    # Pin to GPU 3 when CUDA is available; otherwise fall back to CPU.
    device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
    print('device:', device)
    model = data_handler.get_model()
    # BUG FIX: `device` was computed but never used — the model stayed on
    # CPU even when CUDA was available. Move it before building the
    # optimizer so optimizer state lives on the same device.
    model = model.to(device)
    optim, lr_scheduler = data_handler.get_optim_lr_scheduler(model)
    data_iter = data_handler.get_iter()
    logger.info('开始训练')
    # Make sure dropout/batch-norm layers are in training mode.
    model.train()
    # Guard: `loss` is logged after the inner loop; without this it would
    # raise NameError if data_iter yields no batches.
    loss = None
    for epoch in range(config.EPOCHS):
        logger.info(f'Epoch {epoch}')
        total = len(data_iter)
        for inputs, labels in tqdm.tqdm(data_iter, total=total):
            # BUG FIX: batches must be on the same device as the model.
            # NOTE(review): assumes inputs/labels are tensors (or support
            # .to(device), e.g. HF BatchEncoding) — confirm against get_iter().
            inputs = inputs.to(device)
            labels = labels.to(device)
            optim.zero_grad()
            # Call the model directly instead of model.forward() so that
            # registered forward hooks are honored.
            outputs = model(inputs, labels=labels)
            # .mean() reduces a per-element (or per-replica) loss to a scalar.
            loss = outputs.loss.mean()
            loss.backward()
            optim.step()
            # Per-step (not per-epoch) LR schedule, as in the original code.
            lr_scheduler.step()
        logger.info(f'Epoch {epoch} 训练完毕,loss: {loss}')
        # NOTE(review): plain string concatenation — assumes OUTPUT_DIR ends
        # with a path separator; verify, or this writes sibling-named dirs.
        save_model_path = config.OUTPUT_DIR + 'model_epoch_' + str(epoch) + f'{config.utils_file.get_now()}'
        # Unwrap DataParallel/DistributedDataParallel before saving.
        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(save_model_path)
