from utils.utils import get_time_dif, printbar


def params2update(model):
    """Collect the parameters of *model* that require gradients.

    Used to restrict the optimizer to trainable (non-frozen) parameters.

    Args:
        model: a ``torch.nn.Module``.

    Returns:
        list: the ``torch.nn.Parameter`` objects with ``requires_grad`` set.
    """
    # Idiomatic form: truthiness test instead of `== True`, comprehension
    # instead of append loop; the parameter name was never used.
    return [param for param in model.parameters() if param.requires_grad]


def train_model(model_name, dataset_name):
    """Train the model ``models.<model_name>`` on *dataset_name*.

    Loads the model module dynamically, builds train/val DataLoaders over
    ``CMNDataset``, runs ``config.num_epochs`` epochs of training with
    per-epoch validation, and checkpoints to ``config.save_path`` whenever
    the validation metric improves.

    Args:
        model_name: module name under ``models`` exposing ``Config``,
            ``Model`` and ``train_step``.
        dataset_name: dataset identifier forwarded to ``Config``.

    Returns:
        pd.DataFrame: per-epoch history with columns
        ``["epoch", "loss", metric_name, "val_loss", "val_" + metric_name]``.
    """
    printbar()
    model_module = import_module('models.' + model_name)
    config = model_module.Config(dataset_name)

    # Pin all RNG seeds for reproducibility.
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True

    start_time = time.time()
    print("Loading data...")
    train_dataset = CMNDataset(config, 'train', rebuild_vocab=True)
    val_dataset = CMNDataset(config, 'val')

    def _seed_worker(worker_id):
        # BUGFIX: worker_init_fn must be a callable taking the worker id.
        # The original passed `np.random.seed(1)` (i.e. None) after seeding
        # eagerly once at DataLoader construction time.
        np.random.seed(1)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        worker_init_fn=_seed_worker,
    )
    # NOTE(review): shuffling the validation set does not change the averaged
    # metrics; kept for behavioral parity, but shuffle=False would be cleaner.
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        worker_init_fn=_seed_worker,
    )
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
    printbar()

    model = model_module.Model(config).to(config.device)
    start_time = time.time()
    print('Start training...')

    optimizer = torch.optim.AdamW(
        params2update(model),
        lr=config.learning_rate,
        weight_decay=1e-4
    )
    # Optional learning-rate schedule; config supplies the lambda (or a falsy
    # value to disable scheduling).
    if config.scheduler_1r_function:
        scheduler_1r = torch.optim.lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=config.scheduler_1r_function
        )

    best_metric = 0.
    # `metric_name` is assumed to be a module-level global defined outside
    # this chunk -- TODO confirm.
    df_history = pd.DataFrame(columns=["epoch", "loss", metric_name, "val_loss", "val_" + metric_name])

    for epoch in range(config.num_epochs):
        # ---- training ----
        loss_sum, metric_sum = 0., 0.
        for step, (inps, labs) in enumerate(train_dataloader, start=1):
            loss, metric = model_module.train_step(model, inps, labs, optimizer)
            loss_sum += loss
            metric_sum += metric

            # Batch-level log every 100 steps (running averages).
            if step % 100 == 0:
                print('*'*27, f'[step = {step}] loss: {loss_sum/step:.3f}, {metric_name}: {metric_sum/step:.3f}')

        # ---- validation, once per training epoch ----
        val_loss_sum, val_metric_sum = 0., 0.
        for val_step, (inps, labs) in enumerate(val_dataloader, start=1):
            # NOTE(review): `validate_step` resolves from an outer scope,
            # unlike `model_module.train_step` above -- confirm it dispatches
            # to the right model module.
            val_loss, val_metric = validate_step(model, inps, labs)
            val_loss_sum += val_loss
            val_metric_sum += val_metric

        if config.scheduler_1r_function:
            scheduler_1r.step()

        # Record this epoch's training/validation stats.
        record = (epoch, loss_sum/step, metric_sum/step, val_loss_sum/val_step, val_metric_sum/val_step)
        # BUGFIX: index by `epoch` (0-based); the original used `epoch - 1`,
        # which wrote the first epoch's record at row label -1.
        df_history.loc[epoch] = record

        # Epoch-level log.
        print('EPOCH = {} loss: {:.3f}, {}: {:.3f}, val_loss: {:.3f}, val_{}: {:.3f}'.format(
               record[0], record[1], metric_name, record[2], record[3], metric_name, record[4]))
        printbar()

        # Checkpoint whenever the validation metric improves.
        current_metric_avg = val_metric_sum/val_step
        if current_metric_avg > best_metric:
            best_metric = current_metric_avg
            checkpoint = config.save_path
            model_sd = copy.deepcopy(model.state_dict())
            torch.save({
                'loss': loss_sum / step,
                'epoch': epoch,
                'net': model_sd,
                'opt': optimizer.state_dict(),
            }, checkpoint)

    # BUGFIX: the original read undefined `starttime`; the training timer is
    # bound to `start_time` above.
    time_elapsed = time.time() - start_time
    print('*' * 27, 'training finished...')
    print('*' * 27, 'and it costs {} h {} min {:.2f} s'.format(int(time_elapsed // 3600),
                                                               int((time_elapsed % 3600) // 60),
                                                               (time_elapsed % 3600) % 60))

    print('Best val Acc: {:4f}'.format(best_metric))
    return df_history

