import argparse
import os

import numpy as np
import torch
import torch.optim as optim
from torch import nn
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from torchsummary import summary

from model.mlpModel import myMLP
from train_util import *


def main(args: argparse.Namespace):
    """Train a ``myMLP`` regressor and save all training artifacts.

    Creates the output directory tree under ``model_log/<args.save_file>``,
    trains for ``args.epochs`` epochs with Adam + StepLR, then saves the
    loss curves, the trained model, per-split model-error reports, and the
    learning-rate plot.

    Args:
        args: Parsed CLI namespace; uses ``save_file``, ``seed``, ``epochs``,
            ``lr``, plus whatever ``get_loader`` reads (``data_path``,
            ``batch_size``, ``test_size``).
    """
    model_log_path = os.path.join('model_log', args.save_file)
    model_path = os.path.join(model_log_path, 'trained_model')
    trials_log_path = os.path.join(model_log_path, 'trials_loss')
    modelError_path = os.path.join(model_log_path, 'modelError')
    path_exists(model_path, trials_log_path, modelError_path)

    file_name_prefix = 'Temp'
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print('---------Train on: ' + device + '----------')

    if args.seed is not None:
        set_seed(args.seed)

    train_loader, val_loader = get_loader(args)

    # Per-sample feature shape: drop the leading batch dimension.
    input_size = tuple(train_loader.dataset.x.shape[1:])

    model = myMLP().to(device)
    summary(model, input_size=input_size)

    # Sum over the batch; presumably train()/val() normalize by sample count.
    criterion = nn.MSELoss(reduction='sum')

    # ["Adam", "RMSprop", "SGD", "Adagrad"]
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Decay the LR by 5x once, at the halfway point of training.
    scheduler = StepLR(optimizer, step_size=int(0.5 * args.epochs), gamma=0.2)

    # Accumulate per-epoch values in plain lists (O(1) append); np.append
    # would copy the whole array every epoch.
    train_loss_hist = []
    val_loss_hist = []
    lr_hist = []

    for epoch in range(args.epochs):
        train_batch_loss = train(epoch, train_loader, model, optimizer, criterion, args)
        train_loss_hist.append(train_batch_loss)
        val_batch_acc = val(epoch, val_loader, model, criterion, args)
        val_loss_hist.append(val_batch_acc)
        # Read the current LR directly; no need to materialize a state_dict.
        lr_hist.append(optimizer.param_groups[0]['lr'])
        scheduler.step()

    train_loss = np.asarray(train_loss_hist)
    val_loss = np.asarray(val_loss_hist)
    learning_rate = np.asarray(lr_hist)

    draw_regression_loss(train_loss, val_loss,
                         title=f'{file_name_prefix}--Loss',
                         save_path=f'{trials_log_path}/{file_name_prefix}.png')
    save_losses(train_loss, val_loss, f'{trials_log_path}/{file_name_prefix}--Loss')  # persist the loss values
    torch.save(model, f'{model_path}/optuna_model_{file_name_prefix}.pt')  # save the final trained model

    # Per-sample error reports for both splits (return values unused here).
    _ = modelError(model, train_loader, save_data=1,
                   save_file_name=f'{modelError_path}/train_{file_name_prefix}')
    _ = modelError(model, val_loader, save_data=1,
                   save_file_name=f'{modelError_path}/val_{file_name_prefix}')
    # Plot the learning-rate schedule.
    plot_learning_rate(learning_rate, title=f'{file_name_prefix}--Learning Rate')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-sd', '--seed', default=1, type=int)
    parser.add_argument('-tp', '--data_path', default='data/data.csv')
    parser.add_argument('-bs', '--batch_size', type=int, default=64)
    parser.add_argument('-ep', '--epochs', type=int, default=600)
    parser.add_argument('-ts', '--test_size', type=float, default=0.3)
    parser.add_argument('-lr', '--lr', type=float, default=0.1)
    # Default matches the folder name previously hard-coded after parsing,
    # so -sf is now honoured instead of being silently overwritten.
    parser.add_argument('-sf', '--save_file', default='Temp_file')
    args = parser.parse_args()

    main(args)

