import configparser as cp
import torch
from src.dataloader import make_data_loader
from src.nets.MRNet import MRNet
from src.round_train import train_one_epoch
from src.utils import getRootPath, get_lr_scheduler, set_optimizer_lr
import torch.optim as optim



# Project root directory
rootPath = getRootPath()
filename = r'./config.ini'
inifile = cp.ConfigParser()
inifile.read(filename, 'UTF-8')
# Read training hyperparameters; see config.ini for detailed descriptions.
# FIX: the original wrapped every value in eval(), which executes arbitrary
# code taken from the config file. configparser's typed getters parse the
# same literal forms ('True', '64', '1e-3') safely and state the intent.
Cuda = inifile.getboolean('train', 'Cuda')
backbone = inifile.get('train', 'backbone')
model_path = rootPath + inifile.get('train', 'model_path')
batch_size = inifile.getint('train', 'batch_size')
Init_Epoch = inifile.getint('train', 'Init_Epoch')
Epoch = inifile.getint('train', 'Epoch')
Init_lr = inifile.getfloat('train', 'Init_lr')
Min_lr = inifile.getfloat('train', 'Min_lr')
optimizer_type = inifile.get('train', 'optimizer_type')
momentum = inifile.getfloat('train', 'momentum')
lr_decay_type = inifile.get('train', 'lr_decay_type')
weight_decay = inifile.getfloat('train', 'weight_decay')
save_period = inifile.getint('train', 'save_period')
save_dir = inifile.get('train', 'save_dir')
num_workers = inifile.getint('train', 'num_workers')
# Prefer the GPU when one is available; otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


if __name__ == '__main__':

    # ---------------------------------#
    #   Build the model and load the pretrained weights.
    # ---------------------------------#
    # FIX: the original called .cuda() unconditionally, which crashes on
    # CPU-only machines even though `device` is already computed above.
    model = MRNet(backbond_model=model_path).to(device)
    # Put the model into training mode (Module.train() returns the module
    # itself, so model_train is the same object as model).
    model_train = model.train()

    # Cross-entropy loss for classification.
    loss = torch.nn.CrossEntropyLoss()

    # -------------------------------------------------------------------#
    #   Scale the learning rate to the actual batch_size, clamped into
    #   optimizer-specific safe bounds (nbs is the nominal batch size the
    #   base learning rates were tuned for).
    # -------------------------------------------------------------------#
    nbs = 64
    lr_limit_max = 1e-3 if optimizer_type == 'adam' else 1e-1
    lr_limit_min = 3e-4 if optimizer_type == 'adam' else 5e-4
    Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
    Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

    # ---------------------------------------#
    #   Select the optimizer from optimizer_type.
    #   FIX: the original built BOTH optimizers eagerly inside a dict just
    #   to index one of them, and an unknown type died with a bare KeyError.
    # ---------------------------------------#
    if optimizer_type == 'adam':
        optimizer = optim.Adam(model.parameters(), Init_lr_fit,
                               betas=(momentum, 0.999), weight_decay=weight_decay)
    elif optimizer_type == 'sgd':
        optimizer = optim.SGD(model.parameters(), Init_lr_fit,
                              momentum=momentum, nesterov=True, weight_decay=weight_decay)
    else:
        raise ValueError(f"Unsupported optimizer_type: {optimizer_type!r} (expected 'adam' or 'sgd')")

    # ---------------------------------------#
    #   Learning-rate decay schedule function.
    # ---------------------------------------#
    lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, Epoch)

    # Training data loader (axial view of the MRNet dataset).
    gen = make_data_loader('./data/MRNet-v1.0', 'train', 'axial')

    # Main training loop.
    for epoch in range(Init_Epoch, Epoch):
        # Update the optimizer's learning rate for this epoch.
        set_optimizer_lr(optimizer, lr_scheduler_func, epoch)
        # Run one epoch of training.
        # NOTE(review): epoch_step=1130 is a hard-coded batch count per
        # epoch — presumably len(gen) for this dataset/batch_size; confirm
        # and derive it from the loader if possible.
        train_one_epoch(model_train=model_train, model=model, loss=loss, optimizer=optimizer, epoch_step=1130,
                        epoch=epoch, gen=gen, Epoch=Epoch, save_period=save_period, save_dir=save_dir, cuda=Cuda)

