import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader

from models.fcn.fcn import FCN8s
from utils.dataset.RVSCdataset import Dataset
from utils.train.imageUtils import deeplab_dataset_collate
from utils.train.loss import LossHistory
from utils.train.trainUtils import pretrain, fit_one_epoch

if __name__ == "__main__":
    # Directory where checkpoints and loss logs are written.
    log_dir = "checkpoint_FCN8s_2/"
    # Input image size: [height, width, channels].
    inputs_size = [256, 256, 3]
    # Number of foreground categories + 1 (background).
    NUM_CLASSES = 3
    # Whether to add dice loss on top of the cross-entropy loss.
    dice_loss = True
    # True  -> the VGG backbone inside FCN8s is initialized with pretrained weights.
    # False -> a full-model checkpoint (FCN8s_voc.pth) is loaded via pretrain() below.
    pretrained = True
    # Whether to train on GPU.
    Cuda = True
    # Dataset paths.
    trainDataset_path = "../../../data/RVSC2012/TrainingSet"
    testDataset_path1 = "../../../data/RVSC2012/Test1Set"
    testDataset_path2 = "../../../data/RVSC2012/Test2Set"

    model = FCN8s(nclass=NUM_CLASSES,
                  pretrained_base=pretrained,
                  in_channels=inputs_size[-1],
                  model_dir="../model_data").train()

    loss_history = LossHistory(log_dir)
    if not pretrained:
        # No backbone-pretrained init requested: restore a full-model checkpoint instead.
        pretrain(model=model,
                 model_path=r"../model_data/FCN8s_voc.pth")

    if Cuda:
        net = torch.nn.DataParallel(model)
        cudnn.benchmark = True
        net = net.cuda()

    def run_stage(lr, start_epoch, end_epoch, batch_size, freeze_backbone):
        """Run one training stage over epochs [start_epoch, end_epoch).

        Builds a fresh Adam optimizer, StepLR scheduler, and train/val
        DataLoaders, (un)freezes the VGG backbone, then trains.

        Args:
            lr: initial learning rate for this stage.
            start_epoch: first epoch index (inclusive).
            end_epoch: last epoch index (exclusive); also passed to
                fit_one_epoch as interval_epoch, matching the original script.
            batch_size: mini-batch size. On OOM, reduce this value.
            freeze_backbone: if True, backbone weights receive no gradients —
                speeds up training and protects the pretrained features early on.
        """
        optimizer = optim.Adam(model.parameters(),
                               lr=lr,
                               weight_decay=0.0005)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.92)

        train_dataset = Dataset(data_dir=trainDataset_path, image_size=inputs_size)
        val_dataset = Dataset(data_dir=testDataset_path1, image_size=inputs_size)

        # Both loaders use deeplab_dataset_collate so every stage feeds
        # identically-shaped batches to fit_one_epoch (the original script
        # dropped the collate_fn in the fine-tuning stage).
        gen = DataLoader(train_dataset, batch_size=batch_size, num_workers=4, pin_memory=True,
                         drop_last=True, collate_fn=deeplab_dataset_collate)
        gen_val = DataLoader(val_dataset, batch_size=batch_size, num_workers=4, pin_memory=True,
                             drop_last=True, collate_fn=deeplab_dataset_collate)

        epoch_size = train_dataset.len // batch_size
        epoch_size_val = val_dataset.len // batch_size
        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("The data set is too small for training. Please expand the data set.")

        for param in model.vgg.parameters():
            param.requires_grad = not freeze_backbone

        for epoch in range(start_epoch, end_epoch):
            fit_one_epoch(model=model,
                          num_classes=NUM_CLASSES,
                          dice_loss=dice_loss,
                          optimizer=optimizer,
                          loss_history=loss_history,
                          epoch=epoch,
                          epoch_size=epoch_size,
                          epoch_size_val=epoch_size_val,
                          gen=gen,
                          gen_val=gen_val,
                          interval_epoch=end_epoch,
                          cuda=Cuda,
                          log_dir=log_dir)
            lr_scheduler.step()

    # Stage 1: backbone frozen — freezing speeds up training and prevents the
    # pretrained weights from being destroyed at the start of training.
    # NOTE(review): lr=1e-14 is effectively zero for Adam — looks like a typo
    # for 1e-4; kept as-is, confirm the intended value.
    run_stage(lr=1e-14, start_epoch=0, end_epoch=50, batch_size=8, freeze_backbone=True)

    # Stage 2: fine-tune the whole network, backbone unfrozen.
    # NOTE(review): lr=1e-15 — same concern as above (likely 1e-5); confirm.
    run_stage(lr=1e-15, start_epoch=50, end_epoch=100, batch_size=8, freeze_backbone=False)
