import os
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader

from models.SENet.SEUnet import SimSEUnet
from utils.dataset.VOCdataset import Dataset
from utils.train.imageUtils import deeplab_dataset_collate
from utils.train.loss import LossHistory
from utils.train.trainUtils import pretrain, fit_one_epoch

if __name__ == "__main__":
    # Directory where checkpoints / loss logs are written.
    log_dir = "checkpoint_SEUnet_2/"
    # Input image size (H, W, C).
    # inputs_size = [512, 512, 3]
    inputs_size = [128, 128, 3]
    # Number of classes + 1 (background).
    NUM_CLASSES = 21
    # Whether to use dice loss (reportedly works better disabled here).
    dice_loss = True
    # NOTE(review): semantics look inverted vs. the usual convention —
    # weights are loaded only when pretrained is False (see below).
    # With True, no checkpoint is loaded. Confirm this is intentional.
    pretrained = True
    # Whether to train on GPU.
    Cuda = True
    # Root of the VOC2012 dataset.
    dataset_path = "../../../data/VOC2012/VOCdevkit/VOC2012"

    model = SimSEUnet(num_classes=NUM_CLASSES,
                      in_channels=inputs_size[-1]).train()

    loss_history = LossHistory(log_dir)
    if not pretrained:
        pretrain(model=model,
                 model_path=r"../model_data/SEUnet_voc.pth")

    if Cuda:
        # NOTE(review): `net` is never passed to fit_one_epoch (which gets
        # `model`), so DataParallel's multi-GPU forward is unused; the
        # .cuda() call still moves the shared parameters to the GPU.
        # Consider passing `net` to fit_one_epoch if multi-GPU is wanted.
        net = torch.nn.DataParallel(model)
        cudnn.benchmark = True
        net = net.cuda()

    # Read the training split image-id list.
    with open(os.path.join(dataset_path, "ImageSets/Segmentation/train.txt"), "r") as f:
        train_lines = f.readlines()

    # Read the validation split image-id list.
    with open(os.path.join(dataset_path, "ImageSets/Segmentation/val.txt"), "r") as f:
        val_lines = f.readlines()

    def run_stage(lr, start_epoch, end_epoch, batch_size):
        """Run one training stage over epochs [start_epoch, end_epoch).

        Builds a fresh Adam optimizer with the given learning rate, an
        exponential StepLR schedule (gamma=0.92 per epoch), and fresh
        train/val DataLoaders, then trains with fit_one_epoch.

        Args:
            lr: initial learning rate for this stage's Adam optimizer.
            start_epoch: first epoch index (inclusive).
            end_epoch: last epoch index (exclusive); also passed to
                fit_one_epoch as interval_epoch, matching the original
                per-stage calls.
            batch_size: mini-batch size for both loaders.

        Raises:
            ValueError: if either split is smaller than one batch.
        """
        optimizer = optim.Adam(model.parameters(), lr)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.92)

        train_dataset = Dataset(train_lines, inputs_size, NUM_CLASSES, True, dataset_path)
        val_dataset = Dataset(val_lines, inputs_size, NUM_CLASSES, False, dataset_path)
        gen = DataLoader(train_dataset, batch_size=batch_size, num_workers=4, pin_memory=True,
                         drop_last=True, collate_fn=deeplab_dataset_collate)
        gen_val = DataLoader(val_dataset, batch_size=batch_size, num_workers=4, pin_memory=True,
                             drop_last=True, collate_fn=deeplab_dataset_collate)

        # Iterations per epoch; drop_last=True makes floor division exact.
        epoch_size = len(train_lines) // batch_size
        epoch_size_val = len(val_lines) // batch_size

        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("数据集过小，无法进行训练，请扩充数据集。")

        for epoch in range(start_epoch, end_epoch):
            fit_one_epoch(model=model,
                          num_classes=NUM_CLASSES,
                          dice_loss=dice_loss,
                          optimizer=optimizer,
                          loss_history=loss_history,
                          epoch=epoch,
                          epoch_size=epoch_size,
                          epoch_size_val=epoch_size_val,
                          gen=gen,
                          gen_val=gen_val,
                          interval_epoch=end_epoch,
                          cuda=Cuda,
                          log_dir=log_dir)
            lr_scheduler.step()

    # Two-stage schedule. Original comments describe freezing the backbone
    # in the first stage to protect pretrained weights, but no freeze /
    # unfreeze calls are visible here — the stages differ only in learning
    # rate and epoch range. If OOM occurs, reduce the batch size.
    # Stage 1: epochs 0-24, lr=1e-4.
    run_stage(lr=1e-4, start_epoch=0, end_epoch=25, batch_size=2)
    # Stage 2: epochs 25-49, lr=1e-5.
    run_stage(lr=1e-5, start_epoch=25, end_epoch=50, batch_size=2)