"""
在低对比度数据下(Low Contrast Roads)，只进行语义分割的模型的训练代码
"""
import os
import sys
import torch

import argparse

from torch.utils.data import DataLoader
from torch.nn import DataParallel
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime

# from lib.C2FNet import DSConv_C2FNet
# from lib.NLLinkNet.nllinknet_location import NL34_LinkNet
# from lib.FPNet.FPNet import DSConv_FPNet
from models.unet import UNet
from models.pspnet import PSPNet
from models.segnet import SegNet
from models.deeplabv3_plus import DeepLab
from models.linknet import LinkNet
from utils.lowcontrast import lowDataSet
from utils.utils import clip_gradient, adjust_lr
from utils.loss import dice_bce_loss_func
from utils.metrics import Evaluator
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import ReduceLROnPlateau
from utils.AdaX import AdaXW


# os.environ['CUDA_VISIBLE_DEVICES'] = '2,3,4,5,6,7'
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"

def normalize_tensor(tensor):
    """Min-max normalize *tensor* into the range [0, 1].

    Guards against a constant tensor (max == min), which in the original
    code produced a division by zero (NaN/Inf everywhere); in that
    degenerate case a zero tensor of the same shape is returned.

    Args:
        tensor: any torch tensor.

    Returns:
        A tensor of the same shape with values scaled to [0, 1].
    """
    min_val = torch.min(tensor)
    max_val = torch.max(tensor)
    value_range = max_val - min_val
    if value_range == 0:
        # All elements equal: normalization is undefined, return zeros.
        return torch.zeros_like(tensor)
    return (tensor - min_val) / value_range


def forward_and_backward(model, images, gts, optimizer=None, is_train=True):
    """Run a forward pass, compute the dice+BCE loss, and optionally backprop.

    Reads the module-level ``opt`` namespace (``opt.model_name``,
    ``opt.clip``), so it must be called after argument parsing.

    Args:
        model: the segmentation network (possibly DataParallel-wrapped).
        images: input image batch (CUDA tensor).
        gts: ground-truth masks; a channel dim is added via unsqueeze(1).
        optimizer: when provided together with ``is_train=True``, the loss is
            backpropagated, gradients clipped to ``opt.clip``, and a step taken.
        is_train: selects the two-output training branch for PSPNet.

    Returns:
        Tuple ``(loss, preds, gts)`` where ``loss`` is the scalar loss tensor,
        ``preds`` is the (normalized) prediction as a numpy array, and ``gts``
        is the label tensor with the added channel dimension.
    """
    # PSPNet has 2 outputs in training mode: a coarse and a refined mask.
    if opt.model_name == 'PSPNet':
        if is_train:
            pred_coarse, pred_refine = model(images)
            # Min-max normalize both heads before the loss.
            pred_coarse = normalize_tensor(pred_coarse)
            pred_refine = normalize_tensor(pred_refine)
            gts = torch.unsqueeze(gts, 1)
            loss = dice_bce_loss_func(pred_coarse, gts) + dice_bce_loss_func(pred_refine, gts)
            preds = pred_refine.data.cpu().numpy()
        else:
            # In eval mode PSPNet returns a single prediction.
            prediction = model(images)
            # Min-max normalize.
            normalized_prediction = normalize_tensor(prediction)
            gts = torch.unsqueeze(gts, 1)
            loss = dice_bce_loss_func(normalized_prediction, gts)
            preds = normalized_prediction.data.cpu().numpy()
    # LinkNet normalizes differently: the INPUT images are normalized too.
    elif opt.model_name == 'LinkNet':
        # Normalize the input batch.
        normalized_images = normalize_tensor(images)
        prediction = model(normalized_images)
        # Normalize the output as well.
        prediction = normalize_tensor(prediction)
        gts = torch.unsqueeze(gts, 1)
        loss = dice_bce_loss_func(prediction, gts)
        preds = prediction.data.cpu().numpy()
    # All other models have a single output (one segmentation map).
    else:
        prediction = model(images)
        # Min-max normalize.
        normalized_prediction = normalize_tensor(prediction)
        gts = torch.unsqueeze(gts, 1)
        loss = dice_bce_loss_func(normalized_prediction, gts)
        preds = normalized_prediction.data.cpu().numpy()

    # Backward pass only during training and only when an optimizer is given
    # (validation calls this with optimizer=None under torch.no_grad()).
    if optimizer is not None and is_train:
        loss.backward()
        clip_gradient(optimizer, opt.clip)
        optimizer.step()

    return loss, preds, gts


# 训练函数
# Training function
def train(train_loader, model, optimizer, epoch):
    """Train *model* for one epoch and save a resumable snapshot.

    Streams batches from ``train_loader``, accumulates metrics in the
    module-level ``evaluator``, appends progress lines to the per-model
    train log every 20 batches, and finally saves the epoch checkpoint
    (weights + optimizer state + train IoU) under
    ``Low_Contrast/<model_name>/ckpt/``.

    Args:
        train_loader: DataLoader yielding dicts with 'image' and 'label'.
        model: DataParallel-wrapped network (``model.module`` is saved).
        optimizer: optimizer whose state is checkpointed alongside weights.
        epoch: current 1-based epoch number (for logging and the snapshot).
    """
    model.train()
    evaluator.reset()
    train_loss = 0.0

    # Training log: create the per-model directory if needed
    # (exist_ok avoids the check-then-create race of the original code).
    log_dir = "Low_Contrast/{}".format(opt.model_name)
    os.makedirs(log_dir, exist_ok=True)

    # "with" guarantees the log file is closed even if a batch raises.
    with open("Low_Contrast/{}/train_log.txt".format(opt.model_name), "a") as file:
        for i, sample in enumerate(tqdm(train_loader)):
            optimizer.zero_grad()
            images = sample['image'].cuda()
            gts = sample['label'].cuda()
            loss, preds, gts = forward_and_backward(model, images, gts, optimizer, is_train=True)
            labels = gts.cpu().numpy()

            # Binarize predictions at a 0.3 threshold
            # (presumably tuned for thin road pixels -- TODO confirm).
            preds[preds >= 0.3] = 1
            preds[preds < 0.3] = 0

            evaluator.add_batch(labels, preds)
            train_loss += loss.item()

            # Print/log metrics every 20 batches and on the final batch.
            if (i + 1) % 20 == 0 or (i + 1) == train_batch:
                ACC = evaluator.Pixel_Accuracy()  # pixel accuracy (road)
                ACC_class = evaluator.Pixel_Accuracy_Class()  # mean per-class accuracy
                mIOU = evaluator.Mean_Intersection_over_Union()  # mean IoU
                IOU = evaluator.Intersection_over_Union()  # IoU (road)
                Precision = evaluator.Pixel_Precision()  # precision (road)
                Recall = evaluator.Pixel_Recall()  # recall (road)
                F1 = evaluator.Pixel_F1()  # F1 score (road)

                train_result = '{} Epoch [{:03d}/{:03d}], Train Batch [{:04d}/{:04d}], Loss {:.4f}, Acc {}, Acc_class {}, ' \
                               'mIOU {}, IOU {}, Precision {}, Recall {}, F1 {}'.format(
                    datetime.now(), epoch, opt.epoch, i + 1, train_batch, train_loss / (i + 1),
                    ACC, ACC_class, mIOU,
                    IOU, Precision, Recall, F1
                )
                file.write(train_result + '\n')
                print(train_result)

    iou = evaluator.Intersection_over_Union()
    save_path = 'Low_Contrast/{}/ckpt/'.format(opt.model_name)
    os.makedirs(save_path, exist_ok=True)

    # Save an "always latest" snapshot so training can be resumed.
    torch.save({
        'epoch': epoch,
        'state_dict': model.module.state_dict(),  # unwrap DataParallel
        'optimizer': optimizer.state_dict(),
        'train_iou': iou
    }, save_path + '{}.pth.tar'.format(opt.model_name))
    print('[Saving Snapshot:]', save_path + '{}.pth.tar'.format(opt.model_name))


# 测试函数
def valid(valid_loader, model, epoch, best):
    model.eval()
    evaluator.reset()
    valid_loss = 0.0

    # 测试日志
    if not os.path.exists("Low_Contrast/{}".format(opt.model_name)):
        os.makedirs("Low_Contrast/{}".format(opt.model_name))
    file = open("Low_Contrast/{}/valid_log.txt".format(opt.model_name), "a")
    for i, sample in enumerate(tqdm(valid_loader)):
        images = sample['image'].cuda()
        gts = sample['label'].cuda()

        with torch.no_grad():
            loss, preds, gts = forward_and_backward(model, images, gts, is_train=False)
        labels = gts.cpu().numpy()
        preds[preds >= 0.3] = 1
        preds[preds < 0.3] = 0

        evaluator.add_batch(labels, preds)
        valid_loss += loss.item()

        if (i + 1) % 5 == 0 or (i + 1) == test_batch:
            ACC = evaluator.Pixel_Accuracy()
            ACC_class = evaluator.Pixel_Accuracy_Class()
            mIOU = evaluator.Mean_Intersection_over_Union()
            IOU = evaluator.Intersection_over_Union()
            Precision = evaluator.Pixel_Precision()
            Recall = evaluator.Pixel_Recall()
            F1 = evaluator.Pixel_F1()
            valid_result = '{} Epoch [{:03d}/{:03d}], Valid Batch [{:04d}/{:04d}], Loss {:.4f}, Acc {}, Acc_class {}, ' \
                           'mIOU {}, IOU {}, Precision {}, Recall {}, F1 {}'.format(
                datetime.now(), epoch, opt.epoch, i + 1, test_batch, valid_loss / (i + 1),
                ACC, ACC_class, mIOU,
                IOU, Precision, Recall, F1
            )
            file.write(valid_result + '\n')
            print(valid_result)

    file.close()

    # 每一次测试的最终指标
    iou = evaluator.Intersection_over_Union()
    if not os.path.exists("Low_Contrast/{}/ckpt/".format(opt.model_name)):
        os.makedirs("Low_Contrast/{}/ckpt/".format(opt.model_name))
    save_path = 'Low_Contrast/{}/ckpt/'.format(opt.model_name)

    # 根据道路IoU保存最优的模型权重
    if iou > best:
        torch.save({
            'epoch': epoch,
            'state_dict': model.module.state_dict(),
            'optimizer': optimizer.state_dict(),
            'iou': iou
        }, save_path + '{}_best.pth.tar'.format(opt.model_name))
        return iou
    return best


if __name__ == '__main__':
    # Map CLI model names to their constructors.
    models = {'PSPNet': PSPNet, 'UNet': UNet, 'SegNet': SegNet, "DeepLab": DeepLab, 'LinkNet': LinkNet}

    def _str2bool(value):
        """Parse a boolean CLI value.

        The original code used ``type=bool``, which treats ANY non-empty
        string (including "False") as True; interpret the common spellings
        explicitly instead, so ``--resume False`` actually disables resume.
        """
        return str(value).strip().lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int,
                        default=20, help='epoch number')  # number of training epochs
    parser.add_argument('--lr', type=float,
                        default=1e-3, help='learning rate')  # learning rate
    parser.add_argument('--batch_size', type=int,
                        default=4, help='batch size')
    parser.add_argument('--clip', type=float,
                        default=0.5, help='gradient clipping margin')  # gradient clipping
    parser.add_argument('--decay_rate', type=float,
                        default=0.01, help='decay rate of learning rate')  # LR decay rate
    parser.add_argument('--decay_epoch', type=int,
                        default=10, help='every n epochs decay learning rate')  # LR decay interval
    parser.add_argument('--model_name', type=str,
                        default='LinkNet')  # name of the model to train
    parser.add_argument('--base_size', type=int,
                        default=512, help='base image size')  # image size
    parser.add_argument('--crop_size', type=int,
                        default=512, help='crop image size')
    parser.add_argument('--start_epoch', type=int,
                        default=1, help='train from this epoch')  # starting epoch
    parser.add_argument('--resume', type=_str2bool,
                        default=False, help='whether to continue training')  # resume from checkpoint
    parser.add_argument('--no_improve', type=int,
                        default=5, help='After a few epochs, the IOU did not improve')  # early-stop patience
    opt = parser.parse_args()

    # ---- build datasets ----
    try:
        train_ds = lowDataSet(opt, base_dir='Low_Contrast_Roads/', split='train')
        test_ds = lowDataSet(opt, base_dir='Low_Contrast_Roads/', split='test')
    except FileNotFoundError as e:
        print(f"Error: {e}")
        sys.exit()

    train_loader = DataLoader(train_ds, batch_size=opt.batch_size, shuffle=True, num_workers=6)
    test_loader = DataLoader(test_ds, batch_size=opt.batch_size, shuffle=False, num_workers=6)
    train_batch = len(train_loader)  # total training batches per epoch
    test_batch = len(test_loader)

    # Road segmentation is binary, hence 2 classes for the metrics.
    evaluator = Evaluator(num_class=2)

    # ---- build models ----
    network = models[opt.model_name]
    num_classes = 1
    model = network(num_classes).cuda()
    model = DataParallel(model)
    # model = DataParallel(model, device_ids=[0,1,2,3])  # multi-GPU training

    params = model.parameters()
    optimizer = AdaXW(params, opt.lr)

    # Resume from saved checkpoints, or start training from scratch.
    if opt.resume:
        checkpoint = torch.load('Low_Contrast/{}/ckpt/{}.pth.tar'.format(opt.model_name, opt.model_name))
        checkpoint_best = torch.load('Low_Contrast/{}/ckpt/{}_best.pth.tar'.format(opt.model_name, opt.model_name))

        model.module.load_state_dict(checkpoint['state_dict'])  # model weights
        optimizer.load_state_dict(checkpoint['optimizer'])  # optimizer state
        start_epoch = checkpoint['epoch'] + 1
        best_IOU = checkpoint_best['iou']

        # Free the checkpoint dicts before training allocates GPU memory.
        del checkpoint
        del checkpoint_best

        print("Continue to Train {} From Epoch {}".format(opt.model_name, start_epoch))
    else:
        start_epoch = 1
        best_IOU = 0.0
        print("Start to Train {}".format(opt.model_name))

    # ReduceLROnPlateau: multiply LR by 0.1 if the validation IoU has not
    # increased for 6 epochs ('max' mode because higher IoU is better).
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=6, factor=0.1)

    # ---- Training & Testing ----
    try:
        # NOTE: best_IOU is intentionally NOT reset here. The original code
        # set it back to 0.0, which on resume discarded the checkpointed best
        # IoU and let a worse model overwrite the best snapshot.
        no_improve = 0  # epochs since validation IoU last improved
        for epoch in range(start_epoch, opt.epoch + 1):
            print('----------------------Training-----------------------')
            train(train_loader, model, optimizer, epoch)
            # Release cached GPU memory between train and eval passes.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            # Evaluate on the test set once per epoch.
            print('----------------------Testing-----------------------')
            current_IOU = valid(test_loader, model, epoch, best_IOU)
            print("Current IOU is: {}".format(current_IOU))
            # Step the scheduler on the monitored metric (IoU).
            scheduler.step(current_IOU)
            if current_IOU > best_IOU:
                best_IOU = current_IOU
                no_improve = 0
            else:
                no_improve += 1

            # Log the running best BEFORE the early-stop check so the final
            # (stopping) epoch is recorded too; "with" closes the file safely.
            with open("Low_Contrast/{}/valid_log.txt".format(opt.model_name), "a") as file:
                file.write("Best IOU is: {}".format(best_IOU) + "\n")

            # Early stopping after opt.no_improve epochs without improvement.
            if no_improve >= opt.no_improve:
                print("Early stopping with best IOU: ", best_IOU)
                break
    except RuntimeError as e:
        print(f"Error: {e}")