import torch
import torch.nn as nn
import torchvision
import torch.backends.cudnn as cudnn
import torch.optim
import os
import sys
import argparse
import time
import dataloader
import model
import Myloss
import numpy as np
from tqdm import tqdm
from visdom import Visdom
from torch.utils.data import DataLoader
from torchvision import transforms
from utils.losses import MSELoss,L1Loss,PSNRLoss
from utils.loss_ssim import SSIMLoss


def weights_init(m):
    """DCGAN-style weight initialisation, meant for ``net.apply(weights_init)``.

    Conv* layers get weights drawn from N(0, 0.02); BatchNorm* layers get
    scale weights from N(1, 0.02) and zero bias.

    Fixes over the classic recipe:
    - Layers whose affine parameters are absent (e.g. ``BatchNorm2d(...,
      affine=False)`` has ``weight is None``) are skipped instead of raising
      ``AttributeError``.
    - Uses ``torch.nn.init`` instead of mutating ``.data`` directly.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        if getattr(m, 'weight', None) is not None:
            nn.init.normal_(m.weight, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        if m.weight is not None:
            nn.init.normal_(m.weight, 1.0, 0.02)
        if m.bias is not None:
            nn.init.zeros_(m.bias)





def train(config):
    """Train DA_DCE_Net on paired low-light / normal-light images.

    Combines the zero-reference DCE losses (total variation on the curve map,
    spatial consistency, color constancy, exposure) with an SSIM loss between
    the denoised output and the paired normal-light image.  Loss curves and a
    qualitative image grid are streamed to a running Visdom server, per-epoch
    weight snapshots are written, and a resumable checkpoint (model +
    optimizer + scheduler) is overwritten every epoch.

    Args:
        config: argparse.Namespace carrying the CLI options defined in
            ``__main__`` (paths, lr, batch sizes, epoch count, ...).
    """
    # Visdom dashboard; requires `python -m visdom.server` to be running.
    wind = Visdom()
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    os.environ['OMP_NUM_THREADS'] = '0'
    os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    DA_DCE_Net = model.DA_DCE_Net().to(device)
    ssim_loss = SSIMLoss()
    start_epoch = 0
    DA_DCE_Net.apply(weights_init)

    train_dataset = dataloader.lowlight_loader(config.lowlight_images_path, config.paired_normal_path)
    train_loader = DataLoader(train_dataset, batch_size=config.train_batch_size, shuffle=True, num_workers=config.num_workers, pin_memory=True)
    # NOTE(review): "validation" reuses the training dataset — there is no
    # held-out split; the eval pass below only visualizes one training batch.
    val_loader = DataLoader(train_dataset, batch_size=config.val_batch_size, shuffle=False, num_workers=config.num_workers, pin_memory=True)

    # One Visdom window per curve, seeded with a dummy (0, 0) point so that
    # later 'append' updates have a window to attach to.
    for win_name in ('train_loss', 'dce_loss', 'ssim_loss', 'lr'):
        wind.line([0.], [0.], win=win_name, opts=dict(title=win_name))

    L_color = Myloss.L_color()
    L_spa = Myloss.L_spa(device)
    L_exp = Myloss.L_exp(16, 0.6, device)  # presumably 16x16 patches, target exposure 0.6 — confirm in Myloss
    L_TV = Myloss.L_TV()

    optimizer = torch.optim.Adam(DA_DCE_Net.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=config.num_epochs)

    if config.load_pretrain == True:
        DA_DCE_Net.load_state_dict(torch.load(config.pretrain_dir))
    if config.train_with_ckpt == True:
        # Resume model / optimizer / scheduler state from the last checkpoint.
        path_checkpoint = config.snapshots_folder + "/checkpoint/ckpt_best.pth"
        checkpoint = torch.load(path_checkpoint)
        DA_DCE_Net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        scheduler.load_state_dict(checkpoint['lr_scheduler'])

    for epoch in range(start_epoch, config.num_epochs):
        DA_DCE_Net.train()
        loop = tqdm(enumerate(train_loader), total=len(train_loader))
        for iteration, (img_lowlight, img_normal) in loop:
            img_lowlight = img_lowlight.to(device)
            img_normal = img_normal.to(device)

            enhanced_image_1, enhanced_image, denoise_result, A = DA_DCE_Net(img_lowlight)

            # Zero-reference enhancement losses; the scalar weights (200, 5,
            # 10) follow the original Zero-DCE recipe.
            Loss_TV = 200 * L_TV(A)
            loss_spa = torch.mean(L_spa(enhanced_image, img_lowlight))
            loss_col = 5 * torch.mean(L_color(enhanced_image))
            loss_exp = 10 * torch.mean(L_exp(enhanced_image))

            # Denoising loss: SSIM against the paired normal-light image.
            # NOTE(review): the target is scaled by 255, which suggests
            # denoise_result is in [0, 255] while img_normal is in [0, 1] —
            # confirm against model.py / dataloader.py.
            loss_ssim = 1 - ssim_loss(denoise_result, img_normal * 255.)

            loss_dce = Loss_TV + loss_spa + loss_col + loss_exp
            loss = loss_dce + loss_ssim

            optimizer.zero_grad()
            loss.backward()
            # FIX: torch.nn.utils.clip_grad_norm is deprecated; the supported
            # in-place API is clip_grad_norm_.
            torch.nn.utils.clip_grad_norm_(DA_DCE_Net.parameters(), config.grad_clip_norm)
            optimizer.step()

            # Progress-bar readout for the current batch.
            loop.set_description(f'Epoch [{epoch}/{config.num_epochs}]')
            loop.set_postfix(loss=loss.item(), lr=scheduler.get_last_lr()[0])

        # Per-epoch snapshot of the raw weights.
        torch.save(DA_DCE_Net.state_dict(), config.snapshots_folder + "/Epoch" + str(epoch) + '.pth')

        # Append the last batch's losses and the current lr to the dashboards.
        wind.line([loss.item()], [epoch], win='train_loss', update='append')
        wind.line([loss_dce.item()], [epoch], win='dce_loss', update='append')
        wind.line([loss_ssim.item()], [epoch], win='ssim_loss', update='append')
        wind.line([scheduler.get_last_lr()[0]], [epoch], win='lr', update='append')
        scheduler.step()

        # Qualitative check: push one batch of (input, enhanced, denoised)
        # images to Visdom, then stop.
        DA_DCE_Net.eval()
        with torch.no_grad():
            for img_lowlight, img_normal in val_loader:
                img_lowlight = img_lowlight.to(device)
                img_normal = img_normal.to(device) * 255.
                _, enhanced_image, denoise_result, A = DA_DCE_Net(img_lowlight)
                wind.images(torch.concat([img_lowlight * 255., enhanced_image * 255., denoise_result], dim=0), nrow=12, opts={"title": "epoch {} images".format(epoch)})
                break  # only the first batch is visualized

        # Resumable checkpoint, overwritten every epoch.  'epoch' stores the
        # NEXT epoch to run, so a resume picks up where we left off.
        checkpoint = {
            "net": DA_DCE_Net.state_dict(),
            'optimizer': optimizer.state_dict(),
            "epoch": epoch + 1,
            'lr_scheduler': scheduler.state_dict(),
        }
        os.makedirs(config.snapshots_folder + "/checkpoint", exist_ok=True)
        torch.save(checkpoint, config.snapshots_folder + '/checkpoint/ckpt_best.pth')




if __name__ == "__main__":

    parser = argparse.ArgumentParser()

    # Input Parameters
    parser.add_argument('--lowlight_images_path', type=str, default="Zero-DCE_code/data/train_data/low/")
    parser.add_argument('--paired_normal_path', type=str, default="Zero-DCE_code/data/train_data/normal/")
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--weight_decay', type=float, default=0.0001)
    parser.add_argument('--grad_clip_norm', type=float, default=0.1)
    parser.add_argument('--num_epochs', type=int, default=300)
    parser.add_argument('--train_batch_size', type=int, default=5)
    parser.add_argument('--val_batch_size', type=int, default=4)
    parser.add_argument('--num_workers', type=int, default=1)
    parser.add_argument('--display_iter', type=int, default=10)
    parser.add_argument('--snapshot_iter', type=int, default=10)
    parser.add_argument('--snapshots_folder', type=str, default="./snapshots_PLA")
    parser.add_argument('--load_pretrain', type=bool, default= False)
    parser.add_argument('--train_with_ckpt', type=bool, default= False)
    parser.add_argument('--pretrain_dir', type=str, default= "./snapshots/Epoch202.pth")

    config = parser.parse_args()

    if not os.path.exists(config.snapshots_folder):
        os.mkdir(config.snapshots_folder)


    train(config)









