import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from tqdm import tqdm

from G import get_Generator
from D import get_Discriminator, GANLoss
import numpy as np
import os
import math
import argparse
import random
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
from datasets import GoProDataset
import time
import yaml
from metric_counter import MetricCounter
from base_model import BaseModel

# Command-line configuration for training.
parser = argparse.ArgumentParser(description="Deep Multi-Patch Hierarchical Network")
parser.add_argument("-e", "--epochs", type=int, default=10)
parser.add_argument("-se", "--start_epoch", type=int, default=0)
parser.add_argument("-b", "--batchsize", type=int, default=1)
parser.add_argument("-s", "--imagesize", type=int, default=128)
parser.add_argument("-l", "--learning_rate", type=float, default=0.0001)
parser.add_argument('--input_nc', type=int, default=64)
parser.add_argument('--lambda_L1', type=float, default=0.5)
# BUG FIX: argparse's `type=list` splits the raw string into characters
# (e.g. "0,1" -> ['0', ',', '1']).  Parse a comma-separated id string into a
# list of ints instead; the default (empty list) is unchanged.
parser.add_argument('--gpu_ids', type=lambda s: [int(i) for i in s.split(',') if i],
                    default=[], help="comma-separated GPU ids, e.g. '0,1'")
# BUG FIX: `type=bool` is broken in argparse — bool("False") is True, so any
# non-empty value on the command line used to yield True.  Accept an explicit
# true/false spelling; the default (True) is unchanged.
parser.add_argument('--isTrain', type=lambda s: s.lower() in ('true', '1', 'yes'),
                    default=True)
parser.add_argument('--checkpoints_dir', type=str, default='/')
parser.add_argument('--name', type=str, default='GAN')
parser.add_argument('--preprocess', type=str, default='scale_width')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam default=0.5')
parser.add_argument('--gan_mode', type=str, default='vanilla')
args = parser.parse_args()

# Hyper Parameters
METHOD = "DMPHN"
LEARNING_RATE = args.learning_rate
EPOCHS = args.epochs
BATCH_SIZE = args.batchsize
IMAGE_SIZE = args.imagesize
# NOTE(review): hard-coded to the *second* GPU ("cuda:1") — confirm this is
# intentional and not a leftover from a specific multi-GPU machine.
DEVICE = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
metric_counter = MetricCounter('DMPHN')


# def save_deblur_images(images, iteration, epoch):
#     filename = './checkpoints/' + METHOD + "/epoch" + str(epoch) + "/" + "Iter_" + str(iteration) + "_deblur.png"
#     torchvision.utils.save_image(images, filename)

class DMPHNModel(BaseModel):

    # @staticmethod
    # def modify_commandline_options(parser, is_train=True):
    #
    #     # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
    #     parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
    #     if is_train:
    #         parser.set_defaults(pool_size=0, gan_mode='vanilla')
    #         parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
    #
    #     return parser

    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call
        # <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call
        # <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call
        # <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']

        # define networks (both generator and discriminator)
        self.netG = get_Generator()

        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images;
            # Therefore, #channels for D is input_nc + output_nc
            self.netD = get_Discriminator()

            # define loss functions
            self.criterionGAN = GANLoss(opt.gan_mode, target_real_label=0.8, target_fake_label=0.2).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, images):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): include the data itself and its metadata information.

        The option 'direction' can be used to swap images in domain A and domain B.
        """

        self.blur = images['blur_image'].to(self.device)
        self.real = images['sharp_image'].to(self.device)

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake = self.netG(self.blur)  # G(A)

    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B
        # we use conditional GANs; we need to feed both input and output to the discriminator
        pred_fake = self.netD(self.fake.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        pred_real = self.netD(self.real)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()

    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fake the discriminator
        pred_fake = self.netD(self.fake)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake, self.real) * self.opt.lambda_L1
        # combine loss and calculate gradients
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()

    def optimize_parameters(self):
        self.forward()  # compute fake images: G(A)

        # update D
        self.set_requires_grad(self.netD, True)  # enable backprop for D
        self.optimizer_D.zero_grad()  # set D's gradients to zero
        self.backward_D()  # calculate gradients for D
        self.optimizer_D.step()  # update D's weights

        # update G
        self.set_requires_grad(self.netD, False)  # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()  # set G's gradients to zero
        self.backward_G()  # calculate graidents for G
        self.optimizer_G.step()  # udpate G's weights

    def get_input(self, data):
        img = data['a']
        inputs = img
        targets = data['b']
        inputs, targets = inputs.cuda(), targets.cuda()
        return inputs, targets

    def tensor2im(self, image_tensor, imtype=np.uint8):
        image_numpy = image_tensor[0].cpu().float().numpy()
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
        return image_numpy.astype(imtype)

    def get_images_and_metrics(self, inp, output, target) -> (float, float, np.ndarray):
        inp = self.tensor2im(inp)
        fake = self.tensor2im(output.data)
        real = self.tensor2im(target.data)
        # psnr = PSNR(fake, real)
        # ssim = SSIM(fake, real, multichannel=True)
        vis_img = np.hstack((inp, fake, real))  # 在水平方向上平铺
        # return psnr, ssim, vis_img


def main():
    """Train the DMPHN GAN on the GoPro dataset and checkpoint the generator."""
    # Load the experiment configuration.  safe_load avoids arbitrary code
    # execution (yaml.load without a Loader is deprecated and unsafe).
    # NOTE(review): `config` is not used below — confirm whether it should
    # feed the model/dataset options.
    with open('config.yaml', 'r') as f:
        config = yaml.safe_load(f)

    models = DMPHNModel(args)

    # Resume from a pretrained generator checkpoint if one exists.
    ckpt_path = os.path.join('./checkpoints', METHOD + '.pkl')
    if os.path.exists(ckpt_path):
        models.netG.load_state_dict(torch.load(ckpt_path, map_location=torch.device('cpu')))
        print("load models success")

    print("init data folders")
    # Load the training dataset (random crops of IMAGE_SIZE).
    train_dataset = GoProDataset(
        blur_image_files='datas/train_blur_file.txt',
        sharp_image_files='datas/train_sharp_file.txt',
        root_dir=r'D:\Dataset\GOPRO',
        crop=True,
        crop_size=IMAGE_SIZE,
        transform=transforms.Compose([
            transforms.ToTensor()
        ]))
    train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    print("Loading data...")

    # Make sure the save directory exists before torch.save is called.
    os.makedirs('./checkpoints', exist_ok=True)

    print("Train...")
    for epoch in range(args.start_epoch, EPOCHS):
        metric_counter.clear()
        # BUG FIX: the progress-bar total was hard-coded to 12; use the real
        # number of batches per epoch.
        tq = tqdm(train_dataloader, total=len(train_dataloader))
        # BUG FIX: the displayed lr was hard-coded to 0.0001, but the
        # optimizers actually run at args.lr (default 0.0002).
        tq.set_description('Epoch {}, lr {}'.format(epoch, args.lr))
        for images in tq:
            models.set_input(images)
            models.optimize_parameters()
            # dict preserves insertion order (Python 3.7+), matching
            # loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake'].
            G_GAN_loss, G_L1_loss, D_real_loss, D_fake_loss = models.get_current_losses().values()
            metric_counter.add_losses(G_GAN_loss, G_L1_loss, D_real_loss, D_fake_loss)
            tq.set_postfix(loss=(G_GAN_loss, G_L1_loss, D_real_loss, D_fake_loss))
        metric_counter.write_to_tensorboard(epoch, validation=False)
        # Save the best model so far (by metric_counter's criterion).
        if metric_counter.update_best_model():
            torch.save(models.netG.state_dict(), os.path.join('./checkpoints', METHOD + "_best.pkl"))
        # Always save the most recent model.
        torch.save(models.netG.state_dict(), os.path.join('./checkpoints', METHOD + "_last.pkl"))


if __name__ == '__main__':
    main()
