import torch
import torch.nn as nn
import argparse
from torch.autograd import Variable
import torchvision.models as models
import os
from torch.utils import data
from models.colornet import generator
import numpy as np
from PIL import Image
from skimage.color import rgb2yuv, yuv2rgb
import cv2

from alfred.utils.log import logger as logging
from alfred.dl.torch.common import device
from alfred.dl.torch.model_summary import summary


def parse_args(argv=None):
    """Parse command-line options for GAN colorization training.

    Args:
        argv: optional list of argument strings. Defaults to ``None``, which
            makes argparse read ``sys.argv[1:]`` (original behavior); passing
            an explicit list allows the function to be used from tests or
            other Python code.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Train a GAN based model")
    parser.add_argument(
        "-d",
        "--training_dir",
        type=str,
        required=True,
        help="Training directory (folder contains all 256*256 images)")
    parser.add_argument("-t",
                        "--test_image",
                        type=str,
                        default="images/3.jpeg",
                        help="Test image location")
    parser.add_argument("-c",
                        "--checkpoint_location",
                        default="weights",
                        type=str,
                        help="Place to save checkpoints")
    parser.add_argument("-e",
                        "--epoch",
                        type=int,
                        default=120,
                        help="Epoches to run training")
    parser.add_argument("-b",
                        "--batch_size",
                        type=int,
                        default=2,
                        help="batch size")
    parser.add_argument("-p",
                        "--pixel_loss_weights",
                        type=float,
                        default=1000.0,
                        help="Pixel-wise loss weights")
    parser.add_argument("--g_every",
                        type=int,
                        default=1,
                        help="Training generator every k iteration")
    parser.add_argument("--g_lr",
                        type=float,
                        default=1e-4,
                        help="learning rate for generator")
    parser.add_argument("--d_lr",
                        type=float,
                        default=1e-4,
                        help="learning rate for discriminator")
    args = parser.parse_args(argv)
    return args


# define data generator
class PairImageDataset(data.Dataset):
    """Dataset yielding (Y, UV) tensor pairs from a folder of RGB images.

    Each item is an image converted to YUV and split into:
      * Y  shifted by -0.5 (roughly centered around 0)  -> shape (1, H, W)
      * U, V divided by their maximum amplitudes (0.436 / 0.615) so they
        land roughly in [-1, 1]                         -> shape (2, H, W)
    """

    def __init__(self, path):
        # NOTE(review): every directory entry is assumed to be a readable
        # image file — non-image files in `path` will raise at load time.
        files = os.listdir(path)
        self.files = [os.path.join(path, x) for x in files]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        # Force 3-channel RGB so grayscale/RGBA/palette images do not break
        # rgb2yuv (which expects an RGB image). This matches the
        # .convert('RGB') already applied to the test image in train().
        img = Image.open(self.files[index]).convert('RGB')
        yuv = rgb2yuv(img)
        y = yuv[..., 0] - 0.5
        u_t = yuv[..., 1] / 0.43601035
        v_t = yuv[..., 2] / 0.61497538
        return torch.Tensor(np.expand_dims(y, axis=0)), torch.Tensor(
            np.stack([u_t, v_t], axis=0))


def train(args):
    """Adversarially train the colorization GAN.

    ``G`` (generator) maps a 1-channel Y (luma) image to 2-channel UV chroma;
    ``D`` (discriminator, ResNet-18 with its head replaced by one sigmoid
    unit) scores full 3-channel YUV images as real/fake. Checkpoints and
    periodic test renderings are written under ``args.checkpoint_location``.

    Args:
        args: parsed command-line namespace from parse_args().
    """
    os.makedirs(args.checkpoint_location, exist_ok=True)

    # Define G, same as torch version
    G = generator().to(device)
    # summary(G, (1, 448, 448))
    # The num_classes=2 classifier head is discarded and replaced below by a
    # single sigmoid unit producing a real/fake probability for BCELoss.
    D = models.resnet18(pretrained=False, num_classes=2)
    D.fc = nn.Sequential(nn.Linear(512, 1), nn.Sigmoid())
    D = D.to(device)

    # Resume both networks from the epoch-0 checkpoint if it exists.
    weights_p = os.path.join(args.checkpoint_location, 'colorize_gan_0.pth.tar')
    if (os.path.exists(weights_p)):
        checkpoint = torch.load(weights_p)
        D.load_state_dict(checkpoint['D'])
        G.load_state_dict(checkpoint['G'])
        logging.info('D and G weights loaded from: {}'.format(weights_p))
    else:
        logging.info('resume weights not found, train from scratch.')

    train_ds = PairImageDataset(args.training_dir)
    logging.info('loaded dataset from: {}, data length: {}'.format(args.training_dir, train_ds.__len__()))
    train_dataloader = data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=0)
    if args.test_image is not None:
        # Prepare a fixed 256x256 test input: Y channel shifted by -0.5,
        # the same normalization PairImageDataset applies.
        test_img_ori = Image.open(args.test_image).convert('RGB')
        test_img = test_img_ori.resize((256, 256))
        test_yuv = rgb2yuv(test_img)
        test_inf = test_yuv[..., 0].reshape(1, 1, 256, 256)
        test_var = Variable(torch.Tensor(test_inf - 0.5)).to(device)

    # save test image for beginning
    if args.test_image is not None:
        test_res = G(test_var)
        uv = test_res.cpu().detach().numpy()
        # Undo the dataset's UV normalization (which divided by 0.436/0.615).
        uv[:, 0, :, :] *= 0.436
        uv[:, 1, :, :] *= 0.615
        test_yuv = np.concatenate([test_inf, uv], axis=1).reshape(3, 256, 256)
        test_rgb = yuv2rgb(test_yuv.transpose(1, 2, 0))
        # [:, :, [2, 1, 0]] reorders channels RGB -> BGR for cv2.imwrite.
        # NOTE(review): scaling by 256 (not 255) slightly overshoots full
        # intensity before clipping by the codec — confirm intended.
        cv2.imwrite(os.path.join(args.checkpoint_location, 'test_init.jpg'),
                    (test_rgb.clip(min=0, max=1) * 256)[:, :, [2, 1, 0]])

    i = 0
    adversarial_loss = torch.nn.BCELoss()
    # betas=(0.5, 0.999) is the customary Adam configuration for GANs.
    optimizer_G = torch.optim.Adam(G.parameters(),
                                   lr=args.g_lr,
                                   betas=(0.5, 0.999))
    optimizer_D = torch.optim.Adam(D.parameters(),
                                   lr=args.d_lr,
                                   betas=(0.5, 0.999))
    for epoch in range(args.epoch):
        for i, (y, uv) in enumerate(train_dataloader):
            try:
                # Adversarial ground truths
                valid = Variable(torch.Tensor(y.size(0), 1).fill_(1.0),
                                requires_grad=False).to(device)
                fake = Variable(torch.Tensor(y.size(0), 1).fill_(0.0),
                                requires_grad=False).to(device)

                yvar = Variable(y).to(device)
                uvvar = Variable(uv).to(device)
                # Real sample for D: ground-truth Y + UV stacked to 3 channels.
                real_imgs = torch.cat([yvar, uvvar], dim=1)

                # ---- Generator step ----
                optimizer_G.zero_grad()
                uvgen = G(yvar)
                # Generate a batch of images
                gen_imgs = torch.cat([yvar.detach(), uvgen], dim=1)

                # Loss measures generator's ability to fool the discriminator
                g_loss_gan = adversarial_loss(D(gen_imgs), valid)
                # Total G loss: GAN term plus weighted per-pixel MSE on UV.
                g_loss = g_loss_gan + args.pixel_loss_weights * torch.mean(
                    (uvvar - uvgen)**2)
                # G is stepped only every `g_every` iterations; the loss is
                # still computed each iteration for logging.
                if i % args.g_every == 0:
                    g_loss.backward()
                    optimizer_G.step()

                # ---- Discriminator step ----
                optimizer_D.zero_grad()
                # Measure discriminator's ability to classify real from generated samples
                real_loss = adversarial_loss(D(real_imgs), valid)
                fake_loss = adversarial_loss(D(gen_imgs.detach()), fake)
                d_loss = (real_loss + fake_loss) / 2
                d_loss.backward()
                optimizer_D.step()
                if i % 100 == 0:
                    logging.info(
                        "Epoch: %d, iter: %d, [D loss: %f] [G total loss: %f] [G GAN Loss: %f]"
                        % (epoch, i, d_loss.item(), g_loss.item(), g_loss_gan.item()))
                    save_weights(
                        {'D': D.state_dict(), 'G': G.state_dict(), 'epoch': epoch},
                        epoch
                    )
                    if args.test_image is not None:
                        # Render the fixed test image with the current G.
                        # NOTE(review): this forward pass runs without
                        # torch.no_grad(), and `uv` below rebinds the batch
                        # loop variable — harmless here (the batch `uv` is no
                        # longer used this iteration) but worth cleaning up.
                        test_res = G(test_var)
                        uv = test_res.cpu().detach().numpy()
                        uv[:, 0, :, :] *= 0.436
                        uv[:, 1, :, :] *= 0.615
                        test_yuv = np.concatenate([test_inf, uv],
                                                axis=1).reshape(3, 256, 256)
                        test_rgb = yuv2rgb(test_yuv.transpose(1, 2, 0))
                        test_res = (test_rgb.clip(min=0, max=1) * 256)[:, :, [2, 1, 0]]
                        test_res = cv2.resize(test_res, (test_img_ori.width, test_img_ori.height))
                        cv2.imwrite(os.path.join(args.checkpoint_location, 'test_epoch_' + str(epoch) + '.jpg'),
                            test_res)
                        # NOTE(review): test_img_ori is RGB but cv2.imwrite
                        # expects BGR, so 'test_ori.jpg' is saved with red and
                        # blue channels swapped — confirm whether intended.
                        cv2.imwrite(os.path.join(args.checkpoint_location, 'test_ori.jpg'),
                            np.array(test_img_ori, dtype=np.uint8))
            except KeyboardInterrupt:
                # Ctrl-C inside the batch loop: checkpoint under the epoch-0
                # (resume) name, then exit.
                logging.info('interrupted. try saving model now..')
                save_weights(
                    {'D': D.state_dict(), 'G': G.state_dict(), 'epoch': epoch}, 0
                )
                logging.info('saved.')
                exit(0)
    # Final checkpoint, saved under the epoch-0 name so the resume logic at
    # the top of this function picks it up on the next run.
    save_weights(
        {'D': D.state_dict(), 'G': G.state_dict(), 'epoch': epoch}, 0
    )


def save_weights(to_save_dict, epoch, checkpoint_location=None):
    """Serialize a checkpoint dict to ``colorize_gan_<epoch>.pth.tar``.

    Args:
        to_save_dict: state to persist, e.g. {'D': ..., 'G': ..., 'epoch': ...}.
        epoch: integer tag embedded in the output filename.
        checkpoint_location: target directory. When None, falls back to the
            module-level ``args`` bound in ``__main__`` (the original
            behavior, which made this function crash if the module was
            imported without running the script entry point).
    """
    if checkpoint_location is None:
        checkpoint_location = args.checkpoint_location
    torch.save(
        to_save_dict,
        os.path.join(checkpoint_location,
                     'colorize_gan_{}.pth.tar'.format(epoch)))


if __name__ == "__main__":
    # `args` must be bound at module level: save_weights() reads the global
    # `args` for its checkpoint directory, so this assignment cannot be
    # inlined into the train() call.
    args = parse_args()
    train(args)
