import os
import argparse
from PIL import Image
import torch
from torchvision import transforms
from torchvision.utils import save_image, make_grid
from model import VGGEncoder, Decoder
from style_swap import style_swap
import cv2
import numpy as np
import matplotlib.pyplot as plt


# ImageNet channel statistics; the pretrained VGG encoder expects inputs
# normalized with these values.
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225],
)

# PIL image or HWC uint8 ndarray -> normalized CHW float tensor.
trans = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])


def transf_image(tensor, nrow=8, padding=2,
               normalize=False, range=None, scale_each=False, pad_value=0, format=None):
    """Convert a tensor (or mini-batch) to an H x W x C uint8 RGB ndarray.

    A mini-batch is first tiled into a single image with ``make_grid``.
    Unlike ``torchvision.utils.save_image`` (which this was adapted from),
    nothing is written to disk — the pixel array is returned to the caller.

    Args:
        tensor (Tensor or list): Image(s) to convert.
        nrow, padding, normalize, range, scale_each, pad_value: forwarded
            verbatim to ``torchvision.utils.make_grid``.
        format: unused; kept only for backward compatibility with callers.

    Note:
        ``range`` and ``format`` shadow builtins; the names are preserved
        so existing keyword callers keep working.

    Returns:
        numpy.ndarray: H x W x C uint8 array on the CPU.
    """
    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                     normalize=normalize, range=range, scale_each=scale_each)
    # Scale [0, 1] -> [0, 255]; the +0.5 makes the uint8 cast round to nearest.
    ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    return ndarr

def denorm(tensor, device):
    """Undo ImageNet normalization and clamp the result to [0, 1].

    Args:
        tensor: normalized image tensor with channels-first layout
            (..., 3, H, W).
        device: device on which the mean/std constants are created.

    Returns:
        De-normalized tensor with values clamped to the valid [0, 1] range.
    """
    mean = torch.Tensor([0.485, 0.456, 0.406]).reshape(-1, 1, 1).to(device)
    std = torch.Tensor([0.229, 0.224, 0.225]).reshape(-1, 1, 1).to(device)
    return (tensor * std + mean).clamp(0, 1)


def train(e, d, path_c, path_s, device, patch_size):
    """Run one style-swap inference pass and return the stylized image.

    Note: despite its name this performs inference only (wrapped in
    ``torch.no_grad``, no parameter updates); the name is kept so existing
    callers keep working.

    Args:
        e: VGG encoder module.
        d: decoder module.
        path_c: path to the content image.
        path_s: path to the style image.
        device: torch device the computation runs on.
        patch_size: patch size for the style-swap operation.

    Returns:
        numpy.ndarray: stylized image as an H x W x C uint8 RGB array.
    """
    # The style image is read with OpenCV (BGR) and converted to RGB so
    # that both inputs reach ``trans`` in the same channel order as the
    # PIL-loaded content image.
    s = cv2.imread(path_s)
    s = cv2.cvtColor(s, cv2.COLOR_BGR2RGB)
    s_tensor = trans(s).unsqueeze(0).to(device)
    c = Image.open(path_c)
    c_tensor = trans(c).unsqueeze(0).to(device)
    with torch.no_grad():
        cf = e(c_tensor)
        sf = e(s_tensor)
        style_swap_res = style_swap(cf, sf, patch_size, 1)
        # Free the large feature maps before decoding to reduce peak memory.
        # (The original also did ``del e``, which only dropped the local
        # reference — the caller still holds the encoder, so it freed nothing.)
        del cf
        del sf
        out = d(style_swap_res)
        out_denorm = denorm(out, device)
    return transf_image(out_denorm, nrow=1)



def main():
    """CLI entry point: style-swap every content image with every style image.

    ``--content`` and ``--style`` are treated as directories; the script
    walks both trees and processes the full cross product of image pairs.
    """
    parser = argparse.ArgumentParser(description='Style Swap by Pytorch')
    parser.add_argument('--content', '-c', type=str, default=None,
                        help='Content image path e.g. content.jpg')
    parser.add_argument('--style', '-s', type=str, default=None,
                        help='Style image path e.g. image.jpg')
    parser.add_argument('--output_name', '-o', type=str, default=None,
                        help='Output path for generated image, no need to add ext, e.g. out')
    parser.add_argument('--patch_size', '-p', type=int, default=3,
                        help='Size of extracted patches from style features')
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID(nagative value indicate CPU)')
    parser.add_argument('--model_state_path', type=str, default='model_state.pth',
                        help='save directory for result and loss')

    args = parser.parse_args()

    # Select the requested GPU if available, otherwise fall back to CPU.
    if torch.cuda.is_available() and args.gpu >= 0:
        device = torch.device(f'cuda:{args.gpu}')
        # Bug fix: report the selected GPU, not always device 0.
        print(f'# CUDA available: {torch.cuda.get_device_name(args.gpu)}')
    else:
        device = torch.device('cpu')

    content_path = args.content
    style_path = args.style

    e = VGGEncoder().to(device)
    d = Decoder()
    # Bug fix: map_location must follow the selected device; the original
    # hard-coded 'cuda:0', which crashes when loading on a CPU-only machine.
    d.load_state_dict(torch.load(args.model_state_path, map_location=device))
    d = d.to(device)

    for dirpath_c, _dirs_c, files_c in os.walk(content_path):
        for file_c in files_c:
            for dirpath_s, _dirs_s, files_s in os.walk(style_path):
                for file_s in files_s:
                    im = train(e, d,
                               os.path.join(dirpath_c, file_c),
                               os.path.join(dirpath_s, file_s),
                               device, args.patch_size)
                    # transf_image returns RGB; OpenCV expects BGR.
                    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
                    # NOTE(review): every pair overwrites '1.png' and
                    # --output_name is never used — looks unintended, but
                    # kept as-is to preserve existing behavior.
                    cv2.imwrite('1.png', im)
                    cv2.imshow('video', im)
                    cv2.waitKey(100)


# Standard script guard: run the CLI only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
