# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Project given image to the latent space of pretrained network pickle."""

import copy
import os
from time import perf_counter
from tqdm import tqdm

import click
import pickle
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from torchvision.utils import save_image

import dnnlib
import legacy
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader

from segmentation.dataset import NameAndImage
from segmentation.utils import IntList


def project(
    G,
    data_dir,
    save_dir,
    samples,
    length,
    *,
    num_steps                  = 1000,
    w_avg_samples              = 10000,
    initial_learning_rate      = 0.1,
    initial_noise_factor       = 0.05,
    lr_rampdown_length         = 0.25,
    lr_rampup_length           = 0.05,
    noise_ramp_length          = 0.75,
    regularize_noise_weight    = 1e5,
    verbose                    = False,
    device: torch.device
):
    """Project every image produced by the data loader into the W latent
    space of generator ``G`` via LPIPS-based optimization.

    For each image, the optimized latent is pickled to
    ``<data_dir>/ws/<name>.pkl`` and a side-by-side comparison of the target
    and the final synthesized image is saved to ``<data_dir>/ws_v/<name>.png``.

    Args:
        G: Pretrained generator with ``mapping``/``synthesis`` sub-networks
            (StyleGAN2-ADA style).
        data_dir: Dataset root; also receives the ``ws``/``ws_v`` outputs.
        save_dir: Currently unused; kept for interface compatibility with callers.
        samples: Sample indices forwarded to ``NameAndImage``.
        length: Number of slices per sample, forwarded to ``NameAndImage``.
        num_steps: Optimization steps per image.
        w_avg_samples: Number of z samples used to estimate the W mean/stddev.
        initial_learning_rate: Peak learning rate of the ramped schedule.
        initial_noise_factor: Initial scale of the noise added to w each step.
        lr_rampdown_length: Fraction of steps for the cosine LR ramp-down.
        lr_rampup_length: Fraction of steps for the linear LR ramp-up.
        noise_ramp_length: Fraction of steps over which w-noise decays to zero.
        regularize_noise_weight: Weight of the noise-buffer regularizer.
        verbose: Print progress information.
        device: Torch device to run on.
    """

    def logprint(*args):
        if verbose:
            print(*args)

    G_ = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore

    # Compute w stats: the W midpoint is the starting point of every
    # projection, and the stddev scales the per-step w-noise.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G_.z_dim)
    w_samples = G_.mapping(torch.from_numpy(z_samples).to(device), None)  # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32)       # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True)      # [1, 1, C]
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5

    # Load VGG16 feature detector (LPIPS backbone).
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    # Expects [C,H,W] in dynamic range [0,255]; W & H must match G output resolution.
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)

    loader = DataLoader(
        NameAndImage(data_dir, samples, length),
        batch_size=1, shuffle=False, drop_last=False, num_workers=1, pin_memory=True
    )

    # Make sure the output directories exist before the (long) optimization
    # starts, so the final saves cannot fail on a fresh run.
    os.makedirs(os.path.join(data_dir, 'ws'), exist_ok=True)
    os.makedirs(os.path.join(data_dir, 'ws_v'), exist_ok=True)

    total = len(samples) * length

    # Project each image independently.
    for cur, (filename, image) in enumerate(loader):
        # Fresh generator copy per image: the noise buffers are optimized
        # in place below and must not leak into the next projection.
        G_ = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore

        # Setup noise inputs.
        noise_bufs = { name: buf for (name, buf) in G_.synthesis.named_buffers() if 'noise_const' in name }

        # Features for target image.
        image = image.to(device).to(torch.float32)    # 1, C, H, W
        if image.shape[1]==1:
            image = image.repeat(1,3,1,1)             # VGG expects 3 channels
        image_ = image.clone().detach()               # keep a copy; vgg16 mutates its input

        # vgg16 change the input image
        target_features = vgg16(image, resize_images=False, return_lpips=True)

        w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable
        optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)

        # Init noise buffers to unit gaussians and make them trainable.
        for buf in noise_bufs.values():
            buf[:] = torch.randn_like(buf)
            buf.requires_grad = True

        pbar = tqdm(range(num_steps), dynamic_ncols=True, smoothing=0.01, ascii=True)
        for step in pbar:
            # Learning rate schedule: linear ramp-up, cosine ramp-down.
            t = step / num_steps
            w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
            lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
            lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
            lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
            lr = initial_learning_rate * lr_ramp
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

            # Synth images from opt_w (perturbed by decaying gaussian noise).
            w_noise = torch.randn_like(w_opt) * w_noise_scale
            ws = (w_opt + w_noise).repeat([1, G_.mapping.num_ws, 1])
            synth_images = G_.synthesis(ws, noise_mode='const')

            # Map generator output from [-1, 1] to [0, 255] for VGG.
            synth_images = (synth_images + 1) * (255/2)

            if synth_images.shape[1]==1:
                synth_images = synth_images.repeat(1,3,1,1)

            # Keep the last synthesized image for the visual comparison below.
            if step == num_steps-1:
                synth_images_ = synth_images.clone().detach()

            # Features for synth images.
            # vgg16 change the input image
            synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
            dist = (target_features - synth_features).square().sum()

            # Noise regularization: penalize spatial autocorrelation of the
            # noise buffers at multiple scales.
            reg_loss = 0.0
            for v in noise_bufs.values():
                noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()
                while True:
                    reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2
                    reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2
                    if noise.shape[2] <= 8:
                        break
                    noise = F.avg_pool2d(noise, kernel_size=2)
            loss = dist + reg_loss * regularize_noise_weight

            # Step
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()

            pbar.set_description(f'sample {cur+1}/{total} | step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} | loss {float(loss):<5.2f}')

            # Normalize noise to zero mean, unit variance.
            with torch.no_grad():
                for buf in noise_bufs.values():
                    buf -= buf.mean()
                    buf *= buf.square().mean().rsqrt()

        # Save target / reconstruction side by side for visual inspection.
        save_image(torch.cat([image_/255, synth_images_/255], dim=0), os.path.join(data_dir, 'ws_v', f"{filename[0]}.png"), padding=0)

        with open(os.path.join(data_dir, 'ws', f"{filename[0]}.pkl"), "wb") as f:
            # shape: 1, 512
            pickle.dump({"w": w_opt.detach().squeeze(0).cpu()}, f)

#----------------------------------------------------------------------------

# @click.command()
# @click.option('--save_dir',               help='Network pickle save', required=True)
# @click.option('--data_dir',               help='Training data save', metavar='DIR')
# @click.option('--num-steps',              help='Number of optimization steps', type=int, default=1000, show_default=True)
# @click.option('--samples',                help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
# @click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
# @click.option('--seed',                   help='Random seed', type=int, default=403, show_default=True)
# @click.option('--sub',                    help='sub process', is_flag=True)
def run_projection(
    save_dir: str,
    data_dir: str,
    samples,
    length: int,
    seed: int,
    num_steps: int,
):
    """Load the best generator snapshot from *save_dir* and project the
    selected dataset samples into its latent space.

    Args:
        save_dir: Training-run directory containing ``network-snapshot-best.pkl``.
        data_dir: Dataset root passed through to :func:`project`, which also
            writes its ``ws``/``ws_v`` outputs there.
        samples: Sample indices to project.
        length: Number of slices per sample.
        seed: Random seed applied to both numpy and torch.
        num_steps: Optimization steps per image.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load the generator (exponential-moving-average weights) from the
    # best snapshot of the training run.
    network_pkl = os.path.join(save_dir, 'network-snapshot-best.pkl')
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore

    # Optimize projection for every requested sample and report wall time.
    start_time = perf_counter()
    project(
        G,
        save_dir=save_dir,
        data_dir=data_dir,
        samples=samples,
        length=length,
        num_steps=num_steps,
        device=device,
        verbose=True
    )
    print (f'Elapsed: {(perf_counter()-start_time):.1f} s')

#----------------------------------------------------------------------------

if __name__ == "__main__":
    # Restrict execution to a single GPU before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = '7'
    run_projection(
        save_dir='save/00011-GAN_OASIS-mirror-low_shot-kimg25000-batch32-color-translation-cutout',
        data_dir='data/OASIS-128-256',
        samples=[197],
        length=128,
        seed=0,
        num_steps=800,
    ) # pylint: disable=no-value-for-parameter
# [32, 79, 12, 88, 36, 17, 64, 27, 74, 45, 61, 38, 51, 62, 65, 33, 5, 53, 97, 49]
# d = ISIC2018Task1()

#----------------------------------------------------------------------------
