# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Generate style mixing image matrix using pretrained network pickle."""

from io import BytesIO
from projector import project
import base64
import imageio
import uvicorn
from pydantic import BaseModel
from fastapi import FastAPI
import os
import re
from typing import List

import click
import dnnlib
import numpy as np
import PIL.Image
import torch

import legacy

# ----------------------------------------------------------------------------


# def num_range(s: str) -> List[int]:
#     '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''

#     range_re = re.compile(r'^(\d+)-(\d+)$')
#     m = range_re.match(s)
#     if m:
#         return list(range(int(m.group(1)), int(m.group(2))+1))
#     vals = s.split(',')
#     return [int(x) for x in vals]


# ----------------------------------------------------------------------------
network_pkl = "anime.pkl"
outdir = "out"
print('Loading networks from "%s"...' % network_pkl)
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(device)  # type: ignore

# os.makedirs(outdir, exist_ok=True)
if __name__ == '__main__':

    uvicorn.run(app='main:app', host="0.0.0.0",
                port=8000, reload=False, debug=False)
app = FastAPI()


def image_to_base64(imageRaw):
    """Serialize an image object to base64-encoded JPEG bytes.

    `imageRaw` is anything exposing a PIL-style `save(buffer, format=...)`
    method; the result is the raw base64 bytes of its JPEG encoding.
    """
    buffer = BytesIO()
    imageRaw.save(buffer, format='JPEG')
    return base64.b64encode(buffer.getvalue())


# @click.command()
# @click.option('--rows', 'row_seeds', type=num_range, help='Random seeds to use for image rows', required=True)
# @click.option('--cols', 'col_seeds', type=num_range, help='Random seeds to use for image columns', required=True)
# @click.option('--styles', 'col_styles', type=num_range, help='Style layer range', default='0-6', show_default=True)
# @click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
# @click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
def generate_style_mix(
    row_seeds: List[int],
    col_seeds: List[int],
    col_styles: List[int] = None,
    truncation_psi: float = 1,
    noise_mode: str = "const",
) -> tuple:
    """Generate style-mixed images from the pretrained generator `G`.

    For every (row_seed, col_seed) pair, the W vector of the row seed is
    cloned and the layers listed in `col_styles` are replaced with the
    column seed's W entries before synthesis.

    Args:
        row_seeds: random seeds providing the base (row) latents.
        col_seeds: random seeds providing the style-donor (column) latents.
        col_styles: indices of the style layers to copy from the column
            latent; defaults to layers 0-6.
        truncation_psi: truncation factor applied around `G.mapping.w_avg`.
        noise_mode: noise mode forwarded to `G.synthesis`
            ('const', 'random' or 'none').

    Returns:
        A 2-tuple `(imageArr, parmArr)`: base64-encoded JPEG bytes per
        mixed image, and the matching `[row_seed, col_seed]` pairs.
    """
    # Avoid a mutable default argument; materialize the default here instead.
    if col_styles is None:
        col_styles = [0, 1, 2, 3, 4, 5, 6]
    print("row_seeds", row_seeds)
    print("col_seeds", col_seeds)
    print("col_styles", col_styles)

    print('Generating W vectors...')
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim)
                      for seed in all_seeds])
    all_w = G.mapping(torch.from_numpy(all_z).to(device), None)
    # Truncation trick: pull each W toward the average W by `truncation_psi`.
    w_avg = G.mapping.w_avg
    all_w = w_avg + (all_w - w_avg) * truncation_psi
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))}

    print('Generating style-mixed images...')
    image_dict = {}
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            w = w_dict[row_seed].clone()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = G.synthesis(w[np.newaxis], noise_mode=noise_mode)
            # Map synthesis output from roughly [-1, 1] to uint8 RGB.
            image = (image.permute(0, 2, 3, 1) * 127.5 +
                     128).clamp(0, 255).to(torch.uint8)
            image_dict[(row_seed, col_seed)] = image[0].cpu().numpy()

    print('Saving images...')
    imageArr = []
    parmArr = []
    for (row_seed, col_seed), image in image_dict.items():
        imageArr.append(image_to_base64(PIL.Image.fromarray(image, 'RGB')))
        parmArr.append([row_seed, col_seed])
    return imageArr, parmArr


def base64_to_image(base64_str) -> PIL.Image.Image:
    """Decode a base64 string (or bytes) into a PIL image."""
    raw_bytes = base64.b64decode(base64_str)
    return PIL.Image.open(BytesIO(raw_bytes))


def run_projection(
    base_raw: str,
    seed: int = 666,
    num_steps: int = 50
):
    """Project a base64-encoded image into G's latent space.

    The target image is decoded, center-cropped to a square, resized to the
    generator's resolution, and optimized for `num_steps` steps via
    `project()`. The image re-synthesized from the final W vector is
    returned as base64-encoded JPEG bytes.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Decode the target and center-crop it to the largest square.
    target_pil = base64_to_image(base_raw).convert('RGB')
    width, height = target_pil.size
    side = min(width, height)
    left = (width - side) // 2
    top = (height - side) // 2
    target_pil = target_pil.crop((left, top, left + side, top + side))
    target_pil = target_pil.resize(
        (G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
    target_np = np.array(target_pil, dtype=np.uint8)

    # Optimize the latent codes to reproduce the target (CHW layout).
    w_steps = project(
        G,
        target=torch.tensor(target_np.transpose(
            [2, 0, 1]), device=device),  # pylint: disable=not-callable
        num_steps=num_steps,
        device=device,
        verbose=True
    )

    # Synthesize from the final projected W and map the output from
    # roughly [-1, 1] to uint8 RGB.
    final_w = w_steps[-1]
    synth = G.synthesis(final_w.unsqueeze(0), noise_mode='const')
    synth = (synth + 1) * (255 / 2)
    synth = synth.permute(0, 2, 3, 1).clamp(
        0, 255).to(torch.uint8)[0].cpu().numpy()
    return image_to_base64(PIL.Image.fromarray(synth, 'RGB'))


class Item(BaseModel):
    """Request body for the /getImages (style-mixing) endpoint."""

    # Seeds for the row images (forwarded as row_seeds).
    rows: list = []
    # Seeds for the column images (forwarded as col_seeds).
    cols: list = []
    # Style-layer indices to copy from column to row (forwarded as col_styles).
    styles: list = []
    # Forwarded as truncation_psi — despite the name, this is the truncation
    # factor, not a step count.
    steps: float


class Items1(BaseModel):
    """Request body for the /getProjection (latent projection) endpoint."""

    # Base64-encoded target image.
    raw: str
    # Random seed for the projection (numpy and torch are seeded with it).
    seed: int
    # Number of projection optimization steps (forwarded as num_steps).
    steps: int


@app.post("/getImages")
async def get_image(item: Item):
    # results = {"item": item}
    results, ids = generate_style_mix(row_seeds=item.rows,
                                      col_seeds=item.cols, col_styles=item.styles, truncation_psi=item.steps)
    return {"images": results, "params": ids}


@app.post("/getProjection")
async def get_projection(item1: Items1):
    result = run_projection(
        base_raw=item1.raw, seed=item1.seed, num_steps=item1.steps)
    return {"image": result}

# ----------------------------------------------------------------------------

# if __name__ == "__main__":
#     generate_style_mix()  # pylint: disable=no-value-for-parameter

# ----------------------------------------------------------------------------
