"""

This is not a Qt app; it performs attribute editing on any projected image
from the command line.
"""
import abc
from genericpath import exists
from models import get_pretrained
from models.dynamic_channel import set_uniform_channel_ratio, reset_generator
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from alfred.dl.torch.common import device
import time
import os


# Inference-only script: disable autograd globally.
torch.set_grad_enabled(False)
# Maps a human-friendly attribute name to the key of the corresponding
# editing-boundary tensor inside the pretrained 'boundary' bundle.
direction_map = {
    'smiling': '31_Smiling',
    'young': '39_Young',
    'wavy hair': '33_Wavy_Hair',
    'gray hair': '17_Gray_Hair',
    'blonde hair': '09_Blond_Hair',
    'eyeglass': '15_Eyeglasses',
    'mustache': '22_Mustache',
}
# Number of leading style vectors the boundary offset is applied to when
# editing a latent code (the remaining, finer layers are left untouched).
n_style_to_change = 12


def image_to_np(x):
    """Convert a single generated image tensor to an HWC uint8 numpy array.

    Args:
        x: tensor of shape (1, C, H, W) with values nominally in [-1, 1].

    Returns:
        numpy uint8 array of shape (H, W, C) with values in [0, 255].
    """
    assert x.shape[0] == 1
    x = x.squeeze(0).permute(1, 2, 0)  # CHW -> HWC
    x = (x + 1) * 0.5  # map [-1, 1] -> [0, 1]
    # Clip before the uint8 cast: out-of-range floats would otherwise wrap
    # around on conversion (e.g. 256.0 -> 0) instead of saturating.
    x = np.clip((x * 255).cpu().numpy(), 0, 255).astype('uint8')
    return x


def demo():
    """Attribute-editing demo for the AnyCost GAN generator.

    Loads a projected latent code, sweeps every attribute direction in
    ``direction_map`` over a range of strengths, and for each frame saves
    (and displays via OpenCV) the edited face next to the original image.
    Output frames are written to ``output/<frame>.png``.
    """
    output_dir = 'output'
    os.makedirs(output_dir, exist_ok=True)
    pretrained_type = 'generator'
    config_name = 'anycost-ffhq-config-f'

    # np_f = 'assets/lyx.npy'
    # ori_img_f = 'assets/lyx-project.png'

    np_f = 'assets/01_anne.npy'
    ori_img_f = 'assets/01_anne.jpg'

    g = get_pretrained(pretrained_type, config=config_name)
    # Channel ratio 1 = full-width (highest quality) sub-generator.
    set_uniform_channel_ratio(g, 1)
    g.target_res = 1024
    g.to(device)

    # Projected latent code: one sample, all style layers, 512-dim each.
    latent = torch.from_numpy(np.load(np_f)).view(1, -1, 512).to(device)
    mean_style = g.mean_style(10000)  # NOTE(review): unused below — confirm it can be dropped

    boundaries = get_pretrained('boundary', config_name)
    print(boundaries.keys())

    ori_im = cv2.imread(ori_img_f)

    fi = 0  # global frame counter used for output filenames
    for attr_name in direction_map:
        b = boundaries[direction_map[attr_name]].view(1, 1, -1).to(device)

        print('testing direction: ', attr_name)
        max_t = 60
        for i in range(-max_t//2, max_t):
            # BUG FIX: derive each frame's edit from the *original* latent.
            # The previous code updated `latent_edit` in place every
            # iteration, so the boundary offsets compounded and the edit
            # strength grew quadratically instead of sweeping linearly in i.
            latent_edit = latent.clone()
            latent_edit[:, :n_style_to_change] = latent[:,
                                                        :n_style_to_change] + b * (i/max_t) * 0.07
            t0 = time.time()
            out = g(latent_edit, noise=None, randomize_noise=False,
                    input_is_style=True)[0].clamp(-1, 1)
            c = time.time() - t0
            # print('cost: {}, fps: {}'.format(c, 1/c))
            a = image_to_np(out)
            a = cv2.cvtColor(a, cv2.COLOR_RGB2BGR)  # generator emits RGB; OpenCV wants BGR
            cv2.imwrite(os.path.join(
                output_dir, f'{fi}.png'), np.hstack([ori_im, a]))
            fi += 1
            cv2.imshow('ada', a)
            cv2.imshow('a', ori_im)
            cv2.waitKey(1)
    # Keep the last frame on screen until a key is pressed.
    cv2.waitKey(0)


if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    demo()
