"""

This script applies virtual makeup to a face, using a
segmentation model we trained with BiSeNet.

The model size is only 53M — pretty small and portable.


"""
import torch
import cv2
import os
import numpy as np
from skimage.filters import gaussian
import sys
from alfred.dl.torch.common import device
from model import BiSeNet
import torchvision.transforms as transforms
from PIL import Image


# Number of face-parsing classes the checkpoint was trained with.
n_classes = 19
net = BiSeNet(n_classes=n_classes)
net.to(device)
# Pretrained face-parsing weights (the ~53M checkpoint mentioned in the
# module docstring).
save_pth = 'res/cp/79999_iter.pth'
# NOTE(review): torch.load without map_location restores tensors onto the
# device they were saved from — may fail on CPU-only machines; confirm.
net.load_state_dict(torch.load(save_pth))
net.eval()
print('model loaded.')


def get_parsing_for_img(pil_img):
    """
    Run the BiSeNet face-parsing net on a PIL image.

    Args:
        pil_img: a PIL.Image; it is resized to the net's 512x512 input.

    Returns:
        A (512, 512) numpy array of per-pixel class indices in
        ``[0, n_classes)``.
    """
    # Standard ImageNet mean/std normalization used by the backbone.
    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    with torch.no_grad():
        image = pil_img.resize((512, 512), Image.BILINEAR)
        img = to_tensor(image).unsqueeze(0).to(device)
        out = net(img)[0]
        # argmax over the class channel -> (512, 512) label map.
        parsing = out.squeeze(0).cpu().numpy().argmax(0)
        return parsing


def sharpen(img):
    """
    Sharpen an 8-bit BGR image via unsharp masking.

    out = (img - gaussian_blur(img)) * alpha + img, then clamped to [0, 255].

    Args:
        img: HxWx3 uint8 image (values 0-255).

    Returns:
        HxWx3 uint8 sharpened image.
    """
    img = img * 1.0  # promote to float so the subtraction below can go negative
    # NOTE(review): `multichannel` was deprecated in skimage 0.19 and removed
    # in 1.0; the replacement is channel_axis=-1 — update when the pinned
    # skimage version allows.
    gauss_out = gaussian(img, sigma=5, multichannel=True)
    alpha = 1.5
    img_out = (img - gauss_out) * alpha + img
    img_out = img_out / 255.0
    # A single clip covers both undershoot (<0) and overshoot (>1); the
    # original's boolean-mask arithmetic computed exactly this and was
    # therefore redundant.
    img_out = np.clip(img_out, 0, 1)
    img_out = img_out * 255
    return np.array(img_out, dtype=np.uint8)


def makeup(image, parsing, part=17, color=(230, 50, 20)):
    """
    Recolor one parsed face region of a BGR image.

    Args:
        image: HxWx3 uint8 BGR image.
        parsing: HxW integer label map aligned with ``image``.
        part: class id of the region to recolor (e.g. 17 hair, 12/13 lips,
              4/5 eyes, 2/3 brows).
        color: target color as (b, g, r). Default is a tuple (not a list)
               to avoid the mutable-default-argument pitfall.

    Returns:
        A new HxWx3 uint8 BGR image; pixels outside ``part`` are untouched.
    """
    b, g, r = color  # e.g. [10, 50, 250] or [10, 250, 10]
    tar_color = np.zeros_like(image)
    tar_color[:, :, 0] = b
    tar_color[:, :, 1] = g
    tar_color[:, :, 2] = r
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV)
    # Lips (12/13), eyes (4/5) and brows (2/3) take the target hue AND
    # saturation; everything else (e.g. hair) takes hue only, preserving
    # the original saturation/value so texture survives.
    if part in (2, 3, 4, 5, 12, 13):
        image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]
    else:
        image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1]
    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)
    # Hair and brows get an extra sharpening pass.
    if part in (2, 3, 17):
        changed = sharpen(changed)
    # Restore all pixels outside the target region.
    changed[parsing != part] = image[parsing != part]
    return changed



if __name__ == '__main__':
    # BiSeNet face-parsing class ids used below:
    #  1 face, 2/3 brows, 4/5 eyes, 10 nose, 11 teeth,
    #  12 upper lip, 13 lower lip, 17 hair

    image_path = 'images/11.jpg'
    if len(sys.argv) > 1:
        image_path = sys.argv[1]
    print('making up for image: {}'.format(image_path))

    image = cv2.imread(image_path)
    # cv2.imread returns None (not an exception) on a missing/unreadable file.
    if image is None:
        sys.exit('could not read image: {}'.format(image_path))
    ori = image.copy()

    # cv2 loads BGR, while the net is normalized with ImageNet (RGB) stats
    # and was presumably trained on PIL RGB input — convert before parsing.
    # NOTE(review): confirm against the training pipeline.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    parsing = get_parsing_for_img(Image.fromarray(rgb))

    # cv2.resize takes dsize as (width, height), but image.shape is
    # (height, width, channels) — the original passed (h, w) and produced a
    # transposed label map on non-square images. Also cast the int64 argmax
    # output to uint8 (labels < 19 fit), since cv2.resize rejects int64.
    h, w = image.shape[:2]
    parsing = cv2.resize(
        parsing.astype(np.uint8), (w, h), interpolation=cv2.INTER_NEAREST)

    # Regions to recolor: hair, upper lip, lower lip.
    parts = [17, 12, 13]
    # 20, 20, 200 -> purple   255, 38, 139 -> blue   4, 181, 89 -> green
    colors = [[20, 20, 200], [100, 12, 230], [100, 12, 230]]
    for part, color in zip(parts, colors):
        image = makeup(image, parsing, part, color)

    ori_img = cv2.resize(ori, (512, 512))
    makedup_img = cv2.resize(image, (512, 512))
    cv2.imshow('image', ori_img)
    cv2.imshow('color', makedup_img)

    combined_img = np.hstack([ori_img, makedup_img])
    cv2.imwrite('res/makedup.png', combined_img)
    cv2.waitKey(0)
