#!/usr/bin/env python
# coding: utf-8
import sys
import os

import align_images_lib
import encode_images_lib
import fit_faces_lib

# # Improve your images
# 
# First make sure you select **GPU** as runtime type under Runtime > Runtime Type

# In[1]:


#os.system('pip install --force tqdm==4.24.0')


# In[2]:


#run_line_magic('tensorflow_version', '1.x')
import tensorflow as tf
# Sanity check: this script relies on TF1-style graph APIs (tf.Session, tf.gradients
# below), so the printed version is expected to be 1.x.
print(tf.__version__)


# ## Using Google Drive
# 
# Google Colab does not store data after you close the session.
# We will be using Google Drive to permanently store created images and latent vectors.
# 
# If you don't want to use Google Drive you can skip this step, just make sure you download the results you want to keep then.

# In[3]:


#from google.colab import drive
#drive.mount('/content/drive')
#run_line_magic('cd', '/content/drive/My\\ Drive')


# ## Getting started
# 
# Alright, now that the annoying part is done. Let's get started!
# 
# First we need to clone the Repo and open the directory.

# In[ ]:


#os.system('git clone https://github.com/ju-leon/RetouchML.git')


# In[ ]:


#run_line_magic('cd', 'RetouchML/')
#os.system('mkdir aligned_images raw_images alignement_vector out')
# Create the working directories the pipeline writes into.
# os.makedirs(..., exist_ok=True) replaces the racy exists()-then-makedirs check:
# it is atomic with respect to concurrent creation and reads as one idiom.
for p in ("aligned_images", "raw_images", "alignement_vector", "out"):
    os.makedirs(p, exist_ok=True)
#run_line_magic('ls', '')


# In[5]:


import glob, os
import PIL.Image


def display(im):
    """Show *im* (a PIL.Image) in the system image viewer.

    Notebook-export shim: replaces IPython's inline ``display`` with
    ``PIL.Image.show`` so the script works outside Jupyter/Colab.
    """
    im.show()
# ## Upload your image
# 
# To use your own image, upload it into the raw_images folder. Then hit run to see if it's there...

# In[9]:


# Preview every uploaded photo at one third of its size.
path = "raw_images/"
valid_images = (".jpg", ".png")
for entry in os.listdir(path):
    if not entry.endswith(valid_images):
        continue
    photo = PIL.Image.open(path + entry)
    third = (int(photo.size[0] / 3), int(photo.size[1] / 3))
    display(photo.resize(third, PIL.Image.LANCZOS))


# ## Aligning images
# 
# All there?
# Then let's go ahead and crop on the head.
# Run the following scripts to crop and see the results.

# In[10]:
#os.system('python align_images.py raw_images/ aligned_images/ alignement_vector/')
# Fetch the face-landmarks model, then crop each raw photo onto the detected face,
# saving the alignment transform alongside so the edit can be pasted back later.
detector_path = align_images_lib.install()
align_images_lib.exec(
    "raw_images/",
    "aligned_images/",
    "alignement_vector/",
    detector_path,
)

# In[11]:


# Preview the cropped faces as 256x256 thumbnails.
path = "aligned_images/"
for entry in os.listdir(path):
    if not entry.endswith(valid_images):
        continue
    face = PIL.Image.open(path + entry)
    display(face.resize((256, 256), PIL.Image.LANCZOS))


# ## Encoding
# 
# Now comes the fun part. We need to translate what we can see into a latent vector.
# for that we will use a StyleGan encoder.
# 
# This will take a while, so sit back and let Google's GPUs do their job...

# In[12]:


#os.system('python encode_images.py --help')


# In[21]:


#os.system('python encode_images.py aligned_images/ generated_images/ latent_representations/      --vgg_url=https://rolux.org/media/stylegan/vgg16_zhang_perceptual.pkl      --lr=0.4 --iterations=200      --use_best_loss=True --early_stopping=True --load_resnet=True')
# Configure and run the StyleGAN encoder: finds a latent vector whose generated
# image matches each aligned face (mirrors the commented encode_images.py CLI call).
args = encode_images_lib.init_args()
args.src_dir = 'aligned_images/'
args.generated_images_dir = 'generated_images'
args.dlatent_dir = 'latent_representations/'
# Perceptual-loss VGG16 weights (Zhang et al. mirror).
args.vgg_url = 'https://rolux.org/media/stylegan/vgg16_zhang_perceptual.pkl'
args.lr = 0.4
args.iterations = 200
args.use_best_loss = True
args.early_stopping = True
args.use_preprocess_input = False
# FIX: original hard-coded a Windows-only "data\\..." separator; os.path.join
# builds the same path portably on every OS.
args.load_resnet = os.path.join("data", "finetuned_resnet.h5")
args.resnet_image_size = 256
encode_images_lib.exec(args)

# ### Result
# 
# And here we go. The following image is the encoded image in latent space. It should look pretty similar to the original representation, although it might vary slightly.
# 
# 
# If you're not happy with the result try running the encoding again with different parameters.

# In[22]:


# Preview each reconstruction produced by the encoder.
path = "generated_images/"
for entry in os.listdir(path):
    if not entry.endswith(valid_images):
        continue
    recon = PIL.Image.open(path + entry)
    display(recon.resize((256, 256), PIL.Image.LANCZOS))


# In[26]:


import keras
# Keras model that scores a latent vector; its gradient drives the face edits below.
# NOTE(review): hard-coded Google-Drive/Colab mount path — this fails outside Colab;
# confirm the intended runtime environment or make the path configurable.
model = keras.models.load_model('/content/drive/My Drive/RetouchML/models/no_pca.keras')


# In[24]:


import math
import pickle
import PIL.Image
import numpy as np
#import config
import dnnlib
import dnnlib.tflib as tflib
from encoder.generator_model import Generator
# Pre-trained StyleGAN2 FFHQ generator weights (config-f, 1024px).
URL_FFHQ = 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl'
tflib.init_tf()
# NOTE(review): pickle.load on a network-downloaded file executes arbitrary code if
# the host is compromised; acceptable here only because the URL is fixed and trusted.
with dnnlib.util.open_url(URL_FFHQ, cache_dir="cache") as f:
    generator_network, discriminator_network, Gs_network = pickle.load(f)

# Deterministic single-image generator wrapper around the synthesis network.
generator = Generator(Gs_network, batch_size=1, randomize_noise=False)

# A 1024px model has 2*(log2(1024)-1) = 18 style layers, so latents are (18, 512).
model_res = 1024
model_scale = int(2*(math.log(model_res,2)-1))

def generate_raw_image(latent_vector):
    """Synthesize one image array from a (model_scale, 512) latent vector."""
    batch = latent_vector.reshape((1, model_scale, 512))
    generator.set_dlatents(batch)
    images = generator.generate_images()
    return images[0]

def generate_image(latent_vector):
    """Decode a latent vector into a PIL RGB image."""
    raw = generate_raw_image(latent_vector)
    return PIL.Image.fromarray(raw, 'RGB')


# #### Computing the gradient
# 
# In order to improve the face, we'll follow the gradient.
# This should give us a better version of the image that is still close to the original image.

# In[36]:


import copy
from tqdm import tqdm

# Load the encoded latent for one face and show the reconstruction before editing.
# NOTE(review): hard-coded Colab drive path — fails outside Colab; confirm environment.
latent_vector = np.load('/content/drive/My Drive/RetouchML/latent_representations/merkel_01.npy')
img = generate_image(latent_vector)
display(img.resize((512,512),PIL.Image.LANCZOS))

# Baseline score from the rating model before any gradient steps.
predictions = model.predict(latent_vector).reshape(-1)
print(np.mean(predictions))


# Two gradient-ascent steps on the latent, pushing the model's score upward.
for x in range(2):
  x_tensor = tf.convert_to_tensor(latent_vector, dtype=tf.float32)

  # NOTE(review): the GradientTape is never queried (t.gradient is not called);
  # under TF1 graph mode the tape only serves to build `output`, and the actual
  # gradient comes from the symbolic tf.gradients call below. Confirm intent.
  with tf.GradientTape() as t:
      t.watch(x_tensor)
      output = model(x_tensor)

  gradients = tf.gradients(output, x_tensor)

  # Evaluate the symbolic gradient in a TF1 session; reshape to the (18, 512)
  # latent layout expected by the generator.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    result_output=np.array(sess.run(gradients)).reshape(18,512)

  # Raw (unscaled) ascent step — no learning rate is applied to the gradient.
  latent_vector += result_output
  print(np.mean(model.predict(latent_vector)))
  img = generate_image(latent_vector)
  display(img.resize((512,512),PIL.Image.LANCZOS))

# Per-layer scores after the two edit steps.
predictions = model.predict(latent_vector).reshape(-1)
print(predictions)

# Render the edited face and persist it for the paste-back step below.
img = generate_image(latent_vector)
display(img.resize((512,512),PIL.Image.LANCZOS))
img.save("generated_images/merkel_01.png", "PNG")


# In[37]:


#/content/RetouchML/masks/IMG_20200209_150540_01.png
#/content/drive/My Drive/RetouchML/raw_images/pexels-anna-shvets-4672292.jpg
#/content/drive/My Drive/RetouchML/raw_images/pexels-sound-on-3756989.jpg
# Paste the edited (generated) face back into the original photo, using the
# alignment vectors saved during cropping (mirrors the commented fit_faces.py CLI).
img_name = "merkel.jpg"
raw_path = "raw_images/" + img_name
out_path = "out/" + img_name
face_path = "generated_images/"
mask_path = "masks/"
vector_path = "alignement_vector/"

args = fit_faces_lib.init_args()
install_path = fit_faces_lib.install()
args.src_file = raw_path
args.face_path = face_path
# BUG FIX: original read "ags.mask_path = mask_path" — a typo that raised
# NameError at runtime, so the mask path was never set.
args.mask_path = mask_path
args.face_landmarks_path = vector_path
args.dst_file = out_path
fit_faces_lib.exec(install_path, args)



# In[38]:


# Preview the final composited photos at one third of their size.
path = "out/"
valid_images = (".jpg", ".png")
for entry in os.listdir(path):
    if not entry.endswith(valid_images):
        continue
    result = PIL.Image.open(path + entry)
    third = (int(result.size[0] / 3), int(result.size[1] / 3))
    display(result.resize(third, PIL.Image.LANCZOS))


# #### Done
# 
# And that's it. The difference is subtle, but in direct comparison with the original images you can clearly see the changes.

# In[ ]:




