# import tensorflow as tf  # intentionally disabled: this script runs on the TFLite runtime only
import time

import cv2
import numpy as np

from load_runtime import load_tflite_model

# Function to load an image from a file, and add a batch dimension.
def load_img(path_to_img, target_dim):
    """Load an image file as a normalized, batched RGB float array.

    Args:
        path_to_img: Filesystem path to the image file.
        target_dim: Edge length (pixels) of the square output.

    Returns:
        float32 array of shape (1, target_dim, target_dim, 3) with values
        min-max normalized into [0, 1].

    Raises:
        FileNotFoundError: If the image cannot be read from path_to_img.
    """
    image = cv2.imread(path_to_img)
    if image is None:
        # cv2.imread silently returns None instead of raising; fail loudly
        # here rather than with a confusing cvtColor error below.
        raise FileNotFoundError('Could not read image: %s' % path_to_img)
    # OpenCV loads BGR; the models downstream consume RGB.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # NOTE: a plain square resize does not preserve aspect ratio
    # (matches the original behavior of this script).
    image_resized = cv2.resize(image_rgb, (target_dim, target_dim))
    # Min-max normalization stretches the image's own min/max to [0, 1]
    # (not a fixed /255 scaling).
    image_normalized = cv2.normalize(image_resized, None, 0, 1,
                                     norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32FC3)
    # Add the batch dimension expected by the TFLite models.
    input_data = np.expand_dims(image_normalized, axis=0)
    return input_data

# Function to pre-process by resizing an central cropping it.
# Function to pre-process by resizing and central cropping it.
def preprocess_image(image, target_dim):
  """Aspect-preserving resize + center crop, without TensorFlow.

  The original implementation used tf.* ops, but the tensorflow import at
  the top of this file is commented out, so calling it raised NameError.
  This version reproduces the same intent with cv2/numpy, which this file
  does import: scale the image so its shorter side equals target_dim, then
  center-crop to a target_dim x target_dim square.

  Args:
      image: Batched image array of shape (1, H, W, C).
      target_dim: Edge length (pixels) of the square output.

  Returns:
      Array of shape (1, target_dim, target_dim, C).
  """
  height, width = image.shape[1], image.shape[2]
  scale = target_dim / min(height, width)
  # Pin the shorter side to exactly target_dim so no padding is ever needed;
  # the longer side rounds to >= target_dim.
  if height <= width:
    new_h, new_w = target_dim, int(round(width * scale))
  else:
    new_h, new_w = int(round(height * scale)), target_dim
  # cv2.resize takes (width, height); its default bilinear interpolation
  # matches tf.image.resize's default.
  resized = cv2.resize(image[0], (new_w, new_h))
  # Central crop the image.
  top = max((new_h - target_dim) // 2, 0)
  left = max((new_w - target_dim) // 2, 0)
  cropped = resized[top:top + target_dim, left:left + target_dim]
  return np.expand_dims(cropped, axis=0)

# Input image paths (hard-coded for a Raspberry Pi deployment).
content_path = '/home/pi/belfry-2611573_1280.jpg'
# content_path = '/home/pi/dog.jpg'
# content_path = '/home/pi/person.jpg'
style_path = '/home/pi/scream.jpg'

# TFLite model paths (magenta arbitrary-image-stylization, int8-quantized).
# "prediction" produces a style bottleneck from a style image;
# "transfer" applies a bottleneck to a content image (see functions below).
style_predict_path = '/home/pi/Lepi_Data/ros/style_transfer/magenta_arbitrary-image-stylization-v1-256_int8_prediction_1.tflite'
style_transform_path = '/home/pi/Lepi_Data/ros/style_transfer/magenta_arbitrary-image-stylization-v1-256_int8_transfer_1.tflite'

# Load the input images: 256px for the style model, 384px for the content.
preprocessed_style_image = load_img(style_path,256)
preprocessed_content_image = load_img(content_path,384)

# Preprocess the input images.
# (Disabled TF-based aspect-preserving preprocessing path.)
# preprocessed_content_image = preprocess_image(content_image, 384)
# preprocessed_style_image = preprocess_image(style_image, 256)

print('Style Image Shape:', preprocessed_style_image.shape)
print('Content Image Shape:', preprocessed_content_image.shape)


# Function to run style prediction on preprocessed style image.
# Function to run style prediction on preprocessed style image.
def run_style_predict(preprocessed_style_image):
  """Compute the style bottleneck for a preprocessed style image.

  Args:
      preprocessed_style_image: Batched float32 image array matching the
          prediction model's input shape (here, produced by
          load_img(style_path, 256)).

  Returns:
      The style bottleneck as a numpy array (an owned copy, safe to use
      after the interpreter is garbage-collected).
  """
  # Load the model. NOTE(review): the model is re-loaded on every call;
  # hoist this if the function ends up on a hot path.
  interpreter = load_tflite_model(model_path=style_predict_path)

  # Tensors must be allocated before input can be set.
  interpreter.allocate_tensors()
  input_details = interpreter.get_input_details()
  interpreter.set_tensor(input_details[0]["index"], preprocessed_style_image)

  # Calculate style bottleneck.
  interpreter.invoke()
  output_index = interpreter.get_output_details()[0]["index"]
  # get_tensor() returns a copy. The previous interpreter.tensor(...)()
  # form returns a view into interpreter-owned memory, which is unsafe to
  # hold once the interpreter goes out of scope here.
  style_bottleneck = interpreter.get_tensor(output_index)

  return style_bottleneck

# Calculate style bottleneck for the preprocessed style image
# (used below as the target style for the transfer model).
style_bottleneck = run_style_predict(preprocessed_style_image)
print('Style Bottleneck Shape:', style_bottleneck.shape)


# Run style transform on preprocessed style image
# Run style transform on preprocessed content image.
def run_style_transform(style_bottleneck, preprocessed_content_image):
  """Apply a style bottleneck to a preprocessed content image.

  Args:
      style_bottleneck: Style bottleneck array from run_style_predict.
      preprocessed_content_image: Batched float32 content image matching
          the transfer model's input shape (here, load_img(content_path, 384)).

  Returns:
      The stylized image as a numpy array with a leading batch dimension
      (an owned copy, safe to use after the interpreter is garbage-collected).
  """
  # Load the model (re-loaded on every call, as in run_style_predict).
  interpreter = load_tflite_model(model_path=style_transform_path)

  # Allocate before setting inputs (ordering consistent with run_style_predict).
  interpreter.allocate_tensors()
  input_details = interpreter.get_input_details()

  # Set model inputs.
  # NOTE(review): assumes input 0 is the content image and input 1 is the
  # style bottleneck — confirm against the model's input details.
  interpreter.set_tensor(input_details[0]["index"], preprocessed_content_image)
  interpreter.set_tensor(input_details[1]["index"], style_bottleneck)
  interpreter.invoke()

  # Transform content image. get_tensor() copies the output out of
  # interpreter-owned memory (interpreter.tensor(...)() only returns a view).
  output_index = interpreter.get_output_details()[0]["index"]
  stylized_image = interpreter.get_tensor(output_index)

  return stylized_image
# (`import time` moved to the top-of-file import block.)

# Stylize the content image using the style bottleneck, timing the call.
start = time.time()
stylized_image = run_style_transform(style_bottleneck, preprocessed_content_image)
end = time.time()
print('Stylized: ', (end - start) * 1000)

# Visualize the output. The pipeline works in RGB end-to-end (load_img
# converts BGR->RGB), but cv2.imshow interprets arrays as BGR, so convert
# before display — otherwise red and blue channels appear swapped.
cv2.imshow('Stylized Image', cv2.cvtColor(stylized_image[0], cv2.COLOR_RGB2BGR))

# Calculate style bottleneck of the content image (prediction model input
# is 256px, hence the separate 256px load of the same content image).
style_bottleneck_content = run_style_predict(
    load_img(content_path, 256)
    )

# Define content blending ratio between [0..1].
# 0.0: 0% style extracted from content image.
# 1.0: 100% style extracted from content image.
content_blending_ratio = 0.5

# Blend the style bottlenecks of the style image and the content image.
style_bottleneck_blended = content_blending_ratio * style_bottleneck_content \
                           + (1 - content_blending_ratio) * style_bottleneck

# Stylize the content image using the blended style bottleneck.
start = time.time()
stylized_image_blended = run_style_transform(style_bottleneck_blended,
                                             preprocessed_content_image)
end = time.time()
print('Blended: ', (end - start) * 1000)

# Visualize the blended output (RGB -> BGR for display, as above).
cv2.imshow('Blended Stylized Image',
           cv2.cvtColor(stylized_image_blended[0], cv2.COLOR_RGB2BGR))

# Block until a key is pressed so the display windows stay open.
cv2.waitKey(0)