import os
import sys

import IPython.display
import cv2
import matplotlib as mpl

import pylab
import matplotlib.pyplot as plt
from keras.api.keras import preprocessing

from tensorflow import Variable
from tensorflow.python.training.adam import AdamOptimizer

# Global matplotlib defaults: a large default figure size and no axis grid.
mpl.rcParams['figure.figsize'] = (12, 15)
mpl.rcParams['axes.grid'] = False

import numpy as np
import time
from PIL import Image


# 定义加载图片函数
# Image loading helper
def load_img(path_to_img):
    """Load an image, scale its longest side down to 512 px, and return it as
    a float32 array with a leading batch dimension, shape (1, H, W, C) —
    the layout the CNN expects.
    """
    img = Image.open(path_to_img)   # load the image
    long = max(img.size)            # longest side (width or height)
    max_dim = 512
    scale = max_dim/long
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
    # resampling filter under its proper name (available since Pillow 2.7).
    img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.LANCZOS)
    # float32 conversion matches keras' img_to_array default behaviour,
    # without relying on the fragile `keras.api.keras` import path.
    img = np.asarray(img, dtype=np.float32)
    img = np.expand_dims(img, axis=0)  # add the batch dimension
    return img


# 定义图像显示函数
# Image display helper
def imshow(img, title):
    """Display a batched image (1, H, W, C): drop the batch axis, set the
    title, and render it."""
    plt.title(title)
    plt.imshow(np.squeeze(img, axis=0))  # remove the batch dimension first
    pylab.show()

# Image paths come from the command line: argv[1] = style, argv[2] = content.
# style_path = "./test/StyleTransfer/input/Style/Style1.jpg"
style_path = sys.argv[1]

# content_path = './test/StyleTransfer/input/Content/Content1.jpg'
content_path = sys.argv[2]

# Preview the content image (left panel)
plt.subplot(1, 2, 1)
p_img = load_img(content_path).astype('uint8')
imshow(p_img, "Content Image")

# Preview the style image (right panel)
plt.subplot(1, 2, 2)
p_img = load_img(style_path).astype('uint8')
imshow(p_img, "Style Image")


# Preprocessing mirrors how VGG was trained: each channel is mean-centred
# and the channels are in BGR order.
# import tensorflow.contrib.eager as tfe
import tensorflow as tf
from tensorflow.python.keras import models
from tensorflow.python.keras import losses
from tensorflow.python.keras import layers
from tensorflow.python.keras import backend as K

tf.compat.v1.enable_eager_execution()  # eager execution keeps the training loop clear and readable
print("Eager execution: {}".format(tf.executing_eagerly()))


def load_and_process_img(path_to_img):
    """Load an image and apply VGG19's input preprocessing
    (BGR channel order, per-channel mean subtraction)."""
    raw = load_img(path_to_img)
    return tf.keras.applications.vgg19.preprocess_input(raw)


# 查看优化后的输出结果，要求进行反转处理，反转后处理的值是在-00-+00之间，所以要对值进行限制，限制为0-255
# Invert the VGG preprocessing so the optimised image can be viewed. The raw
# values are unbounded after optimisation, so they are clipped to 0-255.
def deprocess_img(processed_img):
    """Undo vgg19.preprocess_input: re-add the per-channel ImageNet means,
    convert BGR back to RGB, and clip to valid uint8 pixels.

    Accepts (1, H, W, 3) or (H, W, 3); returns a (H, W, 3) uint8 array.
    Raises ValueError for any other rank.
    """
    x = processed_img.copy()   # work on a copy; never mutate the caller's array
    if len(x.shape) == 4:      # drop the batch dimension if present
        x = np.squeeze(x, 0)
    if len(x.shape) != 3:
        # Explicit raise instead of `assert`: assertions disappear under
        # `python -O`, and the original follow-up check was unreachable.
        raise ValueError("无效的图像！")
    # Re-add the per-channel (BGR) means subtracted during preprocessing.
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]           # BGR -> RGB
    x = np.clip(x, 0, 255).astype('uint8')  # clamp to displayable range
    return x


# 通过vgg19模型的中间层来创建模型
content_layers = ['block5_conv2']  # 特征映射层的内容层名字
style_layers = [                  # 需要的风格化的层的名字
    'block1_conv1',
    'block2_conv1',
    'block3_conv1',
    'block4_conv1',
    'block5_conv1'
]
num_content_layers = len(content_layers)
num_style_layers =len(style_layers)


# get_model()函数,加载VGG19模型并访问中间层，通过中间层来创建新模型，然后返回一个Keras模型，接收图像输入，并输出样式和内容中间层；即使用Keras的Functional API，通过期待的输出来激活和定义我们的模型
# Build a Keras model (Functional API) that takes an image and outputs the
# activations of the chosen style and content layers of VGG19.
def get_model():
    """Return a Model mapping an input image to [style activations...,
    content activations...], built on ImageNet-pretrained VGG19 without
    its classifier head. The VGG weights are frozen."""
    vgg = tf.keras.applications.vgg19.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False  # feature extractor only — never trained here
    # Style outputs first, then content outputs (order relied on elsewhere).
    selected = [vgg.get_layer(name).output
                for name in style_layers + content_layers]
    return models.Model(vgg.input, selected)

# 损失函数计算
# 计算内容图像损失值
# Content loss: mean squared difference between activation maps.
def get_content_loss(base_content, target):
    """Mean squared error between the generated and target content features."""
    diff = base_content - target
    return tf.reduce_mean(tf.square(diff))


#比较两个图像输出的gram矩阵
# Gram matrix of a feature map — the statistic compared for style.
def gram_matrix(input_tensor):
    """Flatten the spatial dimensions and return the (C x C) Gram matrix,
    normalised by the number of spatial positions."""
    num_channels = int(input_tensor.shape[-1])
    flat = tf.reshape(input_tensor, [-1, num_channels])
    positions = tf.shape(flat)[0]
    gram = tf.matmul(flat, flat, transpose_a=True)
    return gram / tf.cast(positions, tf.float32)

# 计算风格损失值
# Style loss for one layer.
def get_style_loss(base_style, gram_target):
    """Mean squared error between the Gram matrix of the generated features
    and the precomputed target Gram matrix.

    (The original unpacked height/width/channels here but never used them;
    the dead unpack is removed.)
    """
    gram_style = gram_matrix(base_style)
    return tf.reduce_mean(tf.square(gram_style - gram_target))


def get_feature_representations(model, content_path, style_path):
    """Run the style and content images through the model once and return
    (style_features, content_features), with the batch dimension stripped
    from every tensor."""
    content_image = load_and_process_img(content_path)
    style_image = load_and_process_img(style_path)
    # One forward pass per image.
    style_out = model(style_image)
    content_out = model(content_image)
    # Style activations come first in the model's outputs, content last.
    style_features = [layer[0] for layer in style_out[:num_style_layers]]
    content_features = [layer[0] for layer in content_out[num_style_layers:]]
    return style_features, content_features

# 计算损失
# Total loss for the current generated image.
def compute_loss(model, loss_weights, init_image, gram_style_features, content_features):
    """Return (total_loss, weighted_style_score, weighted_content_score)
    for the generated image `init_image`."""
    style_weight, content_weight = loss_weights
    outputs = model(init_image)
    style_outputs = outputs[:num_style_layers]
    content_outputs = outputs[num_style_layers:]

    # Each style layer contributes equally to the style score.
    style_score = 0
    per_style = 1.0 / float(num_style_layers)
    for target_gram, generated in zip(gram_style_features, style_outputs):
        style_score += per_style * get_style_loss(generated[0], target_gram)

    # Same equal weighting for the content layer(s).
    content_score = 0
    per_content = 1.0 / float(num_content_layers)
    for target_feat, generated in zip(content_features, content_outputs):
        content_score += per_content * get_content_loss(generated[0], target_feat)

    style_score *= style_weight
    content_score *= content_weight
    return style_score + content_score, style_score, content_score

# 计算梯度
def compute_grads(cfg):
    with tf.GradientTape() as tape:
        all_loss = compute_loss(**cfg)
    total_loss = all_loss[0]
    return tape.gradient(total_loss, cfg['init_image']),all_loss


model = get_model()
for layer in model.layers:
    layer.trainable = False  # freeze VGG — only the generated image is optimised
style_features, content_features = get_feature_representations(model, content_path, style_path)
gram_style_features = [gram_matrix(style_feature) for style_feature in style_features]
# The generated image is initialised as a copy of the content image.
init_image = load_and_process_img(content_path)
init_image = Variable(init_image, dtype=tf.float32)
# NOTE(review): learning_rate=5 is unusually large — presumably intentional
# for pixel-space optimisation; confirm before changing.
opt = AdamOptimizer(learning_rate=5, beta1=0.99, epsilon=1e-1)
best_loss, best_img = float('inf'), None
# Relative weighting of style vs content in the total loss.
style_weight = 1e-2
content_weight = 1e3
loss_weights = (style_weight, content_weight)
# Static arguments threaded through compute_grads/compute_loss every step.
cfg = {
    'model': model,
    'loss_weights': loss_weights,
    'init_image': init_image,
    'gram_style_features': gram_style_features,
    'content_features': content_features
}

import IPython.display  # NOTE(review): duplicate of the top-of-file import
num_iterations = 10
num_rows = 2
num_cols = 5
# Snapshot every num_iterations/(rows*cols) steps — here that is every step.
display_interval = num_iterations/(num_rows*num_cols)
start_time = time.time()
global_start = time.time()
# Valid pixel range after VGG mean-centring: [0, 255] shifted by the means.
norm_means = np.array([103.939, 116.779, 123.68])
min_vals = -norm_means
max_vals = 255-norm_means
imgs = []   # intermediate snapshots for the summary grid
count = 0   # snapshot file counter
# Optimisation loop: repeatedly nudge init_image to reduce the total loss.
for i in range(num_iterations):
    grads, all_loss = compute_grads(cfg)
    loss, style_score, content_score = all_loss
    opt.apply_gradients([(grads, init_image)])
    # Keep the image inside the valid (mean-centred) pixel range.
    clipped = tf.clip_by_value(init_image, min_vals, max_vals)
    init_image.assign(clipped)
    end_time = time.time()
    if loss < best_loss:
        # Track the best image seen so far, decoded back to displayable RGB.
        best_loss = loss
        best_img = deprocess_img(init_image.numpy())
    if i % (display_interval) == 0:
        start_time = time.time()
        plot_img = init_image.numpy()
        plot_img = deprocess_img(plot_img)
        count = count + 1
        os.makedirs("./StyleProcess", exist_ok=True)  # don't assume the dir exists
        # deprocess_img returns RGB, but OpenCV expects BGR — reverse the
        # channel order so the saved snapshot has correct colours.
        cv2.imwrite("./StyleProcess/%d.jpg" % count, plot_img[:, :, ::-1])
        imgs.append(plot_img)
        IPython.display.clear_output(wait=True)
        IPython.display.display_png(Image.fromarray(plot_img))
        print('Iteration: {}'.format(i))
        print('Total loss: {:.4e},'
              'style loss: {:.4e},'
              'content loss:{:.4e}'
              'time: {:.4f}s'.format(loss, style_score, content_score, time.time() - start_time))
print('Total time : {:4f}s'.format(time.time() - global_start))
IPython.display.clear_output(wait=True)
# Grid of all intermediate snapshots collected during optimisation.
plt.figure(figsize=(14, 4))
for i,img in enumerate(imgs):
    plt.subplot(num_rows, num_cols, i+1)
    plt.imshow(img)
    plt.xticks([])  # hide ticks — these are images, not plots
    plt.yticks([])
pylab.show()

# NOTE(review): the PIL image below is created but its result is discarded —
# it only renders when run as a notebook cell, not as a script.
Image.fromarray(best_img)
plt.imshow(best_img)


path = "D:/" + sys.argv[3] + "/Picture"
num = len(os.listdir(path))

# plt.savefig("./test/StyleTransfer/output/best_Style1.jpg")
plt.savefig("D:/" + sys.argv[3] + "/Picture/output" + str(num+1) + ".jpg")
print("D:/" + sys.argv[3] + "/Picture/output" + str(num+1) + ".jpg", end="")
pylab.show()






