import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import trange
import function
import model

# Loss weights and optimization schedule for the style transfer.
style_weight = 0.025             # weight of the style (Gram-matrix) loss term
content_weight = 1e4             # weight of the content-feature loss term
total_variation_weight = 1e8     # weight of the total-variation (smoothness) penalty
epochs = 10                      # outer optimization loop count
steps_per_epoch = 100            # gradient-descent steps per epoch

# 定义运行函数
# Run the style-transfer optimization.
def style_trans(content_byte, style_byte):
    """Run neural style transfer and return the stylized image as raw bytes.

    Args:
        content_byte: content image, in whatever form ``function.load_img``
            accepts (presumably an encoded image byte string — TODO confirm
            against ``function.load_img``).
        style_byte: style image, same format as ``content_byte``.

    Returns:
        The raw float pixel buffer of the optimized image (``tobytes`` of the
        underlying array — NOT an encoded JPEG/PNG); the caller must know the
        image shape and dtype to interpret it.
    """
    # Load the content and style images.
    content_img = function.load_img(content_byte)
    style_img = function.load_img(style_byte)

    # Content is captured by one deep VGG layer; style by several layers
    # spanning shallow to deep.
    content_layers = ['block5_conv1']
    style_layers = ['block1_conv1',
                    'block2_conv1',
                    'block3_conv1',
                    'block4_conv1',
                    'block5_conv1']

    # Layer counts, used below to normalize the per-layer losses.
    content_layers_number = len(content_layers)
    style_layers_number = len(style_layers)

    # Build the feature-extraction model and compute the fixed optimization
    # targets from the style and content images.
    TM = model.TransModel(style_layers, content_layers)
    style_targets = TM(style_img)['style']
    content_targets = TM(content_img)['content']

    def style_content_loss(outputs):
        """Weighted sum of style and content losses for one forward pass."""
        # Style-layer outputs (Gram matrices are already computed by the model).
        style_outputs = outputs['style']
        # Content-layer outputs (raw feature maps, no Gram matrix needed).
        content_outputs = outputs['content']
        # Mean-squared error against the fixed targets, averaged over layers.
        style_loss = tf.add_n([tf.reduce_mean((style_outputs[name] - style_targets[name]) ** 2)
                               for name in style_outputs])
        style_loss *= style_weight / style_layers_number
        content_loss = tf.add_n([tf.reduce_mean((content_outputs[name] - content_targets[name]) ** 2)
                                 for name in content_outputs])
        content_loss *= content_weight / content_layers_number
        return style_loss + content_loss

    # Optimize the image pixels directly. Start from the content image plus a
    # little truncated-normal noise so the optimizer does not start in a flat
    # region of the loss surface.
    opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)
    img = tf.Variable(content_img + tf.random.truncated_normal(content_img.shape, mean=0.0, stddev=0.08),
                      trainable=True)

    @tf.function()
    def train_step(image):
        """One gradient-descent step on the image pixels."""
        with tf.GradientTape() as tape:
            outputs = TM(image)
            loss = style_content_loss(outputs)
            # Total-variation term keeps the result spatially smooth.
            loss += total_variation_weight * function.total_variation_loss(image)

        grad = tape.gradient(loss, image)
        opt.apply_gradients([(grad, image)])
        # Clamp pixel values back into the valid [0, 1] range after each step.
        image.assign(function.clip_0_1(image))

    for _ in trange(epochs * steps_per_epoch):
        train_step(img)

    # BUG FIX: a tf.Tensor has no .tobytes() attribute, so the original
    # `img.read_value()[0].tobytes()` raised AttributeError; convert the
    # tensor to a NumPy array first.
    result_byte = img.read_value()[0].numpy().tobytes()
    return result_byte
# NOTE(review): removed a triple-quoted (commented-out) standalone-script
# duplicate of style_trans that loaded hard-coded image paths, ran the same
# optimization at module level, and displayed/saved the result via matplotlib
# and tf.image.encode_jpeg. It was dead code (a bare no-op string literal);
# recover it from version control if a CLI variant is ever needed.