#!/usr/bin/python
#coding:utf-8
'''
Image style transfer (V1 algorithm) built on a pretrained VGG network.

VGG-16 has 16 weight layers: 13 convolutional and 3 fully connected.
The script transforms an image by matching style features and content features.
'''
import numpy as np
import os
#import match
import tensorflow as tf 
from PIL import Image 
import time


VGG_MEAN =[103.939,116.779, 123.68]  # per-channel pixel means in B, G, R order (subtracted from b/g/r in build())

class VGGNet():
    '''
    VGG-16 graph builder.

    Constructs the VGG-16 network structure and fills in the weights from a
    pretrained parameter dict. The dict is loaded from vgg16.npy with
    encoding='bytes', so its layer-name keys are bytes (hence the b'conv1_1'
    style names passed to the layer builders below).
    '''
    def __init__(self,data_dict):
        # data_dict: layer name (bytes) -> [weights, biases] from the pretrained model
        self.data_dict=data_dict
    
    def get_conv_filter(self,name):
        '''
        Return the pretrained convolution kernel for layer `name` as a tf.constant
        (weights are frozen; only the generated image is trained).
        '''
        return tf.constant(self.data_dict[name][0],name ='conv')
    
    def get_fc_weight(self,name):
        '''
        Return the pretrained fully-connected weight matrix for layer `name`.
        '''
        return tf.constant(self.data_dict[name][0],name='fc')

    def get_bias(self,name):
        '''
        Return the pretrained bias vector for layer `name` (index 1 of the entry).
        '''
        return tf.constant(self.data_dict[name][1],name='bias')
    
    def conv_layer(self,x,name):
        '''
        Build a stride-1, SAME-padded convolution + bias + ReLU layer using the
        frozen pretrained kernel for `name`.
        '''
        with tf.name_scope(name):
            conv_w=self.get_conv_filter(name)
            conv_b=self.get_bias(name)
            h= tf.nn.conv2d(x,conv_w,[1,1,1,1],padding='SAME')
            h= tf.nn.bias_add(h,conv_b)
            h= tf.nn.relu(h) # activation
            return h
        
    def pooling_layer(self,x,name):
        '''
        Build a 2x2 max-pooling layer with stride 2 (halves the spatial size).
        '''
        return tf.nn.max_pool(x,ksize=[1,2,2,1],
                                strides=[1,2,2,1],
                                padding='SAME',
                                name=name)
    
    def fc_layer(self,x,name,activation=tf.nn.relu):
        '''
        Build a fully-connected layer. Pass activation=None to get raw logits
        (used for the final fc8 layer before softmax).
        '''
        with tf.name_scope(name):
            fc_w=self.get_fc_weight(name)
            fc_b =self.get_bias(name)
            h= tf.matmul(x,fc_w)
            h = tf.nn.bias_add(h,fc_b)
            if activation is None:
                 return h
            else:
                return activation(h)
    
    def flatten_layer(self,x,name):
        '''
        Flatten [batch, h, w, ch] into [batch, h*w*ch] to feed the FC layers.
        '''
        with tf.name_scope(name):
            #[batch_size,image_width,image_height,channel]
            x_shape= x.get_shape().as_list()
            dim=1
            for d in x_shape[1:]:
                dim *= d 
            
            x=tf.reshape(x,[-1,dim])
            return x

    def build(self,x_rgb):
        '''
        Build the full VGG-16 computation graph on top of the given input.

        args:
            x_rgb: [1,224,224,3] RGB image tensor
        '''
        start_time = time.time()
        print('building model....')

        # VGG was trained on mean-subtracted BGR images: split the RGB channels,
        # subtract the per-channel means and re-concatenate in B, G, R order.
        r,g,b =tf.split(x_rgb,[1,1,1],axis=3)
        x_bgr = tf.concat([b-VGG_MEAN[0],
                            g- VGG_MEAN[1],
                            r- VGG_MEAN[2]],
                            axis=3)
        
        assert x_bgr.get_shape().as_list()[1:] == [224,224,3]

        # Convolution stacks; bytes layer names match the bytes keys in data_dict
        self.conv1_1=self.conv_layer(x_bgr,b'conv1_1')
        self.conv1_2=self.conv_layer(self.conv1_1,b'conv1_2')
        self.pool1 = self.pooling_layer(self.conv1_2,'pool1')

        self.conv2_1=self.conv_layer(self.pool1,b'conv2_1')
        self.conv2_2=self.conv_layer(self.conv2_1,b'conv2_2')
        self.pool2 = self.pooling_layer(self.conv2_2,'pool2')

        self.conv3_1=self.conv_layer(self.pool2,b'conv3_1')
        self.conv3_2=self.conv_layer(self.conv3_1,b'conv3_2')
        self.conv3_3=self.conv_layer(self.conv3_2,b'conv3_3')
        self.pool3 = self.pooling_layer(self.conv3_3,'pool3')

        self.conv4_1=self.conv_layer(self.pool3,b'conv4_1')
        self.conv4_2=self.conv_layer(self.conv4_1,b'conv4_2')
        self.conv4_3=self.conv_layer(self.conv4_2,b'conv4_3')
        self.pool4 = self.pooling_layer(self.conv4_3,'pool4')

        self.conv5_1=self.conv_layer(self.pool4,b'conv5_1')
        self.conv5_2=self.conv_layer(self.conv5_1,b'conv5_2')
        self.conv5_3=self.conv_layer(self.conv5_2,b'conv5_3')
        self.pool5 = self.pooling_layer(self.conv5_3,'pool5')

        # Flatten the last pooling layer to connect it to the FC layers
        self.flatten5 =self.flatten_layer(self.pool5, b'flatten' )
        self.fc6 = self.fc_layer(self.flatten5,b'fc6')
        self.fc7 = self.fc_layer(self.fc6,b'fc7')
        self.fc8 = self.fc_layer(self.fc7,b'fc8',activation=None)

        # Classification softmax (unused by the style-transfer losses below)
        self.prob = tf.nn.softmax(self.fc8,name=b'prob')

        print('building model finished: %4ds ' % (time.time()-start_time))


vgg16_npy_path = './vgg16.npy'
# Smoke test for the model-building code above:
# data_dict = np.load(vgg16_npy_path, allow_pickle=True, encoding='bytes').item()  # load pretrained VGG16 weights as a dict
# vgg16_for_result = VGGNet(data_dict)
# content = tf.placeholder(tf.float32, shape=[1,224,224,3])
# vgg16_for_result.build(content)

# Hyper-parameters / input paths
content_img_path = './gugong.jpg'    # content image
style_img_path = './xingkong.jpeg'   # style image

num_steps = 100      # number of optimization steps
learning_rate = 10   # Adam learning rate (large because we optimize pixels, not weights)

lambda_c = 0.1   # weight of the content loss
lambda_s = 500   # weight of the style loss

output_dir = './output'
# makedirs + exist_ok avoids the check-then-create race of `if not exists: mkdir`
# and also creates missing parent directories.
os.makedirs(output_dir, exist_ok=True)


# Initialize the trainable result image
def initial_result(shape,mean,stddev):
    '''
    Create the trainable image variable, initialized with Gaussian noise.

    args:
        shape: tensor shape, e.g. (1, 224, 224, 3)
        mean: mean of the normal distribution
        stddev: standard deviation of the normal distribution
    '''
    noise = tf.truncated_normal(shape, mean=mean, stddev=stddev)
    return tf.Variable(noise)


def read_img(img_name):
    '''
    Load an image file as an int32 numpy array with a leading batch axis.

    args:
        img_name: path to the image file (expected to be 224x224 RGB)
    returns:
        numpy array of shape (1, height, width, 3), dtype int32
    '''
    pixels = np.array(Image.open(img_name))          # (height, width, 3)
    return np.asarray([pixels], dtype=np.int32)      # (1, height, width, 3)

def gram_matrix(x):
    '''
    Compute the normalized Gram matrix of a feature map.

    args:
        x: features extracted from VGG Net, shape: [1, width, height, ch]
    returns:
        [batch, ch, ch] tensor of channel correlations
    '''
    batch, width, height, channels = x.get_shape().as_list()
    # Collapse the spatial dimensions: [batch, h*w, ch]
    flat = tf.reshape(x, [batch, height * width, channels])
    # [ch, h*w] x [h*w, ch] -> [ch, ch]; adjoint_a transposes the first operand
    normalizer = tf.constant(channels * width * height, tf.float32)
    return tf.matmul(flat, flat, adjoint_a=True) / normalizer



# Trainable output image, initialized with noise around mid-gray (mean 127.5)
result = initial_result((1,224,224,3),127.5,20)
content_val = read_img(content_img_path)
style_val = read_img(style_img_path)

content = tf.placeholder(tf.float32,shape = [1,224,224,3]) # content image input
style = tf.placeholder(tf.float32,shape = [1,224,224,3]) # style image input

# Pretrained VGG16 weights as {bytes layer name: [weights, biases]}.
# NOTE(review): allow_pickle=True executes pickled data on load — vgg16.npy must be trusted.
data_dict=np.load(vgg16_npy_path,allow_pickle=True,encoding='bytes').item()

# Three graph copies over the same frozen weight dict: one per input image
vgg_for_content= VGGNet(data_dict)
vgg_for_style= VGGNet(data_dict)
vgg_for_result= VGGNet(data_dict)

vgg_for_content.build(content)
vgg_for_style.build(style)
vgg_for_result.build(result)

# Content features: lower-level conv activations of the content image
content_features = [
                    vgg_for_content.conv1_2,
                    vgg_for_content.conv2_2,
                    # vgg_for_content.conv3_3,
                    # vgg_for_content.conv4_3,
                    # vgg_for_content.conv5_3
                    ]
# The same layers extracted from the generated result image
result_content_features = [
                    vgg_for_result.conv1_2,
                    vgg_for_result.conv2_2,
                    # vgg_for_result.conv3_3,
                    # vgg_for_result.conv4_3,
                    # vgg_for_result.conv5_3
                    ]
# Style features: a higher-level conv activation of the style image
style_features = [
                    # vgg_for_style.conv1_2,
                    # vgg_for_style.conv2_2,
                    # vgg_for_style.conv3_3,
                    vgg_for_style.conv4_3,
                    # vgg_for_style.conv5_3
                    ]

style_gram= [gram_matrix(feature) for feature in style_features]

# The matching style layer extracted from the generated result image
result_style_features = [
                    # vgg_for_result.conv1_2,
                    # vgg_for_result.conv2_2,
                    # vgg_for_result.conv3_3,
                    vgg_for_result.conv4_3,
                    # vgg_for_result.conv5_3
                    ]
result_style_gram = [gram_matrix(feature) for feature in result_style_features]

# Content loss: mean squared difference of matching activations
content_loss = tf.zeros(1,tf.float32)
for c,c_ in zip(content_features, result_content_features):
    content_loss += tf.reduce_mean((c - c_) ** 2,[1,2,3])  # average over h, w, channel axes

# Style loss: mean squared difference of Gram matrices
style_loss = tf.zeros(1,tf.float32)
for s,s_ in zip(style_gram, result_style_gram):
    style_loss += tf.reduce_mean((s - s_) ** 2,[1,2])  # average over both Gram-matrix axes

# Total loss is a weighted sum; only `result` is a Variable, so Adam updates the image pixels
loss= content_loss * lambda_c + style_loss * lambda_s
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    for step in range(num_steps):
        # One optimizer step; also fetch the current loss components for logging.
        loss_value,content_loss_value,style_loss_value,_=\
            sess.run([loss,content_loss,style_loss,train_op],
                    feed_dict={
                        content: content_val,
                        style: style_val,
                    })
        print('step: %d ,loss_value: %8.4f,content_loss: %8.4f ,style_loss: %8.4f' % (step+1,loss_value[0],content_loss_value[0],style_loss_value[0]))

        # Save a snapshot of the generated image after every step.
        result_img_path = os.path.join(output_dir,'result-%05d.jpg' % (step+1))
        # Variable.eval takes the session as its first positional argument.
        result_val = result.eval(sess)[0]
        result_val = np.clip(result_val,0,255) # clamp to the valid pixel range [0, 255]
        img_arr = np.asarray(result_val,np.uint8)
        img = Image.fromarray(img_arr)
        img.save(result_img_path)