import tensorflow as tf


def conv2d(x, input_filters, output_filters, kernel, strides, mode='REFLECT'):
    """Convolution with explicit edge padding.

    The input is first padded by kernel//2 on each spatial side (default
    mode 'REFLECT' mirrors edge values), then convolved with padding='VALID',
    so a stride-1 call preserves the spatial size without a zero border.

    Args:
        x: 4-D input tensor [batch, height, width, in_channels].
        input_filters: number of input channels.
        output_filters: number of output channels.
        kernel: spatial size of the square kernel.
        strides: stride applied to both spatial dimensions.
        mode: padding mode forwarded to tf.pad.

    Returns:
        The convolved 4-D tensor.
    """
    with tf.variable_scope('conv'):
        # Filter layout: [filter_height, filter_width, in_channels, out_channels].
        weight_shape = [kernel, kernel, input_filters, output_filters]
        # Weights drawn from a truncated normal (values beyond 2 stddev redrawn).
        weight = tf.Variable(tf.truncated_normal(weight_shape, stddev=0.1),
                             name='weight')
        pad = kernel // 2
        x_padded = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode=mode)
        # 'VALID': no implicit padding -- all padding was done explicitly above.
        return tf.nn.conv2d(x_padded, weight, strides=[1, strides, strides, 1],
                            padding='VALID', name='conv')

def conv2d_transpose(x, input_filters, output_filters, kernel, strides):
    """Transposed ("de-")convolution that upsamples x by the stride factor.

    Args:
        x: 4-D input tensor [batch, height, width, input_filters].
        input_filters: number of input channels.
        output_filters: number of output channels.
        kernel: spatial size of the square kernel.
        strides: upsampling/stride factor for both spatial dimensions.

    Returns:
        A 4-D tensor with spatial dimensions scaled by `strides`.
    """
    with tf.variable_scope('conv_transpose'):
        # NOTE the transposed filter layout: [h, w, OUT_channels, IN_channels].
        weight = tf.Variable(
            tf.truncated_normal([kernel, kernel, output_filters, input_filters],
                                stddev=0.1),
            name='weight')

        # Output spatial size grows by the stride factor; batch size is dynamic.
        in_shape = tf.shape(x)
        output_shape = tf.stack([in_shape[0],
                                 in_shape[1] * strides,
                                 in_shape[2] * strides,
                                 output_filters])
        return tf.nn.conv2d_transpose(x, weight, output_shape,
                                      strides=[1, strides, strides, 1],
                                      name='conv_transpose')


def resize_conv2d(x, input_filters, output_filters, kernel, strides, training):
    """Upsample by nearest-neighbour resize, then apply a regular convolution.

    An alternative to transposed convolution that avoids checkerboard
    artifacts; see http://distill.pub/2016/deconv-checkerboard/

    The spatial shape must be statically known for gradient propagation
    through tf.image.resize_images, but a static shape is only available for
    a fixed image size, so the `training` flag selects between static and
    dynamic shape lookup.
    """
    with tf.variable_scope('conv_transpose'):
        if training:
            # Static shape (known at graph-construction time).
            height = x.get_shape()[1].value
            width = x.get_shape()[2].value
        else:
            # Dynamic shape (resolved at run time).
            height = tf.shape(x)[1]
            width = tf.shape(x)[2]

        # Resize by strides*2, then convolve with stride `strides`,
        # for a net upsampling factor of 2.
        new_size = [height * strides * 2, width * strides * 2]
        x_resized = tf.image.resize_images(x, new_size,
                                           tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        return conv2d(x_resized, input_filters, output_filters, kernel, strides)

def instance_norm(x):
    """Instance normalization: zero mean / unit variance per sample.

    Statistics are computed over the spatial axes (1, 2) of each sample
    independently, unlike batch norm which averages across the batch.
    """
    # Tiny smoothing term in the denominator avoids division by zero.
    epsilon = 1e-9
    # keep_dims=True preserves rank so the result broadcasts against x.
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    return (x - mean) / tf.sqrt(var + epsilon)

def batch_norm(x, size, training, decay=0.999):
    """Batch normalization with an exponential moving average for inference.

    Batch norm standardizes across the samples of a batch (instance norm
    works within a single sample).

    Args:
        x: 4-D input tensor.
        size: channel count (length of the per-channel parameters).
        training: boolean tensor; True uses batch statistics (and updates the
            moving averages), False uses the accumulated population statistics.
        decay: EMA decay factor for the population mean/variance.
    """
    # Learnable shift/scale, initialized to the identity transform.
    beta = tf.Variable(tf.zeros([size]), name='beta')
    scale = tf.Variable(tf.ones([size]), name='scale')
    # Running (population) statistics used at inference time.
    pop_mean = tf.Variable(tf.zeros([size]))
    pop_var = tf.Variable(tf.ones([size]))
    epsilon = 1e-3

    # Per-batch statistics over batch, height and width.
    batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
    update_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
    update_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))

    def use_batch_stats():
        # control_dependencies guarantees the EMA updates run whenever the
        # training branch is taken.
        with tf.control_dependencies([update_mean, update_var]):
            return tf.nn.batch_normalization(x, batch_mean, batch_var,
                                             beta, scale, epsilon,
                                             name='batch_norm')

    def use_population_stats():
        return tf.nn.batch_normalization(x, pop_mean, pop_var,
                                         beta, scale, epsilon,
                                         name='batch_norm')

    # Pick the branch at run time based on the `training` tensor.
    return tf.cond(training, use_batch_stats, use_population_stats)

# Activation function.
def relu(input):
    """ReLU activation that additionally maps NaN entries to 0.

    Returns a tensor of the same type as the input.
    """
    activated = tf.nn.relu(input)
    # NaN is the only value for which x != x, so tf.equal(x, x) marks the
    # non-NaN entries; tf.where keeps those and zeroes out the NaNs.
    return tf.where(tf.equal(activated, activated),
                    activated,
                    tf.zeros_like(activated))

# Residual block: H(x) = F(x) + x, where F is conv -> relu -> conv.
def residual(x, filters, kernel, strides):
    """Two convolutions with one ReLU in between, plus the identity shortcut.

    The learned branch F(x) models only the residual H(x) - x between the
    desired output H(x) and the input x.
    """
    with tf.variable_scope('residual'):
        branch = conv2d(x, filters, filters, kernel, strides)
        branch = conv2d(relu(branch), filters, filters, kernel, strides)
        return x + branch


def net(image, training):
    """Image transformation network.

    Architecture: 3 downsampling convolutions (depth 3 -> 32 -> 64 -> 128),
    5 residual blocks, 2 resize-convolution upsampling stages, and a final
    tanh-projected convolution back to 3 channels.

    Args:
        image: 4-D input batch [batch, height, width, 3].
        training: flag forwarded to resize_conv2d (static vs. dynamic shapes).

    Returns:
        Generated images scaled to [0, 255], spatially matching `image`.
    """
    # Reflect-pad a little before processing to reduce border artifacts;
    # the border is cropped away again at the end.
    image = tf.pad(image, [[0, 0], [10, 10], [10, 10], [0, 0]], mode='REFLECT')

    # Encoder: each stage is conv -> instance norm -> ReLU.
    with tf.variable_scope('conv1'):
        out = relu(instance_norm(conv2d(image, 3, 32, 9, 1)))
    with tf.variable_scope('conv2'):
        out = relu(instance_norm(conv2d(out, 32, 64, 3, 2)))
    with tf.variable_scope('conv3'):
        out = relu(instance_norm(conv2d(out, 64, 128, 3, 2)))

    # Five residual blocks at depth 128 (scopes res1 .. res5).
    for i in range(1, 6):
        with tf.variable_scope('res%d' % i):
            out = residual(out, 128, 3, 1)

    # Decoder: resize-convolutions regenerate the full-resolution image
    # (transposed-convolution variants left out; see resize_conv2d).
    with tf.variable_scope('deconv1'):
        out = relu(instance_norm(resize_conv2d(out, 128, 64, 3, 2, training)))
    with tf.variable_scope('deconv2'):
        out = relu(instance_norm(resize_conv2d(out, 64, 32, 3, 2, training)))
    with tf.variable_scope('deconv3'):
        # The spatial size already matches the padded input here, so the
        # last layer is a plain convolution; tanh squashes output to (-1, 1).
        out = tf.nn.tanh(instance_norm(conv2d(out, 32, 3, 9, 1)))

    # Map tanh's (-1, 1) range onto [0, 255].
    y = (out + 1) * 127.5

    # Crop the 10-pixel border added at the top to remove edge effects.
    height = tf.shape(y)[1]
    width = tf.shape(y)[2]
    y = tf.slice(y, [0, 10, 10, 0], tf.stack([-1, height - 20, width - 20, -1]))

    return y
