import numpy as np
import tensorflow as tf
slim = tf.contrib.slim

# Fix all random seeds so numpy sampling and TF graph-level ops are reproducible.
SEED = 52113
np.random.seed(SEED)
tf.set_random_seed(SEED)  # TF1 graph-level seed (per-op seeds may still vary)

# z_num is the dimensionality the encoder compresses to (64 here); open question: should it be tuned?
# Open question: is the generated image the same size as the original input?
def DiscriminatorCNN(scope_name,x, input_channel, z_num, repeat_num, hidden_num, data_format):
	"""BEGAN-style discriminator: an autoencoder that encodes x into a
	z_num-dimensional code and decodes it back to an image.

	Args:
		scope_name: name of the variable scope all layers are created under.
		x: input image tensor; the shape comments below assume NCHW with an
			example config of (32, 3, 64, 64) — TODO confirm against caller.
		input_channel: channel count of the reconstructed output (e.g. 3).
		z_num: dimensionality of the bottleneck code z (e.g. 64).
		repeat_num: number of encoder/decoder stages (e.g. 4).
		hidden_num: base channel width for the conv layers (e.g. 128).
		data_format: forwarded to slim.conv2d; NOTE(review): the reshape/
			upscale helpers used below hard-code NCHW layout — confirm this
			is only ever called with data_format='NCHW'.

	Returns:
		out: decoder reconstruction (same spatial size as the input).
		z: bottleneck code, shape (batch, z_num).
		variables: every variable created under scope_name.
	"""
	# Example shapes: x=(32, 3, 64, 64), input_channel=3, z_num=64,
	# repeat_num=4, hidden_num=128, data_format='NCHW'.
	with tf.variable_scope(scope_name) as vs:
		# Encoder
		x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)

		# repeat_num stages, two 3x3 convs each; channels grow as
		# hidden_num*(idx+1) (e.g. 128-256-384-512) while a stride-2 conv
		# halves the spatial size between stages (e.g. 64-32-16-8).
		for idx in range(repeat_num):
			channel_num = hidden_num * (idx + 1)
			x = slim.conv2d(x, channel_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
			x = slim.conv2d(x, channel_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
			if idx < repeat_num - 1:
				# Downsample via stride-2 conv on all but the last stage.
				x = slim.conv2d(x, channel_num, 3, 2, activation_fn=tf.nn.elu, data_format=data_format)

		# Training images are square and downsampled symmetrically, so the
		# final feature map has height == width; grab one side length.
		hw = x.get_shape()[2]
		x = tf.reshape(x, [-1, np.prod([hw, hw, channel_num])])		# flatten to (batch, hw*hw*channels)
		z = x = slim.fully_connected(x, z_num, activation_fn=None)	# bottleneck code, (batch, z_num)

		# Decoder
		num_output = int(np.prod([hw, hw, hidden_num]))				# e.g. 8*8*128 = 8192
		x = slim.fully_connected(x, num_output, activation_fn=None)	# (batch, hw*hw*hidden_num)
		x = reshape(x, hw, hw, hidden_num)				# back to NCHW, e.g. (32, 128, 8, 8)

		for idx in range(repeat_num):
			x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
			x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
			if idx < repeat_num - 1:
				x = upscale(x, 2, data_format)	# nearest-neighbor 2x upsample between stages, e.g. 8-16-32-64
		# Final conv maps hidden_num -> input_channel channels, e.g. (32, 3, 64, 64).
		out = slim.conv2d(x, input_channel, 3, 1, activation_fn=None, data_format=data_format)

	# Collect every variable created inside the scope (all conv + FC params).
	variables = tf.contrib.framework.get_variables(vs)
	return out, z, variables		# reconstruction, bottleneck code, and all of D's variables


def reshape(x, h, w, c):
	"""Fold a flat (batch, h*w*c) tensor back into NCHW layout (batch, c, h, w)."""
	return tf.reshape(x, [-1, c, h, w])


def upscale(x, scale, data_format):
	"""Nearest-neighbor upsample an NCHW tensor by an integer factor.

	Args:
		x: input tensor; the implementation assumes NCHW layout.
		scale: integer upsampling factor for both spatial dimensions.
		data_format: accepted for interface symmetry with the conv helpers;
			not consulted here (NCHW is assumed).

	Returns:
		The upsampled tensor in NCHW layout, e.g. (b, c, h*scale, w*scale).
	"""
	_, _, h, w = x.get_shape().as_list()
	nhwc = tf.transpose(x, [0, 2, 3, 1])  # NCHW -> NHWC for tf.image
	resized = tf.image.resize_nearest_neighbor(nhwc, (h * scale, w * scale))
	return tf.transpose(resized, [0, 3, 1, 2])  # back to NCHW


