import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np

WEIGHT_INIT_STDDEV = 1e-3 	# std-dev for truncated-normal kernel init (DDcGAN uses 0.05)

SEED = 52113

# Seed both numpy and the TF graph so variable initialization is reproducible.
np.random.seed(SEED)
tf.set_random_seed(SEED)

class Generator(object):
	"""Fusion generator: fuses a visible/infrared image pair into one image.

	Thin wrapper around a ``Network`` built under the given variable scope.
	"""

	def __init__(self, sco):
		# Construct the conv network's variables under scope `sco` (e.g. 'Generator').
		self.net_param = Network(sco)

	def transform(self, vis, ir):
		"""Stack `vis` and `ir` on the channel axis and run the network.

		Args:
			vis: visible-light input, NHWC with 1 channel.
			ir: infrared input, NHWC with 1 channel.

		Returns:
			The fused single-channel image tensor.
		"""
		stacked = tf.concat([vis, ir], axis=-1)  # (bs, ps, ps, 2)
		return self.net_param.build_net(stacked)


class Network(object):
	"""Densely connected conv network (DenseFuse-style generator body).

	Every layer except the last concatenates its own input back onto its
	output, so input channel counts grow layer by layer:
	2 -> 256(+2=258) -> 128(+258=386) -> 64(+386=450) -> 32(+450=482)
	-> 16(+482=498) -> 8(+498=506); the final 1x1 conv maps 506 -> 1.
	"""

	def __init__(self, scope_name):
		self.scope = scope_name
		self.weight_vars = []	# (kernel, bias) pairs, one per conv layer, in forward order
		with tf.variable_scope(self.scope):		# nested under the caller's scope (e.g. 'Generator')
			self.weight_vars.append(self._create_variables(2, 256, 5, scope = 'conv1_1'))
			self.weight_vars.append(self._create_variables(258, 128, 3, scope = 'dense_block_conv1'))
			self.weight_vars.append(self._create_variables(386, 64, 3, scope = 'dense_block_conv2'))
			self.weight_vars.append(self._create_variables(450, 32, 3, scope = 'dense_block_conv3'))
			self.weight_vars.append(self._create_variables(482, 16, 3, scope = 'dense_block_conv4'))
			self.weight_vars.append(self._create_variables(498, 8, 3, scope = 'dense_block_conv5'))
			self.weight_vars.append(self._create_variables(506, 1, 1, scope = 'dense_block_conv6'))

	def _create_variables(self, input_filters, output_filters, kernel_size, scope):
		"""Create one conv layer's spectrally-normalized kernel and zero bias under `scope`.

		Returns:
			(kernel, bias): kernel of shape [k, k, in, out] after spectral
			normalization, and a zero-initialized bias of shape [out].
		"""
		shape = [kernel_size, kernel_size, input_filters, output_filters]
		with tf.variable_scope(scope):
			kernel = tf.Variable(tf.truncated_normal(shape, stddev = WEIGHT_INIT_STDDEV),
			                     name = 'kernel')
			kernel = weights_spectral_norm(kernel)

			bias = tf.Variable(tf.zeros([output_filters]), name = 'bias')
		return (kernel, bias)

	def build_net(self, image):
		"""Run `image` through all conv layers.

		Layers 0..5 use dense concatenation, batch norm and leaky ReLU;
		the final 1x1 layer (index 6) is a plain linear conv.
		"""
		dense_indices = [0,1,2,3,4,5]	# every layer except the last
		out = image
		for i in range(len(self.weight_vars)):
			kernel, bias = self.weight_vars[i]
			if i in dense_indices:
				out = conv2d(out, kernel, bias, dense = True, use_lrelu = True,
				             Scope = self.scope + '/bn' + str(i),BN=True)
			else:
				out = conv2d(out, kernel, bias, dense = False, use_lrelu = False,
				             Scope = self.scope + '/bn' + str(i),BN=False)
		return out



def conv2d(x, kernel, bias, dense = False, use_lrelu = True, Scope = None, BN = False):
	"""2-D convolution (stride 1, SAME padding) with optional BN, leaky ReLU, dense concat.

	Args:
		x: input tensor, NHWC.
		kernel: conv filter of shape [k, k, in_ch, out_ch].
		bias: bias vector of shape [out_ch].
		dense: if True, concatenate the layer input back onto the output channels.
		use_lrelu: if True, apply leaky ReLU after the (optional) batch norm.
		Scope: variable scope that holds the batch-norm variables.
		BN: if True, apply batch normalization after the convolution.

	Returns:
		The transformed tensor; with `dense`, channels = out_ch + in_ch.
	"""
	out = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME') + bias
	if BN:
		with tf.variable_scope(Scope):
			# BUG FIX: the batch-norm output was previously discarded (the call's
			# return value was never assigned), making BN a silent no-op.
			out = tf.contrib.layers.batch_norm(out, decay=0.9, updates_collections=None,
			                                   epsilon=1e-5, scale=True)
	if use_lrelu:
		out = lrelu(out)
	if dense:
		# Dense connectivity: stack the input back onto the output.
		out = tf.concat([out, x], axis=-1)
	return out

def weights_spectral_norm(weights, u=None, iteration=1, update_collection=None, reuse=False, name='weights_SN'):
    """Spectrally normalize `weights` (divide by its largest singular value).

    The kernel is flattened to a [k*k*in, out] matrix and the dominant
    singular value is estimated by power iteration, using a persistent,
    non-trainable variable `u` as the running singular-vector estimate.
    NOTE(review): only a single power-iteration step is performed here
    regardless of the `iteration` argument — confirm this is intended.

    Args:
        weights: conv kernel tensor, e.g. shape [5, 5, 2, 256].
        u: optional externally supplied estimate of shape [1, out]; created
           as a variable named 'u' when None.
        update_collection: None updates `u` in-place via a control
            dependency; 'NO_OPS' skips the update; any other name defers
            the update op to that graph collection.
        reuse: True at test/generation time to reuse trained variables.
        name: variable scope for the normalization's variables.

    Returns:
        The normalized kernel, reshaped back to `weights`' original shape.
    """
    with tf.variable_scope(name) as scope:
        if reuse:  # reuse the trained 'u' variable instead of creating it
            scope.reuse_variables()

        w_shape = weights.get_shape().as_list()  # e.g. [5, 5, 2, 256]
        w_mat = tf.reshape(weights, [-1, w_shape[-1]])  # flatten to [k*k*in, out], e.g. [50, 256]
        if u is None:
            u = tf.get_variable('u', shape=[1, w_shape[-1]], initializer=tf.truncated_normal_initializer(),
                                trainable=False)  # persistent singular-vector estimate, [1, out]

        def power_iteration(u, ite):  # one power-iteration step
            v_ = tf.matmul(u, tf.transpose(w_mat))  # [1,out] x [out,k*k*in]
            v_hat = l2_norm(v_)  # [1, k*k*in]
            u_ = tf.matmul(v_hat, w_mat)  # [1,k*k*in] x [k*k*in,out]
            u_hat = l2_norm(u_)  # [1, out]
            return u_hat, v_hat, ite + 1

        u_hat, v_hat, _ = power_iteration(u, iteration)

        sigma = tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat))  # estimated largest singular value, [1,1]

        w_mat = w_mat / sigma  # normalize so the spectral norm is ~1

        if update_collection is None:
            with tf.control_dependencies([u.assign(u_hat)]):  # persist the updated u before producing w_norm
                w_norm = tf.reshape(w_mat, w_shape)  # back to the original kernel shape
        else:
            if not (update_collection == 'NO_OPS'):  # defer the u update to the named collection
                print(update_collection)
                tf.add_to_collection(update_collection, u.assign(u_hat))

            w_norm = tf.reshape(w_mat, w_shape)  # back to the original kernel shape
        return w_norm

def l2_norm(input_x, epsilon=1e-12):
    """Divide `input_x` by its global Euclidean norm (epsilon guards /0)."""
    norm = tf.reduce_sum(input_x ** 2) ** 0.5
    return input_x / (norm + epsilon)

def lrelu(x, leak=0.2):
    """Leaky ReLU: identity for x >= 0, slope `leak` for x < 0."""
    # max(x, leak*x) picks x on the positive side and leak*x on the negative
    # side (for 0 < leak < 1); argument order is irrelevant to tf.maximum.
    return tf.maximum(leak * x, x)