import numpy as np
import tensorflow as tf


def batch_normalization(input,
						tfv_train_phase,
						ema_decay = 0.99,
                        eps = 1./(2**15),
                        use_scale = True,
                        use_shift = True,
                        name_scope = None):
	""" batch normalization, generally added before activation, can be used under multi-GPUs
	params:
		input: input tensor, 2nd-order [batch, features] for fc or 4th-order [batch, height, width, channels] for 2d conv
		tfv_train_phase: boolean scalar tensor; True selects batch statistics (and updates the EMA), False selects the EMA averages
		ema_decay: EMA decay rate for updating mean and variance
		eps: epsilon to prevent division by 0
		use_scale: whether to use scale (gamma)
		use_shift: whether to use shift (beta)
		name_scope: variable scope name for the variables created here
	"""
	# capture the reuse flag of the ENCLOSING scope before opening our own:
	# when it is True the EMA shadow variables were already created by a
	# previous call (e.g. another GPU tower), so the else-branch below must
	# look them up instead of applying the EMA a second time
	reuse = tf.get_variable_scope().reuse
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		shape = input.get_shape().as_list()
		assert len(shape) in [2, 4]
		n_out = shape[-1]	# number of features / channels being normalized

		# calculate mean and variance over every axis except the last (channel) axis
		if len(shape) == 2:
			batch_mean, batch_variance = tf.nn.moments(input, [0], name = 'moments')
		else:
			batch_mean, batch_variance = tf.nn.moments(input, [0, 1, 2], name = 'moments')
		ema = tf.train.ExponentialMovingAverage(decay = ema_decay, zero_debias = True)

		if not reuse or reuse == tf.AUTO_REUSE:
			# first construction: training path returns the batch statistics and,
			# via the control dependency, pushes them into the EMA as a side
			# effect; inference path reads the EMA shadow variables
			def mean_variance_with_update():
				with tf.control_dependencies([ema.apply([batch_mean, batch_variance])]):
					return (tf.identity(batch_mean), tf.identity(batch_variance))
			mean, variance = tf.cond(tfv_train_phase, mean_variance_with_update, lambda: (ema.average(batch_mean), ema.average(batch_variance)))
		else:
			# reused scope: ema.average() cannot retrieve the existing shadow
			# variables here, so locate them among the scope's globals by
			# comparing the tail of their op names against the expected names
			vars = tf.get_variable_scope().global_variables()
			transform = lambda s: '/'.join(s.split('/')[-5:])	# compare only the last few path components
			mean_name = transform(ema.average_name(batch_mean))
			variance_name = transform(ema.average_name(batch_variance))
			existed = {}
			for v in vars:
				if (transform(v.op.name) == mean_name):
					existed['mean'] = v
				if (transform(v.op.name) == variance_name):
					existed['variance'] = v
			mean, variance = tf.cond(tfv_train_phase, lambda: (batch_mean, batch_variance), lambda: (existed['mean'], existed['variance']))

		# normalization: (x - mean) / sqrt(variance + eps)
		std = tf.sqrt(variance + eps, name = 'std')
		output = (input - mean) / std

		# multiply gamma (learned scale, initialized to 1)
		if use_scale:
			weights = tf.get_variable('weights', [n_out], initializer = tf.ones_initializer, trainable = True)
			output = tf.multiply(output, weights)

		# add beta (learned shift, initialized to 0)
		if use_shift:
			biases = tf.get_variable('biases', [n_out], initializer = tf.zeros_initializer, trainable = True)
			output = tf.add(output, biases)

	return output


def fc(input,
	   output_size,
	   weights_initializer = tf.initializers.he_normal(),
	   weights_regularizer = None,
	   biases_initializer = tf.zeros_initializer,
	   biases_regularizer = None,
	   tfv_train_phase = None,
	   keep_prob = 0.9,
	   act_last = True,
	   name_scope = None):
	""" fully connected layer with optional batch norm, ReLU and dropout
	params:
		input: input tensor, 2nd-order - [batch_size, input_size]
		output_size: output dimension
		weights_initializer:
		weights_regularizer:
		biases_initializer: set to None to omit the bias term
		biases_regularizer:
		tfv_train_phase: training-phase flag; enables batch norm and phase-dependent dropout
		keep_prob: keeping probability of dropout while training
		act_last: set to False for the last layer so its logits go straight to softmax
		name_scope:
	"""
	# effective keep probability: keep_prob while training, 1.0 at inference;
	# without a phase flag dropout is effectively disabled (always 1.0)
	def effective_keep_prob(p):
		if tfv_train_phase is None:
			return p * 0.0 + 1.0
		return (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0

	with tf.variable_scope(name_scope):
		# affine transform: x @ W (+ b)
		in_dim = input.get_shape()[-1].value
		tfv_weights = tf.get_variable('var_weights', [in_dim, output_size], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		output = tf.matmul(input, tfv_weights, name = 'output_mal')
		if biases_initializer is not None:
			tfv_biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases, name = 'output_add')

		# batch norm (when a phase flag is given), then ReLU, then dropout;
		# skipped entirely for the final pre-softmax layer
		if act_last is True:
			if tfv_train_phase is not None:
				output = batch_normalization(output, tfv_train_phase, name_scope = 'batch_norm')
			output = tf.nn.relu(output, name = 'relu')
			output = tf.nn.dropout(output, keep_prob = effective_keep_prob(keep_prob), name = 'dropout')

	return output


def conv_2d(input,
			output_chs,
			filter_shape,
			strides = [1, 1],
			asymmetric = False,
			filter_initializer = tf.initializers.he_normal(),
			filter_regularizer = None,
			biases_initializer = tf.zeros_initializer,
			biases_regularizer = None,
			tfv_train_phase = None,
			act_first = True,
			name_scope = None):
	""" normal 2d convolution, activation function is ReLU (batch norm + ReLU applied after the conv)
	params:
		input: input tensor, 4th-order - [batch_size, input_height, input_width, input_chs]
		output_chs: number of output channels
		filter_shape: shape of a single filter, [height, width]
		strides: scanning stride of the filter along height and width
		asymmetric: when True an HxW conv is factorized into an Hx1 conv followed by a 1xW conv
		filter_initializer:
		filter_regularizer:
		biases_initializer: set to None to omit the bias terms
		biases_regularizer:
		tfv_train_phase: training-phase flag; batch normalization is only inserted when given
		act_first: whether this layer applies its own activation (resnetv2 defers the first conv's activation into the first block)
		name_scope:
	"""
	with tf.variable_scope(name_scope):
		input_chs = input.get_shape()[-1].value

		if asymmetric:
			assert input_chs == output_chs, 'asymmetric conv should only be used without changing channels.'
			# factorized kernels: vertical Hx1 followed by horizontal 1xW
			kernel_v = tf.get_variable('var_filter_1', [filter_shape[0], 1, input_chs, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
			kernel_h = tf.get_variable('var_filter_2', [1, filter_shape[-1], output_chs, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)

			# Hx1 convolution at stride 1, plus optional bias
			output = tf.nn.conv2d(input, kernel_v, [1, 1, 1, 1], 'SAME', name = 'output_conv_1')
			if biases_initializer is not None:
				bias_v = tf.get_variable('var_biases_1', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				output = tf.add(output, bias_v, name = 'output_1')

			# intermediate batch norm + ReLU between the two factorized convs
			if tfv_train_phase is not None:
				output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_1')
			output = tf.nn.relu(output, name = 'relu_1')

			# 1xW convolution carrying the requested stride, plus optional bias
			output = tf.nn.conv2d(output, kernel_h, [1] + strides + [1], 'SAME', name = 'output_conv_2')
			if biases_initializer is not None:
				bias_h = tf.get_variable('var_biases_2', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				output = tf.add(output, bias_h, name = 'output_2')
		else:
			# single full-size kernel
			kernel = tf.get_variable('var_filter', filter_shape + [input_chs, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)

			# convolution plus optional bias
			output = tf.nn.conv2d(input, kernel, [1] + strides + [1], 'SAME', name = 'output_conv')
			if biases_initializer is not None:
				bias = tf.get_variable('var_biases', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				output = tf.add(output, bias, name = 'output')

		# optional batch norm + ReLU on the layer's output
		if act_first:
			if tfv_train_phase is not None:
				output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm')
			output = tf.nn.relu(output, name = 'relu')

	return output


def dw_conv_2d(input,
			   output_chs,
			   filter_shape,
			   strides = [1, 1],
			   asymmetric = False,
			   filter_initializer = tf.initializers.he_normal(),
			   filter_regularizer = None,
			   biases_initializer = tf.zeros_initializer,
			   biases_regularizer = None,
			   tfv_train_phase = None,
			   name_scope = None):
	""" depthwise 2d convolution, activation function is ReLU
	params:
		input: input tensor, 4th-order - [batch_size, input_height, input_width, input_chs]
		output_chs: number of output channels, must be an integral multiple of input_chs
		filter_shape: shape of a single filter, [height, width]
		strides: scanning stride of the filter along height and width
		asymmetric: when True an HxW depthwise conv is factorized into an Hx1 conv followed by a 1xW conv
		filter_initializer:
		filter_regularizer:
		biases_initializer: set to None to omit the bias terms
		biases_regularizer:
		tfv_train_phase: training-phase flag; batch normalization is only inserted when given
		name_scope:
	"""
	assert output_chs % input.get_shape()[-1].value == 0, 'for depthwise convolution, the number of output channels must be integral multiple of that of input channels.'
	with tf.variable_scope(name_scope):
		input_chs = input.get_shape()[-1].value
		channel_multiplier = output_chs // input_chs

		if asymmetric:
			assert output_chs % input_chs == 0, 'asymmetric conv should only be used for grouped depthwise.'
			# factorized kernels: the Hx1 stage keeps the channel count
			# (multiplier 1); the 1xW stage applies the channel multiplier
			kernel_v = tf.get_variable('var_filter_1', [filter_shape[0], 1, input_chs, 1], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
			kernel_h = tf.get_variable('var_filter_2', [1, filter_shape[-1], input_chs, channel_multiplier], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)

			# Hx1 depthwise convolution at stride 1, plus optional bias
			output = tf.nn.depthwise_conv2d(input, kernel_v, [1, 1, 1, 1], 'SAME', name = 'output_conv_1')
			if biases_initializer is not None:
				bias_v = tf.get_variable('var_biases_1', [input_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				output = tf.add(output, bias_v, name = 'output_1')

			# intermediate batch norm + ReLU between the two factorized convs
			if tfv_train_phase is not None:
				output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_1')
			output = tf.nn.relu(output, name = 'relu_1')

			# 1xW depthwise convolution carrying the requested stride, plus optional bias
			output = tf.nn.depthwise_conv2d(output, kernel_h, [1] + strides + [1], 'SAME', name = 'output_conv_2')
			if biases_initializer is not None:
				bias_h = tf.get_variable('var_biases_2', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				output = tf.add(output, bias_h, name = 'output_2')
		else:
			# single full-size depthwise kernel
			kernel = tf.get_variable('var_filter', filter_shape + [input_chs, channel_multiplier], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)

			# depthwise convolution plus optional bias
			output = tf.nn.depthwise_conv2d(input, kernel, [1] + strides + [1], 'SAME', name = 'output_conv')
			if biases_initializer is not None:
				bias = tf.get_variable('var_biases', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				output = tf.add(output, bias, name = 'output')

		# batch norm (when a phase flag is given) + ReLU on the layer's output
		if tfv_train_phase is not None:
			output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm')
		output = tf.nn.relu(output, name = 'relu')

	return output


def resnekt_bottleneck(input,
					   middle_chs_1,
					   middle_chs_2,
					   output_chs,
					   filter_initializer = tf.initializers.he_normal(),
					   filter_regularizer = None,
					   biases_initializer = tf.zeros_initializer,
					   biases_regularizer = None,
					   tfv_train_phase = None,
					   downsample_first = True,
					   act_last = False,
					   name_scope = None):
	""" resnekt bottleneck v1 like resnetv2, i.e., activation first before convolution
	params:
		input: input tensor, 4th-order - [batch_size, input_height, input_width, input_chs], where input_chs is the hidden number of input channels
		middle_chs_1: number of bottleneck channels at the input side
		middle_chs_2: number of bottleneck channels at the output side
		output_chs: number of output channels
		filter_initializer:
		filter_regularizer:
		biases_initializer: set to None to omit the bias on the last 1x1 conv
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		downsample_first: whether let the bottleneck downsample feature maps (stride 2 on the projection shortcut and on the 3x3 conv)
		act_last: whether use activation for the output of last layer, since other bottlenecks in actual output without activation which is placed in the next bottleneck
		name_scope:
	"""
	with tf.variable_scope(name_scope):
		input_chs = input.get_shape()[-1].value

		# identical or linear projection, the latter should come after the input activation.
		# NOTE: `shortcut` captures the PRE-activation input here; `input` is then
		# rebound to its batch-normed + activated version below, so the identity
		# shortcut bypasses the activation while the projection shortcut uses it
		shortcut = input
		if tfv_train_phase is not None:
			input = batch_normalization(input, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_input')
		input = tf.nn.relu(input, name = 'relu_input')
		if input_chs != output_chs:
			# channel count changes: 1x1 projection shortcut (stride 2 when downsampling)
			tfv_filter_input = tf.get_variable('var_filter_input', [1, 1] + [input_chs, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
			if downsample_first:
				shortcut = tf.nn.conv2d(input, tfv_filter_input, [1, 2, 2, 1], 'SAME', name = 'shortcut')
			else:
				shortcut = tf.nn.conv2d(input, tfv_filter_input, [1, 1, 1, 1], 'SAME', name = 'shortcut')

		# the first 1x1 conv and the followed activation
		output = conv_2d(input, middle_chs_1, [1, 1], tfv_train_phase = tfv_train_phase, name_scope = 'conv_1x1')
		
		# the second 3x3 depthwise conv and the followed activation; stride 2 only
		# when the channel count changes AND downsampling is requested, matching
		# the stride chosen for the projection shortcut above
		if input_chs == output_chs or downsample_first is False:
			output = dw_conv_2d(output, middle_chs_2, [3, 3], asymmetric = True, tfv_train_phase = tfv_train_phase, name_scope = 'conv_3x3')
		else:
			output = dw_conv_2d(output, middle_chs_2, [3, 3], [2, 2], asymmetric = True, tfv_train_phase = tfv_train_phase, name_scope = 'conv_3x3')

		# filters, the last 1x1 conv (no activation here; pre-activation style)
		tfv_filter_ouput = tf.get_variable('var_filter_output', [1, 1] + [middle_chs_2, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
		output = tf.nn.conv2d(output, tfv_filter_ouput, [1, 1, 1, 1], 'SAME', name = 'conv_output')
		if biases_initializer is not None:
			tfv_biases_output = tf.get_variable('var_biases_output', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases_output, name = 'output')

		# residual connection
		output = tf.add(shortcut, output, name = 'output_final')

		# the last bottleneck should have activation to output
		if act_last:
			if tfv_train_phase is not None:
				output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_final')
			output = tf.nn.relu(output, name = 'relu_final')
		
	return output


def maxpool_2d(input,
			   ksize,
			   stride,
			   padding = 'SAME',
			   name_scope = None):
	""" 2D max pooling; with stride 2 a ksize one larger than the stride is typical
	params:
		input: input tensor, 4th-order - [batch_size, input_height, input_width, input_chs]
		ksize: size of the pooling window - [height, width]
		stride: stride of the pooling window along height and width - [height, width]
		padding: 'SAME' or 'VALID'; 'SAME' pads so the output covers every input position
		name_scope:
	"""
	with tf.variable_scope(name_scope):
		# expand [h, w] specs to NHWC form [1, h, w, 1]
		window = [1] + ksize + [1]
		steps = [1] + stride + [1]
		return tf.nn.max_pool(input, window, steps, padding, name = 'max_pool_output')


def avgpool_2d(input,
			   ksize,
			   stride,
			   padding = 'SAME',
			   is_gap = True,
			   name_scope = None):
	""" 2D average pooling; with is_gap the pooled maps are flattened into a vector (global average pooling)
	params:
		input: input tensor, 4th-order - [batch_size, input_height, input_width, input_chs]
		ksize: size of the pooling window - [height, width]
		stride: stride of the pooling window along height and width - [height, width]
		padding: 'SAME' or 'VALID'; 'SAME' pads so the output covers every input position
		is_gap: whether to flatten the pooled output to [batch_size, features]
		name_scope:
	"""
	with tf.variable_scope(name_scope):
		# expand [h, w] specs to NHWC form [1, h, w, 1]
		pooled = tf.nn.avg_pool(input, [1] + ksize + [1], [1] + stride + [1], padding, name = 'avg_pool_output')
		if not is_gap:
			return pooled
		# flatten all non-batch dimensions into a single feature axis
		flat_size = np.prod(pooled.get_shape().as_list()[1:])
		return tf.reshape(pooled, [-1, flat_size])
