import math
import numpy as np
import tensorflow as tf
import t3f


def linear(input,
		   output_size,
		   weights_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
		   weights_regularizer = None,
		   biases_initializer = tf.zeros_initializer,
		   biases_regularizer = None,
		   name_scope = None):
	""" Fully-connected layer: output = input @ W (+ b).
	params:
		input: input tensor - [batch_size, input_size]
		output_size: number of output units
		weights_initializer: initializer for the weight matrix
		weights_regularizer: regularizer for the weight matrix
		biases_initializer: initializer for the bias vector; pass None to skip the bias
		biases_regularizer: regularizer for the bias vector
		name_scope: name of this layer
	"""
	with tf.variable_scope(name_scope):
		in_features = input.get_shape()[-1].value
		tfv_weights = tf.get_variable(
			'var_weights',
			[in_features, output_size],
			initializer = weights_initializer,
			regularizer = weights_regularizer,
			trainable = True)
		output = tf.matmul(input, tfv_weights, name = 'output_nb')

		# the bias term is optional: a None initializer disables it
		if biases_initializer is not None:
			tfv_biases = tf.get_variable(
				'var_biases',
				[output_size],
				initializer = biases_initializer,
				regularizer = biases_regularizer,
				trainable = True)
			output = tf.add(output, tfv_biases, name = 'output')

	return output


def conv_3d(input,
			output_chs,
			filter_shape,
			strides = [1, 1, 1],
			filter_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
			filter_regularizer = None,
			biases_initializer = tf.zeros_initializer,
			biases_regularizer = None,
			name_scope = None):
	""" 3D convolution layer with SAME padding.
	params:
		input: input tensor - [batch_size, input_depth, input_height, input_width, input_chs]
		output_chs: number of output channels
		filter_shape: spatial shape of the filter cube, [d, h, w]
		strides: conv stride per spatial dimension, [d, h, w]
		filter_initializer: initializer for the conv filter
		filter_regularizer: regularizer for the conv filter
		biases_initializer: initializer for the bias vector; pass None to skip the bias
		biases_regularizer: regularizer for the bias vector
		name_scope: name of this layer
	"""
	with tf.variable_scope(name_scope):
		in_chs = input.get_shape()[-1].value
		# filter layout expected by conv3d: [d, h, w, in_channels, out_channels]
		tfv_filter = tf.get_variable(
			'var_filter',
			filter_shape + [in_chs, output_chs],
			initializer = filter_initializer,
			regularizer = filter_regularizer,
			trainable = True)

		# batch and channel dimensions always move with stride 1
		conv_strides = [1] + strides + [1]
		output = tf.nn.conv3d(input, tfv_filter, conv_strides, 'SAME', name = 'output_nb')

		if biases_initializer is not None:
			tfv_biases = tf.get_variable(
				'var_biases',
				[output_chs],
				initializer = biases_initializer,
				regularizer = biases_regularizer,
				trainable = True)
			output = tf.add(output, tfv_biases, name = 'output')

	return output


# 3D max-pooling layer
def maxpool_3d(input,
			   ksize,
			   stride,
			   name_scope = None):
	""" 3D max-pooling layer with SAME padding.
	params:
		input: input tensor - [batch_size, input_depth, input_height, input_width, input_chs]
		ksize: pooling window shape - [d, h, w]
		stride: pooling stride - [d, h, w]
		name_scope: name of this layer
	"""
	with tf.variable_scope(name_scope):
		# batch and channel dimensions are never pooled, hence the leading/trailing 1
		window = [1] + ksize + [1]
		step = [1] + stride + [1]
		output = tf.nn.max_pool3d(input, window, step, 'SAME', name = 'output')

	return output


def linear_tt(input,
			  output_size,
			  input_modes,
			  output_modes,
			  tt_ranks,
			  weights_regularizer = None,
			  biases_initializer = tf.zeros_initializer,
			  biases_regularizer = None,
			  name_scope = None):
	""" Tensor-Train (TT) fully-connected layer.

	The dense weight matrix is stored in TT format (via t3f), replacing the
	prod(input_modes) x prod(output_modes) matrix with a chain of small cores.

	params:
		input: input tensor - [batch_size, input_size]
		output_size: number of output units; must equal prod(output_modes)
		input_modes: factorization of input_size; product must equal input_size
		output_modes: factorization of output_size; product must equal output_size
		tt_ranks: internal TT ranks; length must be len(input_modes) - 1
			(the two boundary ranks are fixed to 1 below)
		weights_regularizer: regularizer applied to the TT cores
		biases_initializer: initializer for the bias vector; pass None to skip the bias
		biases_regularizer: regularizer for the bias vector
		name_scope: name of this layer
	"""
	assert input.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_size == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
	assert len(tt_ranks) == len(input_modes) - 1, 'The number of TT ranks must be matching to the tensor modes.'

	with tf.variable_scope(name_scope):
		# TT weights: Glorot-style initializer over the TT matrix of shape
		# [prod(input_modes), prod(output_modes)]; boundary ranks are always 1.
		tt_initializer = t3f.glorot_initializer([input_modes, output_modes], tt_rank = [1] + tt_ranks + [1])
		tt_weights = t3f.get_variable('tt_weights', initializer = tt_initializer, regularizer = weights_regularizer, trainable = True)

		# renormalize_tt_cores redistributes the cores' norms evenly before the matmul
		output = t3f.matmul(input, t3f.renormalize_tt_cores(tt_weights))
		if biases_initializer is not None:
			tfv_biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.nn.bias_add(output, tfv_biases, name = 'output')

	return output


def linear_kcp(input,
               output_size,
               input_modes,
               output_modes,
               ktd_rank,
               cp_ranks,
               weights_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
               weights_regularizer = None,
               biases_initializer = tf.zeros_initializer,
               biases_regularizer = None,
               name_scope = None):
    """ KCP (Kronecker-CP) fully-connected layer.

    The weight matrix is represented as a sum over ktd_rank Kronecker terms;
    each term is built from per-mode factor matrices (A factors on the input
    modes, B factors on the output modes). The input is contracted two modes
    at a time, d/2 contraction steps in total.

    params:
        input: input tensor - [batch_size, input_size]
        output_size: number of output units; must equal prod(output_modes)
        input_modes: factorization of input_size; product must equal input_size
        output_modes: factorization of output_size; product must equal output_size
        ktd_rank: Kronecker (KT) rank, i.e. number of summed terms
        cp_ranks: 2*ktd_rank CP ranks; entries [0, ktd_rank) size the A
            factors, entries [ktd_rank, 2*ktd_rank) size the B factors
        weights_initializer: initializer for the CP factor matrices
        weights_regularizer: regularizer for the CP factor matrices
        biases_initializer: initializer for the bias vector; pass None to skip the bias
        biases_regularizer: regularizer for the bias vector
        name_scope: name of this layer
    """
    assert input.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
    assert output_size == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
    assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
    assert len(cp_ranks) == 2 * ktd_rank, 'Input and output ranks must be equal.'
    assert len(input_modes) % 2 == 0, 'd must be even'
    
    with tf.variable_scope(name_scope):
        d = len(input_modes)
        # NOTE(review): assumes a statically-known batch dimension;
        # input.shape[0].value is None for dynamic batches, which would break
        # the final reshape — confirm callers always feed a fixed batch size.
        batch_size = input.shape[0].value

        # KCP factor matrices, stored flat in the order
        # [A_{1,1}, B_{1,1}, A_{1,2}, B_{1,2}, ..., A_{d,K}, B_{d,K}],
        # so the A factor of (mode i, term k) sits at index 2*ktd_rank*i + 2*k
        # and its B factor at the next index.
        l_cores = []
        for i in range(d):
            for k in range(ktd_rank):
                var_shape_A = [input_modes[i], cp_ranks[k]]
                var_shape_B = [output_modes[i], cp_ranks[k + ktd_rank]]
                core_A = tf.get_variable('var_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
                core_B = tf.get_variable('var_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
                l_cores.append(core_A)
                l_cores.append(core_B)

        # Inference: contract two input modes per step. Padding both mode
        # lists with a trailing 1 keeps np.prod well-defined on the last step.
        l_input_modes = input_modes + [1]
        l_output_modes = output_modes + [1]
        cur_inp = tf.identity(input)

        for i in range(0, d, 2):
            # current input shape is (batch_size*n_{1}n_{2}, m_{3}m_{4}, m_{5}m_{6}...) where m_{3}m_{4} will be contracted this time
            cur_inp = tf.reshape(cur_inp, [-1, l_input_modes[i] * l_input_modes[i + 1], np.prod(np.array(l_input_modes[i+2:]), dtype=np.int32)])
            l_k_outputs = []

            for k in range(ktd_rank):
                # A_k_i * A_k_(i+1).T and B_k_i * B_k_(i+1).T
                # matrix_A: [m_i, m_{i+1}], matrix_B: [n_i, n_{i+1}]
                matrix_A = tf.matmul(l_cores[2 * ktd_rank * i + 2 * k], l_cores[2 * ktd_rank * (i + 1) + 2 * k], transpose_b = True)
                matrix_B = tf.matmul(l_cores[2 * ktd_rank * i + 2 * k + 1], l_cores[2 * ktd_rank * (i + 1) + 2 * k + 1], transpose_b = True)
                # outer product of the flattened factors gives the
                # [m_i*m_{i+1}, n_i*n_{i+1}] contraction matrix for term k
                matrix_A = tf.reshape(matrix_A, [-1, 1])
                matrix_B = tf.reshape(matrix_B, [1, -1])
                matrix_AB = tf.matmul(matrix_A, matrix_B)
                # contract the m-axis of the input against matrix_AB
                out_k = tf.einsum('bml,mn->bnl', cur_inp, matrix_AB)
                l_k_outputs.append(out_k)

            # sum over the ktd_rank Kronecker terms
            output = tf.math.add_n(l_k_outputs)
            if i != d - 2:
                cur_inp = tf.identity(output)

        # reshape to (batch_size, n_{1}*...*n_{d})
        output = tf.reshape(output, [batch_size, -1])

        if biases_initializer is not None:
            tfv_biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
            output = tf.nn.bias_add(output, tfv_biases, name = 'output')
    
    return output


def conv_3d_tt(input,
			   output_chs,
			   filter_shape,
			   input_ch_modes,
			   output_ch_modes,
			   tt_ranks,
			   strides = [1, 1, 1],
			   filter_regularizer = None,
			   biases_initializer = tf.zeros_initializer,
			   biases_regularizer = None,
			   name_scope = None):
	""" Tensor-Train (TT) 3D convolution layer.

	The conv filter tensor is stored in TT format (via t3f); the spatial
	filter volume d*h*w is split into a near-square factor pair (upper, lower)
	that is prepended to the input/output channel modes.

	params:
		input: input tensor - [batch_size, input_depth, input_height, input_width, input_chs]
		output_chs: number of output channels; must equal prod(output_ch_modes)
		filter_shape: spatial shape of the filter cube, [d, h, w]
		input_ch_modes: factorization of input_chs; product must equal input_chs
		output_ch_modes: factorization of output_chs; product must equal output_chs
		tt_ranks: internal TT ranks; length is len(input_ch_modes) for a normal
			conv (one extra mode holds the spatial factors) or
			len(input_ch_modes) - 1 for a 1x1x1 conv (no spatial mode)
		strides: conv stride per spatial dimension, [d, h, w]
		filter_regularizer: regularizer applied to the TT cores
		biases_initializer: initializer for the bias vector; pass None to skip the bias
		biases_regularizer: regularizer for the bias vector
		name_scope: name of this layer
	"""
	assert input.get_shape()[-1].value == np.prod(input_ch_modes), 'Input modes must be the factors of the value of input channels.'
	assert output_chs == np.prod(output_ch_modes), 'Output modes must be the factors of the value of output channels.'
	assert len(input_ch_modes) == len(output_ch_modes), 'Modes of input and output channels must be equal.'
	if np.prod(filter_shape) != 1:
		assert len(tt_ranks) == len(input_ch_modes), 'The number of TT ranks must be equal to the input or output modes.'
	if np.prod(filter_shape) == 1:
		assert len(tt_ranks) == len(input_ch_modes) - 1 , 'The number of TT ranks must be matching to the tensor modes for 1x1x1 conv.'

	with tf.variable_scope(name_scope):
		# Split the spatial filter volume d*h*w into two integer factors
		# (upper >= lower) as close to its square root as possible: walk
		# outward from floor/ceil(sqrt) until one of them divides the volume.
		# Terminates because lower eventually reaches 1, which divides anything.
		filters_size = np.prod(filter_shape)
		filters_sqrt = math.sqrt(filters_size)
		lower = int(filters_sqrt)
		upper = math.ceil(filters_sqrt)
		while (True):
			if filters_size % upper == 0:
				lower = filters_size // upper
				break
			elif filters_size % lower == 0:
				upper = filters_size // lower
				break
			else:
				lower -= 1
				upper += 1
		
		# TT filter matrix shape: prepend upper to the input modes and lower
		# to the output modes (upper goes first since input_ch_modes are
		# usually smaller); a 1x1x1 conv has no spatial mode at all.
		if upper == 1 and lower == 1 :
			filters_shape = [input_ch_modes, output_ch_modes]
		else:
			filters_shape = [[upper] + input_ch_modes, [lower] + output_ch_modes]
		tt_initializer = t3f.glorot_initializer(filters_shape, tt_rank = [1] + tt_ranks + [1])
		tt_filters = t3f.get_variable('tt_filters', initializer = tt_initializer, regularizer = filter_regularizer, trainable = True)

		# Materialize tt_filters as a dense tf.Tensor of shape
		# [upper*c_1*...*c_d, lower*s_1*...*s_d] by multiplying with identity;
		# renormalize_tt_cores evens out the cores' norms first.
		identity_matrix = tf.eye(np.prod(filters_shape[0]))
		filters = t3f.matmul(identity_matrix, t3f.renormalize_tt_cores(tt_filters))

		# reshape to upper * c_1 * c_2 *...* c_d * lower * s_1 * s_2 *...* s_d
		filters = tf.reshape(filters, [upper] + input_ch_modes + [lower] + output_ch_modes)

		# transpose to upper * lower * c_1 * c_2 *...* c_d * s_1 * s_2 *...* s_d
		inch_orders = []
		outch_orders = []
		d = len(input_ch_modes)
		for i in range(d):
			inch_orders.append(1 + i)
			outch_orders.append(2 + d + i)
		filters = tf.transpose(filters, [0, d + 1] + inch_orders + outch_orders)

		# reshape to upper*lower x c_1c_2...c_d x s_1s_2...s_d
		input_chs = np.prod(input_ch_modes)
		filters = tf.reshape(filters, [upper * lower, input_chs, output_chs])

		# Reshape to the normal conv filter layout [d, h, w, in_chs, out_chs].
		# NOTE(review): the (upper, lower) pair is mapped onto the d/h/w axes
		# purely by reshape order, so the spatial factor assignment is
		# arbitrary; the learned filter absorbs this, but confirm it is intended.
		filters = tf.reshape(filters, filter_shape + [input_chs] + [output_chs])

		output = tf.nn.conv3d(input, filters, [1] + strides + [1], 'SAME', name = 'output_nb')
		if biases_initializer is not None:
			tfv_biases = tf.get_variable('var_biases', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases, name = 'output')

	return output


def conv_3d_kcp(input,
				output_chs,
				filter_shape,
				kcp_r_a,
				kcp_r_b,
				tfv_train_phase,
				strides = [1,1,1],
				downsample = False,
				filter_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
				filter_regularizer = None,
				biases_initializer = tf.zeros_initializer,
				biases_regularizer = None,
				name_scope = None):
	""" KCP-factorized residual 3D conv block.

	Builds a 4-layer bottleneck (1x1x1 conv -> spatial HxW depthwise conv ->
	temporal depthwise conv -> 1x1x1 conv), each followed by optional bias,
	batch norm and ReLU (the last conv has no BN/ReLU), plus a residual
	shortcut that is linearly projected when the channel count changes.

	params:
		input: input tensor - [batch_size, input_depth, input_height, input_width, input_chs]
		output_chs: number of output channels
		filter_shape: shapes of filter cube, [d, h, w] (not used by the fixed
			3x3 / 3x1 depthwise kernels below)
		kcp_r_a: KCP rank of A (bottleneck width of layers 1-2)
		kcp_r_b: KCP rank of B (bottleneck width of layers 3-4);
			NOTE(review): layer 3 uses a channel multiplier of
			kcp_r_b // kcp_r_a, so kcp_r_b must be divisible by kcp_r_a or the
			reshape after it will fail — confirm callers guarantee this
		tfv_train_phase: boolean tensor for batch norm; None disables BN
		strides: conv stride (unused below; stride is driven by `downsample`)
		downsample: whether to spatially downsample within the block
		filter_initializer: initializer for all conv filters
		filter_regularizer: regularizer for all conv filters
		biases_initializer: initializer for the bias vectors; None skips biases
		biases_regularizer: regularizer for the bias vectors
		name_scope: name of this layer
	"""
	with tf.variable_scope(name_scope):
		input_chs = input.get_shape()[-1].value
		input_depth = input.get_shape()[1].value
		input_height = input.get_shape()[2].value
		input_width = input.get_shape()[3].value

		# Residual shortcut: identity when channels match, otherwise a 1x1x1
		# linear projection (stride 2 everywhere when downsampling).
		shortcut = input
		if input_chs != output_chs:
			tfv_filter_input = tf.get_variable('var_filter_input', [1, 1, 1] + [input_chs, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
			if downsample:
				shortcut = tf.nn.conv3d(input, tfv_filter_input, [1, 2, 2, 2, 1], 'SAME', name = 'shortcut')
			else:
				shortcut = tf.nn.conv3d(input, tfv_filter_input, [1, 1, 1, 1, 1], 'SAME', name = 'shortcut')

		# layer 1, the 1st 1x1x1 conv (channel reduction to kcp_r_a)
		tfv_filter_1 = tf.get_variable('var_filter_1', [1, 1, 1] + [input_chs, kcp_r_a], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
		if downsample:
			output = tf.nn.conv3d(input, tfv_filter_1, [1] + [2, 2, 2] + [1], 'SAME', name = 'output_conv_1')
		else:
			output = tf.nn.conv3d(input, tfv_filter_1, [1] + [1, 1, 1] + [1], 'SAME', name = 'output_conv_1')
		if biases_initializer is not None:
			tfv_biases_1 = tf.get_variable('var_biases_1', [kcp_r_a], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases_1, name = 'output_1')
		if tfv_train_phase is not None:
			output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_1')
		output = tf.nn.relu(output, name = 'relu_1')

		# layer 2, the 1st depthwise conv over H x W: fold depth into the
		# channel axis and use a 2D depthwise conv with a 3x3 kernel.
		# NOTE(review): these reshapes use the ORIGINAL input_depth/height/
		# width; when downsample=True layer 1 already halved all three, so the
		# element counts no longer match — this looks like a shape bug for the
		# downsample path; confirm against callers.
		output = tf.transpose(output, [0,2,3,1,4])
		output = tf.reshape(output, [-1, input_height, input_width, input_depth*kcp_r_a])
		tfv_filter_2 = tf.get_variable('var_filter_2', [3, 3] + [input_depth*kcp_r_a, 1], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
		output = tf.nn.depthwise_conv2d(output, tfv_filter_2, [1, 1, 1, 1], 'SAME', name = 'output_conv_2')
		output = tf.reshape(output, [-1, input_height, input_width, input_depth, kcp_r_a])
		output = tf.transpose(output, [0,3,1,2,4])
		if biases_initializer is not None:
			tfv_biases_2 = tf.get_variable('var_biases_2', [kcp_r_a], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases_2, name = 'output_2')
		if tfv_train_phase is not None:
			output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_2')
		output = tf.nn.relu(output, name = 'relu_2')

		# layer 3, the 2nd depthwise conv over the temporal axis: fold H*W
		# together and convolve along depth with a 3x1 kernel; the channel
		# multiplier kcp_r_b // kcp_r_a expands channels to kcp_r_b.
		# NOTE(review): when downsample=True this strides 2 AGAIN on top of
		# layer 1's stride 2, while the shortcut only downsamples once —
		# presumably one of the two was intended; verify.
		output = tf.reshape(output, [-1, input_depth, input_height*input_width, kcp_r_a])
		tfv_filter_3 = tf.get_variable('var_filter_3', [3, 1] + [kcp_r_a, kcp_r_b // kcp_r_a], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
		if downsample:
			output = tf.nn.depthwise_conv2d(output, tfv_filter_3, [1, 2, 2, 1], 'SAME', name = 'output_conv_3')
		else:
			output = tf.nn.depthwise_conv2d(output, tfv_filter_3, [1, 1, 1, 1], 'SAME', name = 'output_conv_3')
		output = tf.reshape(output, [-1, input_depth, input_height, input_width, kcp_r_b])
		if biases_initializer is not None:
			tfv_biases_3 = tf.get_variable('var_biases_3', [kcp_r_b], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases_3, name = 'output_3')
		if tfv_train_phase is not None:
			output = batch_normalization(output, tfv_train_phase, ema_decay = 0.997, name_scope = 'batch_norm_3')
		output = tf.nn.relu(output, name = 'relu_3')

		# layer 4, the 2nd 1x1x1 conv (channel expansion to output_chs, no BN/ReLU)
		tfv_filter_4 = tf.get_variable('var_filter_4', [1, 1, 1] + [kcp_r_b, output_chs], initializer = filter_initializer, regularizer = filter_regularizer, trainable = True)
		output = tf.nn.conv3d(output, tfv_filter_4, [1, 1, 1, 1, 1], 'SAME', name = 'output_conv_4')
		if biases_initializer is not None:
			tfv_biases_4 = tf.get_variable('var_biases_4', [output_chs], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases_4, name = 'output_4')

		# residual connection
		output = tf.add(shortcut, output, name = 'output_final')
		
	return output


def batch_normalization(input,
						tfv_train_phase,
						ema_decay = 0.99,
                        eps = 1e-3,
                        use_scale = True,
                        use_shift = True,
                        name_scope = None):
	""" Batch normalization, generally placed before ReLU.

	Training mode uses the current batch's moments and updates their
	exponential moving averages; inference mode uses the EMA values. When the
	enclosing variable scope is being reused, the EMA shadow variables created
	on the first pass are looked up by name instead of re-created.

	params:
		input: 2-D [batch, features] or 5-D [batch, d, h, w, chs] tensor
		tfv_train_phase: boolean tensor selecting train (True) vs inference mode
		ema_decay: decay rate of the moving-average moments
		eps: small constant added to the variance to avoid division by zero
		use_scale: whether to apply a learned scale (gamma)
		use_shift: whether to apply a learned shift (beta)
		name_scope: name of this layer
	"""
	reuse = tf.get_variable_scope().reuse
	with tf.variable_scope(name_scope):
		shape = input.get_shape().as_list()
		n_out = shape[-1]

		# Batch moments over all axes except the last (channel) axis.
		# NOTE(review): the else branch hard-codes axes [0, 1, 2, 3], i.e. it
		# assumes a 5-D input; a 4-D input would be reduced over its channel
		# axis too — confirm only 2-D and 5-D tensors reach this function.
		if len(shape) == 2:
			batch_mean, batch_variance = tf.nn.moments(input, [0], name = 'moments')
		else:
			batch_mean, batch_variance = tf.nn.moments(input, [0, 1, 2, 3], name = 'moments')
		ema = tf.train.ExponentialMovingAverage(decay = ema_decay, zero_debias = True)
		if not reuse or reuse == tf.AUTO_REUSE:
			# First construction of this scope: in training mode, apply the
			# EMA update as a side effect before returning the batch moments;
			# in inference mode, return the EMA shadow values.
			def mean_variance_with_update():
				with tf.control_dependencies([ema.apply([batch_mean, batch_variance])]):
					return (tf.identity(batch_mean), tf.identity(batch_variance))
			mean, variance = tf.cond(tfv_train_phase, mean_variance_with_update, lambda: (ema.average(batch_mean), ema.average(batch_variance)))
		else:
			# Scope is reused: the EMA shadow variables already exist but
			# ema.average() cannot retrieve them here, so locate them among the
			# existing variables by matching the last 5 name components of the
			# expected average name. No EMA update is applied on this path.
			vars = tf.get_variable_scope().global_variables()
			transform = lambda s: '/'.join(s.split('/')[-5:])
			mean_name = transform(ema.average_name(batch_mean))
			variance_name = transform(ema.average_name(batch_variance))
			existed = {}
			for v in vars:
				if (transform(v.op.name) == mean_name):
					existed['mean'] = v
				if (transform(v.op.name) == variance_name):
					existed['variance'] = v
			mean, variance = tf.cond(tfv_train_phase, lambda: (batch_mean, batch_variance), lambda: (existed['mean'], existed['variance']))

		# normalize: (x - mean) / sqrt(variance + eps)
		std = tf.sqrt(variance + eps, name = 'std')
		output = (input - mean) / std

		# learned scale (gamma)
		if use_scale:
			weights = tf.get_variable('weights', [n_out], initializer = tf.ones_initializer, trainable = True)
			output = tf.multiply(output, weights)

		# learned shift (beta)
		if use_shift:
			biases = tf.get_variable('biases', [n_out], initializer = tf.zeros_initializer, trainable = True)
			output = tf.add(output, biases)

	return output
