import numpy as np
import tensorflow as tf


def fc(input,
	   output_size,
	   weights_initializer = tf.initializers.he_normal(),
	   weights_regularizer = None,
	   biases_initializer = tf.zeros_initializer,
	   biases_regularizer = None,
	   tfv_train_phase = None,
	   keep_prob = 0.9,
	   act_last = True,
	   name_scope = None):
	""" Fully connected layer with ReLU activation and dropout.

	The ReLU/dropout pair can be disabled (act_last = False) so the layer
	can feed a softmax directly when used as the network's last layer.

	params:
		input: 2nd-order input tensor - [batch_size, input_size]
		output_size: output dimension
		weights_initializer: initializer for the weight matrix
		weights_regularizer: regularizer for the weight matrix
		biases_initializer: initializer for the bias vector; pass None to skip the bias add
		biases_regularizer: regularizer for the bias vector
		tfv_train_phase: tensor flagging whether this is the training phase
		keep_prob: dropout keeping probability (only effective while training)
		act_last: apply ReLU + dropout when True; skip both when False
		name_scope: variable scope name for the layer's variables
	"""
	# Effective keep probability: interpolates to keep_prob while training
	# and to 1.0 (no dropout) at inference; constant 1.0 when no phase
	# flag is supplied.
	def _effective_keep_prob(p):
		if tfv_train_phase is None:
			return p * 0.0 + 1.0
		return (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0

	with tf.variable_scope(name_scope):
		# promote a 1-D input to a single-row batch
		if len(input.get_shape()) == 1:
			input = tf.expand_dims(input, 0)
		in_dim = input.get_shape()[-1].value

		# affine transform: x @ W (+ b)
		tfv_weights = tf.get_variable('var_weights', [in_dim, output_size], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		output = tf.matmul(input, tfv_weights, name = 'output_mal')
		if biases_initializer is not None:
			tfv_biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases, name = 'output_add')

		# nonlinearity and dropout, skipped for the last layer
		if act_last is True:
			output = tf.nn.relu(output, name = 'relu')
			output = tf.nn.dropout(output, keep_prob = _effective_keep_prob(keep_prob), name = 'dropout')

	return output


def fc_kcp(input,
		   output_size,
		   input_modes,
		   output_modes,
		   ktd_rank,
		   cp_ranks,
		   weights_initializer = tf.initializers.he_normal(),
		   weights_regularizer = None,
		   biases_initializer = tf.zeros_initializer,
		   biases_regularizer = None,
		   tfv_train_phase = None,
		   keep_prob = 0.9,
		   act_last = True,
		   name_scope = None):
	""" Fully connected layer whose weight matrix is stored in KCP format
	(Kronecker decomposition with CP-factorized cores), with ReLU and dropout.
	The ReLU/dropout pair can be canceled (act_last = False) so the layer can
	feed a softmax directly when used as the last layer.

	Instead of a dense [input_size, output_size] weight matrix, the layer
	learns, for each mode i and each KTD rank k, a pair of small factor
	matrices A (input side) and B (output side); the matmul is performed
	by contracting the input against these factors two modes at a time
	(see _kcp_gated_matmul).

	params:
		input: input tensor 2nd-order - [batch_size, input_size]; the batch
			dimension must be statically known (input.shape[0] is read below)
		output_size: output dimension
		input_modes: factorization of input_size (their product must equal input_size)
		output_modes: factorization of output_size (their product must equal output_size)
		ktd_rank: KTD rank (number of Kronecker terms summed per contraction step)
		cp_ranks: totally 2*ktd_rank CP ranks (top ktd_rank ranks along m, last ktd_rank ranks along n)
		weights_initializer: initializer for the factor matrices
		weights_regularizer: regularizer for the factor matrices
		biases_initializer: initializer for the bias vector; pass None to skip the bias add
		biases_regularizer: regularizer for the bias vector
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout (only effective while training)
		act_last: control whether cancel the activation function for the last layer
		name_scope: variable scope name for the layer's variables
	"""
	assert input.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_size == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
	# modes are consumed two at a time in _kcp_gated_matmul, so d must be even
	assert len(input_modes) % 2 == 0, 'The value of orders must be even.'

	# dropout definition: effective keep probability is keep_prob while
	# training and 1.0 at inference; constant 1.0 when no phase flag is given
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KCP gated matmul definition; new algorithm, handles only even d.
	# `cores` is the flat list built below: for mode i and rank k,
	# cores[2*ktd_rank*i + 2*k] is A_{i,k} ([input_modes[i], cp_ranks[k]]) and
	# cores[2*ktd_rank*i + 2*k + 1] is B_{i,k} ([output_modes[i], cp_ranks[k+ktd_rank]]).
	# NOTE(review): cp_ranks_in / cp_ranks_out are accepted but never used in
	# this body — the rank information is implicit in the core shapes.
	def _kcp_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size, name):
		l_input_modes = input_modes
		l_output_modes = output_modes

		# put the batch_size at the end, i.e., (m_{1}m_{2}..., batch_size)
		cur_inp = tf.transpose(input)

		# consume one pair of input modes per iteration (i, i+1)
		for i in range(0, d, 2):
			# current input shape is (m_{i+1}m_{i+2}, remaining_modes*batch_size*already_produced_n_modes);
			# the leading m_{i+1}m_{i+2} pair is contracted away this iteration
			cur_inp = tf.reshape(cur_inp, [l_input_modes[i] * l_input_modes[i + 1], -1])
			l_k_outputs = []

			# sum over the ktd_rank Kronecker terms for this mode pair
			for k in range(ktd_rank):
				# A_k_i * A_k_(i+1).T -> [m_i, m_{i+1}] and B_k_i * B_k_(i+1).T -> [n_i, n_{i+1}]
				matrix_A = tf.matmul(cores[2 * ktd_rank * i + 2 * k], cores[2 * ktd_rank * (i + 1) + 2 * k], transpose_b = True)
				matrix_B = tf.matmul(cores[2 * ktd_rank * i + 2 * k + 1], cores[2 * ktd_rank * (i + 1) + 2 * k + 1], transpose_b = True)

				# contract the m_{i}m_{i+1} pair: [1, m_i*m_{i+1}] @ [m_i*m_{i+1}, rest]
				matrix_A = tf.reshape(matrix_A, [1, -1])
				out_k = tf.matmul(matrix_A, cur_inp)

				# outer product to append the n_{i}n_{i+1} output modes
				matrix_B = tf.reshape(matrix_B, [1, -1])
				out_k = tf.reshape(out_k, [-1, 1])
				out_k = tf.matmul(out_k, matrix_B)

				l_k_outputs.append(out_k)

			# sum the rank-k contributions for this mode pair
			output = tf.math.add_n(l_k_outputs)
			if i != d - 2:
				# not the last pair yet: feed the partial result into the next step
				cur_inp = tf.identity(output)

		# reshape to (batch_size, n_{1}*...*n_{d})
		output = tf.reshape(output, [batch_size, -1])
		return output

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input.shape[0].value

		# define variables of KCP cores; append order (A then B, k inner, i
		# outer) fixes the flat-index layout relied on by _kcp_gated_matmul
		l_W_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks[k]]
				var_shape_B = [output_modes[i], cp_ranks[k + ktd_rank]]
				W_core_A = tf.get_variable('var_W_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_core_B = tf.get_variable('var_W_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_cores.append(W_core_A)
				l_W_cores.append(W_core_B)

		output = _kcp_gated_matmul(input, l_W_cores, input_modes, output_modes, ktd_rank, cp_ranks[0:ktd_rank], cp_ranks[ktd_rank:], d, batch_size, 'W')

		if biases_initializer is not None:
			tfv_biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases, name = 'output_add')

		# add activation and dropout, skipped for the last layer
		if act_last is True:
			output = tf.nn.relu(output, name = 'relu')
			output = tf.nn.dropout(output, keep_prob = dropout_rate(keep_prob), name = 'dropout')

	return output
