import numpy as np
import tensorflow as tf


def _lstm_cell(input_x,
			   input_y,
			   input_c,
			   output_dim,
			   weights_initializer = tf.glorot_uniform_initializer,
			   weights_regularizer = None,
			   biases_initializer = tf.zeros_initializer,
			   biases_regularizer = None,
			   tfv_train_phase = None,
			   keep_prob = 0.9,
			   name_scope = None):
	""" single LSTM cell with peephole connections, based on 'LSTM: A Search Space Odyssey', should NOT be referenced outside this script
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: cell state from the last time step - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		weights_initializer: initializer for the input/recurrent weight matrices
		weights_regularizer: regularizer for the input/recurrent weight matrices
		biases_initializer: initializer for the peephole/bias vectors
		biases_regularizer: regularizer for the peephole/bias vectors
		tfv_train_phase: flag of whether is training; when None, dropout is disabled
		keep_prob: keeping probability of dropout
		name_scope: variable scope holding the cell's variables
	returns:
		(output_y, output_c): current output and current cell state after dropout - each [batch_size, output_dim]
	"""
	# effective keep probability: interpolates between keep_prob (training,
	# flag == 1) and 1.0 (inference, flag == 0), so dropout is a no-op
	# outside of training
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	with tf.variable_scope(name_scope):
		input_dim = input_x.shape[-1].value

		def _gate_variables(gate_name, peephole = True):
			""" create the variables of one gate: input weight W, recurrent weight R, peephole vector p (None when disabled) and bias b """
			W = tf.get_variable('var_weight_' + gate_name, [input_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			R = tf.get_variable('var_recurrent_' + gate_name, [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			p = tf.get_variable('var_peephole_' + gate_name, [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True) if peephole else None
			b = tf.get_variable('var_bias_' + gate_name, [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			return W, R, p, b

		# define variables of the four gates (the state gate has no peephole)
		W_f, R_f, p_f, b_f = _gate_variables('forget')
		W_i, R_i, p_i, b_i = _gate_variables('input')
		W_z, R_z, _, b_z = _gate_variables('state', peephole = False)
		W_o, R_o, p_o, b_o = _gate_variables('output')

		# calculate forget gate (peephole looks at the OLD state input_c)
		output_f = tf.nn.sigmoid(tf.matmul(input_x, W_f) + tf.matmul(input_y, R_f) + input_c * p_f + b_f)

		# calculate input gate (peephole looks at the OLD state input_c)
		output_i = tf.nn.sigmoid(tf.matmul(input_x, W_i) + tf.matmul(input_y, R_i) + input_c * p_i + b_i)

		# calculate state gate (candidate cell values, no peephole)
		output_z = tf.nn.tanh(tf.matmul(input_x, W_z) + tf.matmul(input_y, R_z) + b_z)

		# calculate current state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate (peephole looks at the NEW state output_c)
		output_o = tf.nn.sigmoid(tf.matmul(input_x, W_o) + tf.matmul(input_y, R_o) + output_c * p_o + b_o)

		# calculate current output
		output_y = output_o * tf.nn.tanh(output_c)

	# hoist the keep probability so it is computed once for both dropout ops
	cur_keep_prob = dropout_rate(keep_prob)
	return tf.nn.dropout(output_y, keep_prob = cur_keep_prob, name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = cur_keep_prob, name = 'dropout_c')

def lstm_layer(input_seq,
			   hidden_dim,
			   tfv_train_phase = None,
			   keep_prob = 0.9,
			   initializer = tf.glorot_uniform_initializer,
			   reverse = False,
			   name_scope = None):
	""" LSTM layer, the input is unrolled into num_seq (the last dim of input) time steps through one shared LSTM cell (_lstm_cell)
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit(input dimension is input_dim)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed
		name_scope:
	returns:
		output sequence - [batch_size, hidden_dim, num_seq], aligned with the ORIGINAL time order even when reverse is True
	"""
	# fix: reuse = tf.AUTO_REUSE is required because the inner 'lstm_cell'
	# variable scope is entered once per time step; without it the second
	# tf.get_variable call raises 'Variable ... already exists'. AUTO_REUSE
	# creates the cell weights at step 0 and shares them across all steps,
	# as a standard LSTM layer requires.
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, start at zero
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# unroll: feed the time slices one by one into the shared cell
		l_outputs = []
		for i in range(num_seq):
			# pick the i-th slice, from the tail when the sequence is reversed
			time_idx = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, time_idx, axis = -1)
			cur_y, cur_c = _lstm_cell(cur_x, cur_y, cur_c, hidden_dim, weights_initializer = initializer, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# outputs were collected in reversed time order; restore input order
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def _kcp_lstm_cell(input_x,
				   input_y,
				   input_c,
				   output_dim,
				   input_modes,
				   output_modes,
				   ktd_rank,
				   cp_ranks_W,
				   cp_ranks_R = None,
				   weights_initializer = tf.glorot_uniform_initializer,
				   weights_regularizer = None,
				   biases_initializer = tf.zeros_initializer,
				   biases_regularizer = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   name_scope = None):
	""" single LSTM cell in KCP format, refer to _lstm_cell
	The dense input-side weight matrices W (and, when cp_ranks_R is given, the recurrent
	matrices R) are replaced by factorized cores contracted by _kcp_gated_matmul; peepholes
	and biases stay dense vectors of size output_dim.
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim (product of the modes must equal input_dim)
		output_modes: factorization of output_dim (product of the modes must equal output_dim)
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n);
			when None, the recurrent matrices are kept as dense [output_dim, output_dim] variables
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		(output_y, output_c): current output and current cell state after dropout - each [batch_size, output_dim]
	"""
	# NOTE(review): asserts are stripped under python -O; they only validate the factorizations
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
	assert len(input_modes) % 2 == 0, 'The value of orders must be even.'

	# dropout defination: keep probability interpolates to 1.0 (no-op) when
	# tfv_train_phase is 0 or not provided
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KCP gated matmul defination, new algo, consider only d is even.
	# Contracts the input [batch_size, prod(input_modes)] against the factor
	# cores two modes at a time, producing [batch_size, prod(output_modes)].
	# `cores` layout: appended in (mode i, rank k) order, A then B, so
	# A_{i,k} sits at index 2*ktd_rank*i + 2*k and B_{i,k} right after it.
	# NOTE(review): cp_ranks_in, cp_ranks_out and name are currently unused
	# by this implementation — confirm whether they are kept for API symmetry.
	def _kcp_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size, name):
		l_input_modes = input_modes
		l_output_modes = output_modes

		# put the batch_size at the end, i.e., (m_{1}m_{2}..., batch_size)
		cur_inp = tf.transpose(input)

		# process the modes in pairs: each iteration contracts two input
		# modes (m_{i}, m_{i+1}) and appends two output modes (n_{i}, n_{i+1})
		# NOTE(review): assumes d >= 2; with d == 0 `output` would be unbound
		for i in range(0, d, 2):
			# current input shape is (m_{3}m_{4}, m_{5}m_{6}...*batch_size*n_{1}n_{2}) where m_{3}m_{4} will be contracted this time
			cur_inp = tf.reshape(cur_inp, [l_input_modes[i] * l_input_modes[i + 1], -1])
			l_k_outputs = []

			# sum over the ktd_rank rank-one Kronecker terms
			for k in range(ktd_rank):
				# A_k_i * A_k_(i+1).T and B_k_i * B_k_(i+1).T
				# (rank index r_{k} is contracted, leaving (m_{i}, m_{i+1})
				# for A and (n_{i}, n_{i+1}) for B)
				matrix_A = tf.matmul(cores[2 * ktd_rank * i + 2 * k], cores[2 * ktd_rank * (i + 1) + 2 * k], transpose_b = True)
				matrix_B = tf.matmul(cores[2 * ktd_rank * i + 2 * k + 1], cores[2 * ktd_rank * (i + 1) + 2 * k + 1], transpose_b = True)

				# contract the m_{i}m_{i+1}
				matrix_A = tf.reshape(matrix_A, [1, -1])
				out_k = tf.matmul(matrix_A, cur_inp)

				# outer product to add n_{i}n_{i+1}
				matrix_B = tf.reshape(matrix_B, [1, -1])
				out_k = tf.reshape(out_k, [-1, 1])
				out_k = tf.matmul(out_k, matrix_B)

				l_k_outputs.append(out_k)

			# accumulate the ktd_rank partial results
			output = tf.math.add_n(l_k_outputs)
			if i != d - 2:
				# not the last pair: feed the partial result into the next contraction
				cur_inp = tf.identity(output)

		# reshape to (batch_size, n_{1}*...*n_{d})
		output = tf.reshape(output, [batch_size, -1])			
		return output

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input_x.shape[0].value

		# define variables of forget gate
		# W cores: one (A, B) pair per (mode i, rank k); appended A-then-B so
		# the flat index arithmetic in _kcp_gated_matmul holds
		l_W_f_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_f_core_A = tf.get_variable('var_W_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_f_core_B = tf.get_variable('var_W_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_f_cores.append(W_f_core_A)
				l_W_f_cores.append(W_f_core_B)
		if cp_ranks_R is not None:
			# recurrent side factorized as well: both A and B cores use output_modes
			l_R_f_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_f_core_A = tf.get_variable('var_R_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_f_core_B = tf.get_variable('var_R_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_f_cores.append(R_f_core_A)
					l_R_f_cores.append(R_f_core_B)
		else:
			# no recurrent factorization requested: fall back to a dense matrix
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of input gate (same core layout as the forget gate)
		l_W_i_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_i_core_A = tf.get_variable('var_W_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_i_core_B = tf.get_variable('var_W_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_i_cores.append(W_i_core_A)
				l_W_i_cores.append(W_i_core_B)
		if cp_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_i_core_A = tf.get_variable('var_R_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_i_core_B = tf.get_variable('var_R_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_i_cores.append(R_i_core_A)
					l_R_i_cores.append(R_i_core_B)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate (no peephole)
		l_W_z_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_z_core_A = tf.get_variable('var_W_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_z_core_B = tf.get_variable('var_W_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_z_cores.append(W_z_core_A)
				l_W_z_cores.append(W_z_core_B)
		if cp_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_z_core_A = tf.get_variable('var_R_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_z_core_B = tf.get_variable('var_R_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_z_cores.append(R_z_core_A)
					l_R_z_cores.append(R_z_core_B)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of output gate
		l_W_o_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_o_core_A = tf.get_variable('var_W_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_o_core_B = tf.get_variable('var_W_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_o_cores.append(W_o_core_A)
				l_W_o_cores.append(W_o_core_B)
		if cp_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_o_core_A = tf.get_variable('var_R_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_o_core_B = tf.get_variable('var_R_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_o_cores.append(R_o_core_A)
					l_R_o_cores.append(R_o_core_B)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# calculate forget gate (factorized W, peephole on OLD state input_c)
		output_W_f = _kcp_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_f')
		if cp_ranks_R is not None:
			output_R_f = _kcp_gated_matmul(input_y, l_R_f_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name = 'R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate
		output_W_i = _kcp_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_i')
		if cp_ranks_R is not None:
			output_R_i = _kcp_gated_matmul(input_y, l_R_i_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name = 'R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate (candidate cell values, no peephole)
		output_W_z = _kcp_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_z')
		if cp_ranks_R is not None:
			output_R_z = _kcp_gated_matmul(input_y, l_R_z_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name = 'R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate (peephole on the NEW state output_c)
		output_W_o = _kcp_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_o')
		if cp_ranks_R is not None:
			output_R_o = _kcp_gated_matmul(input_y, l_R_o_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name = 'R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')

def kcp_lstm_layer(input_seq,
				   hidden_dim,
				   input_modes,
				   output_modes,
				   ktd_rank,
				   cp_ranks_W,
				   cp_ranks_R = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   initializer = tf.glorot_uniform_initializer,
				   reverse = False,
				   name_scope = None):
	""" LSTM layer in KCP format, refer to lstm_layer; unrolls one shared KCP cell (_kcp_lstm_cell) over num_seq time steps
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit(input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed
		name_scope:
	returns:
		output sequence - [batch_size, hidden_dim, num_seq], aligned with the ORIGINAL time order even when reverse is True
	"""
	# fix: reuse = tf.AUTO_REUSE is required because the inner 'ktd_lstm_cell'
	# variable scope is entered once per time step; without it the second
	# tf.get_variable call would raise 'Variable ... already exists'.
	# AUTO_REUSE creates the cell weights at step 0 and shares them across
	# all subsequent time steps.
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, start at zero
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# unroll: feed the time slices one by one into the shared KCP cell
		l_outputs = []
		for i in range(num_seq):
			# pick the i-th slice, from the tail when the sequence is reversed
			time_idx = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, time_idx, axis = -1)
			cur_y, cur_c = _kcp_lstm_cell(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, ktd_rank, cp_ranks_W, cp_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ktd_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# outputs were collected in reversed time order; restore input order
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)
