import numpy as np
import tensorflow as tf


def _lstmp_cell(input_x,
				input_y,
				input_c,
				state_dim,
				output_dim,
				weights_initializer = tf.glorot_uniform_initializer,
				weights_regularizer = None,
				biases_initializer = tf.zeros_initializer,
				biases_regularizer = None,
				tfv_train_phase = None,
				keep_prob = 0.9,
				name_scope = None):
	""" single LSTMP cell (LSTM with peephole connections and a linear output projection)
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time step - [batch_size, state_dim], is 0 for the initial time
		state_dim: dimension of the state (as output_dim in LSTM)
		output_dim: dimension of the output (projection from state_dim)
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		(output_y, output_c) after dropout - [batch_size, output_dim] and [batch_size, state_dim]
	"""
	# dropout definition: keep probability is keep_prob while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	input_dim = input_x.shape[-1].value

	def _gate_variables(gate_name, with_peephole = True):
		""" create the (W, R, p, b) parameters of one gate; p is None when with_peephole is False """
		W = tf.get_variable('var_weight_%s' % gate_name, [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		R = tf.get_variable('var_recurrent_%s' % gate_name, [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if with_peephole:
			p = tf.get_variable('var_peephole_%s' % gate_name, [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		else:
			p = None
		b = tf.get_variable('var_bias_%s' % gate_name, [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		return W, R, p, b

	# reuse = tf.AUTO_REUSE so that re-entering the same scope on later time steps
	# shares the cell variables instead of raising "variable already exists"
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		# gate parameters; the state (block-input) gate has no peephole
		W_f, R_f, p_f, b_f = _gate_variables('forget')
		W_i, R_i, p_i, b_i = _gate_variables('input')
		W_z, R_z, _, b_z = _gate_variables('state', with_peephole = False)
		W_o, R_o, p_o, b_o = _gate_variables('output')

		# projection from state_dim down to output_dim
		P = tf.get_variable('var_weight_project', [state_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)

		# forget and input gates peek at the previous cell state (peephole)
		output_f = tf.nn.sigmoid(tf.matmul(input_x, W_f) + tf.matmul(input_y, R_f) + input_c * p_f + b_f)
		output_i = tf.nn.sigmoid(tf.matmul(input_x, W_i) + tf.matmul(input_y, R_i) + input_c * p_i + b_i)

		# candidate state
		output_z = tf.nn.tanh(tf.matmul(input_x, W_z) + tf.matmul(input_y, R_z) + b_z)

		# new cell state
		output_c = output_f * input_c + output_i * output_z

		# output gate peeks at the NEW cell state
		output_o = tf.nn.sigmoid(tf.matmul(input_x, W_o) + tf.matmul(input_y, R_o) + output_c * p_o + b_o)

		# gated, squashed state projected to the output dimension
		output_y = tf.matmul(output_o * tf.nn.tanh(output_c), P)

	# dropout is applied to both the projected output and the cell state
	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _tt_lstmp_cell(input_x,
				   input_y,
				   input_c,
				   state_dim,
				   output_dim,
				   input_modes,
				   state_modes,
				   output_modes,
				   tt_ranks_W = None,
				   tt_ranks_R = None,
				   weights_initializer = tf.glorot_uniform_initializer,
				   weights_regularizer = None,
				   biases_initializer = tf.zeros_initializer,
				   biases_regularizer = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   name_scope = None):
	""" single LSTMP cell whose input (W) and/or recurrent (R) weight matrices are kept
	in Tensor-Train (TT) format; refer to _lstmp_cell for the dense counterpart
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time step - [batch_size, state_dim], is 0 for the initial time
		state_dim: dimension of the state (as output_dim in LSTM)
		output_dim: dimension of the output
		input_modes: factorization of input_dim (the product of the modes must equal input_dim)
		state_modes: factorization of state_dim
		output_modes: factorization of output_dim
		tt_ranks_W: TT ranks of input matrices; dense W matrices are used when None
		tt_ranks_R: TT ranks of recurrent matrices; dense R matrices are used when None
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		(output_y, output_c) after dropout - [batch_size, output_dim] and [batch_size, state_dim]
	NOTE(review): entering this cell twice with the same name_scope raises in TF1 unless the
	caller arranges variable reuse (reuse_variables / tf.AUTO_REUSE) - confirm at the call site.
	"""
	# sanity checks: each mode list must factorize its dimension, and all
	# factorizations must have the same length d
	if input_modes is not None:
		assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	if state_modes is not None:
		assert state_dim == np.prod(state_modes), 'State modes must be the factors of state tensor.'
	if output_modes is not None:
		assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	if input_modes is not None and output_modes is not None:
		assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
	if state_modes is not None and output_modes is not None:
		assert len(state_modes) == len(output_modes), 'Modes of state and output must be equal.'
	
	# dropout definition: keep probability is keep_prob while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# TT gated matmul definition
	def _tt_gated_matmul(input, cores, input_modes, output_modes, tt_ranks, d, batch_size, name):
		""" multiply input [batch_size, prod(input_modes)] by the TT-format matrix given
		by the d cores, one core contraction at a time; returns [batch_size, prod(output_modes)] """
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1}*r_{0}), note that r_{0}=1
		cur_inp = tf.reshape(input, [batch_size, input_modes[0], -1])
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# contraction between input and the ith core, output shape is (batch_size*n_{1}*...*n_{k-1}m_{k+1}...*m_{d}, n_{k}*r_{k})
			output = tf.matmul(cur_inp, cores[i], name = name + '_mal_core_%d' % (i + 1))
			# reshape output to (batch_size*n_{1}*...*n_{k-1}, m_{k+1}, m_{k+2}...*m_{d}, n_{k}, r_{k})
			# (the last core has no m_{k+1} left, hence the size-1 placeholder axis)
			if i == d - 1:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, output_modes[i], tt_ranks[i + 1]])
			else:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, output_modes[i], tt_ranks[i + 1]])
			# exchange m_{k+1} and n_{k}, reshape output to (batch_size*n_{1}*...*n_{k-1}n_{k}m_{k+2}...*m_{d}, m_{k+1}*r_{k})
			output = tf.transpose(output, [0, 3, 2, 1, 4])
			output = tf.reshape(output, [-1, output.shape[-2].value * output.shape[-1].value])
			if i != d - 1:
				cur_inp = tf.identity(output)
		output = tf.reshape(tf.squeeze(output), [batch_size, -1])
		return output

	with tf.variable_scope(name_scope):
		# d (number of TT cores) is only needed - and only defined - when at
		# least one side is in TT format
		if tt_ranks_W is not None or tt_ranks_R is not None:
			d = len(state_modes)
		# NOTE(review): static shapes required here - confirm batch_size is known at graph-build time
		input_dim = input_x.shape[-1].value
		batch_size = input_x.shape[0].value
		# pad the rank chains with the boundary ranks r_{0} = r_{d} = 1
		if tt_ranks_W is not None:
			l_tt_ranks_W = [1] + tt_ranks_W + [1]
		if tt_ranks_R is not None:
			l_tt_ranks_R = [1] + tt_ranks_R + [1]

		# define variables of forget gate (TT cores when ranks given, dense matrix otherwise)
		if tt_ranks_W is not None:
			l_W_f_cores = []
			for i in range(d):
				# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
				var_shape = [l_tt_ranks_W[i] * input_modes[i], state_modes[i] * l_tt_ranks_W[i + 1]]
				W_f_core = tf.get_variable('var_W_forget_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_f_cores.append(W_f_core)
		else:
			W_f = tf.get_variable('var_weight_forget', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if tt_ranks_R is not None:
			l_R_f_cores = []
			for i in range(d):
				# shape is: (r_{k-1}*n_{k}, n_{k}*r_{k})
				var_shape = [l_tt_ranks_R[i] * output_modes[i], state_modes[i] * l_tt_ranks_R[i + 1]]
				R_f_core = tf.get_variable('var_R_forget_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_f_cores.append(R_f_core)
		else:
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		# peephole and bias stay dense regardless of the TT setting
		p_f = tf.get_variable('var_peephole_forget', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_f = tf.get_variable('var_bias_forget', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of input gate
		if tt_ranks_W is not None:
			l_W_i_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_W[i] * input_modes[i], state_modes[i] * l_tt_ranks_W[i + 1]]
				W_i_core = tf.get_variable('var_W_input_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_i_cores.append(W_i_core)
		else:
			W_i = tf.get_variable('var_weight_input', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if tt_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_R[i] * output_modes[i], state_modes[i] * l_tt_ranks_R[i + 1]]
				R_i_core = tf.get_variable('var_R_input_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_i_cores.append(R_i_core)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_i = tf.get_variable('var_peephole_input', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_i = tf.get_variable('var_bias_input', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate (no peephole)
		if tt_ranks_W is not None:
			l_W_z_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_W[i] * input_modes[i], state_modes[i] * l_tt_ranks_W[i + 1]]
				W_z_core = tf.get_variable('var_W_state_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_z_cores.append(W_z_core)
		else:
			W_z = tf.get_variable('var_weight_state', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if tt_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_R[i] * output_modes[i], state_modes[i] * l_tt_ranks_R[i + 1]]
				R_z_core = tf.get_variable('var_R_state_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_z_cores.append(R_z_core)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_state', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of output gate
		if tt_ranks_W is not None:
			l_W_o_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_W[i] * input_modes[i], state_modes[i] * l_tt_ranks_W[i + 1]]
				W_o_core = tf.get_variable('var_W_output_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_o_cores.append(W_o_core)
		else:
			W_o = tf.get_variable('var_weight_output', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if tt_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_R[i] * output_modes[i], state_modes[i] * l_tt_ranks_R[i + 1]]
				R_o_core = tf.get_variable('var_R_output_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_o_cores.append(R_o_core)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_o = tf.get_variable('var_peephole_output', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_o = tf.get_variable('var_bias_output', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		
		# define variable of projection (always dense)
		P = tf.get_variable('var_weight_project', [state_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)

		# calculate forget gate (TT contraction or dense matmul per side)
		if tt_ranks_W is not None:
			output_W_f = _tt_gated_matmul(input_x, l_W_f_cores, input_modes, state_modes, l_tt_ranks_W, d, batch_size, 'W_f')
		else:
			output_W_f = tf.matmul(input_x, W_f, name = 'W_f')
		if tt_ranks_R is not None:
			output_R_f = _tt_gated_matmul(input_y, l_R_f_cores, output_modes, state_modes, l_tt_ranks_R, d, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name = 'R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)
		
		# calculate input gate
		if tt_ranks_W is not None:
			output_W_i = _tt_gated_matmul(input_x, l_W_i_cores, input_modes, state_modes, l_tt_ranks_W, d, batch_size, 'W_i')
		else:
			output_W_i = tf.matmul(input_x, W_i, name = 'W_i')
		if tt_ranks_R is not None:
			output_R_i = _tt_gated_matmul(input_y, l_R_i_cores, output_modes, state_modes, l_tt_ranks_R, d, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name = 'R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)
		
		# calculate state gate
		if tt_ranks_W is not None:
			output_W_z = _tt_gated_matmul(input_x, l_W_z_cores, input_modes, state_modes, l_tt_ranks_W, d, batch_size, 'W_z')
		else:
			output_W_z = tf.matmul(input_x, W_z, name = 'W_z')
		if tt_ranks_R is not None:
			output_R_z = _tt_gated_matmul(input_y, l_R_z_cores, output_modes, state_modes, l_tt_ranks_R, d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name = 'R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)
		
		# calculate current state
		output_c = output_f * input_c + output_i * output_z
		
		# calculate output gate (peephole uses the NEW cell state)
		if tt_ranks_W is not None:
			output_W_o = _tt_gated_matmul(input_x, l_W_o_cores, input_modes, state_modes, l_tt_ranks_W, d, batch_size, 'W_o')
		else:
			output_W_o = tf.matmul(input_x, W_o, name = 'W_o')
		if tt_ranks_R is not None:
			output_R_o = _tt_gated_matmul(input_y, l_R_o_cores, output_modes, state_modes, l_tt_ranks_R, d, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name = 'R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)
		
		# calculate current output
		output_y = tf.matmul(output_o * tf.nn.tanh(output_c), P)

	# dropout is applied to both the projected output and the cell state
	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _ktd_lstmp_cell(input_x,
					input_y,
					input_c,
					state_dim,
					output_dim,
					input_modes,
					state_modes,
					output_modes,
					ktd_rank,
					cp_ranks_W = None,
					cp_ranks_R = None,
					weights_initializer = tf.glorot_uniform_initializer,
					weights_regularizer = None,
					biases_initializer = tf.zeros_initializer,
					biases_regularizer = None,
					tfv_train_phase = None,
					keep_prob = 0.9,
					name_scope = None):
	""" single LSTMP cell whose input (W) and/or recurrent (R) weight matrices are kept
	in KTD (Kronecker tensor decomposition) format; refer to _lstmp_cell for the dense counterpart
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time step - [batch_size, state_dim], is 0 for the initial time
		state_dim: dimension of the state (as output_dim in LSTM)
		output_dim: dimension of the output
		input_modes: factorization of input_dim (even number of modes required)
		state_modes: factorization of state_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n)
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		(output_y, output_c) after dropout - [batch_size, output_dim] and [batch_size, state_dim]
	NOTE(review): entering this cell twice with the same name_scope raises in TF1 unless the
	caller arranges variable reuse (reuse_variables / tf.AUTO_REUSE) - confirm at the call site.
	"""
	# sanity checks: each mode list must factorize its dimension; the factor pairs
	# below require an even number of modes and equal lengths across tensors
	if input_modes is not None:
		assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
		assert len(input_modes) % 2 == 0, 'Dimension must be even.'
	if state_modes is not None:
		assert state_dim == np.prod(state_modes), 'State modes must be the factors of state tensor.'
	if output_modes is not None:
		assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
		assert len(output_modes) % 2 == 0, 'Dimension must be even.'
	if input_modes is not None and output_modes is not None:
		assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
	if state_modes is not None and output_modes is not None:
		assert len(state_modes) == len(output_modes), 'Modes of state and output must be equal.'

	# dropout definition: keep probability is keep_prob while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KTD gated matmul definition
	def _ktd_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size, name):
		""" multiply input [batch_size, prod(input_modes)] by the KTD-format matrix
		reconstructed from the CP cores; returns [batch_size, -1]
		NOTE(review): only l_Ws[0] and l_Ws[1] are consumed below, i.e. exactly two
		factor pairs (d == 4) are supported - confirm d is always 4 at the call sites """
		def __kron(a,b):
			# Kronecker product of two 2-D matrices via broadcast multiply + reshape
			[am,an]=a.shape.as_list()
			[bm,bn]=b.shape.as_list()
			a = tf.reshape(a,[am,1,an,1])
			b = tf.reshape(b,[1,bm,1,bn])
			K = tf.reshape(tf.multiply(a,b),[am*bm,an*bn])
			return K

		l_Ws = []
		for i in range(0,d,2):
			# ith CP core (m_{i}*n_{i}, r^{A}*r^{B})
			l_matrices_1 = []
			for k in range(ktd_rank):
				l_matrices_1.append(__kron(cores[2 * ktd_rank * i + 2 * k], cores[2 * ktd_rank * i + 2 * k + 1]))
			W_1 = tf.concat(l_matrices_1, axis = -1)

			# (i+1)th CP core
			l_matrices_2 = []
			for k in range(ktd_rank):
				l_matrices_2.append(__kron(cores[2 * ktd_rank * (i + 1) + 2 * k], cores[2 * ktd_rank * (i + 1) + 2 * k + 1]))
			W_2 = tf.concat(l_matrices_2, axis = -1)

			# m*n, m*n --> m*m, n*n
			W_pair = tf.matmul(W_1, tf.transpose(W_2))
			# NOTE(review): k here is left over from the loop above (k == ktd_rank - 1);
			# all cores of one factor share the same row count, so any k gives the same
			# shape - confirm that is the intent
			m_1 = cores[2 * ktd_rank * i + 2 * k].shape[0]
			n_1 = cores[2 * ktd_rank * i + 2 * k + 1].shape[0]
			m_2 = cores[2 * ktd_rank * (i + 1) + 2 * k].shape[0]
			n_2 = cores[2 * ktd_rank * (i + 1) + 2 * k + 1].shape[0]
			W_pair = tf.reshape(W_pair, [m_1, n_1, m_2, n_2])
			W_pair = tf.transpose(W_pair, [0, 2, 1, 3])
			W_pair = tf.reshape(W_pair, [m_1 * m_2, n_1 * n_2])
			l_Ws.append(W_pair)

		# contract the input with the two reconstructed factor matrices
		cur_inp = tf.reshape(input, [batch_size, input_modes[0]*input_modes[1], -1])
		cur_inp = tf.einsum('bxy,xi->biy', cur_inp, l_Ws[0])
		output = tf.einsum('biy,yj->bij', cur_inp, l_Ws[1])
		return tf.reshape(output, [batch_size, -1])

	with tf.variable_scope(name_scope):
		# d (number of factors) is only needed - and only defined - when at
		# least one side is in KTD format
		if cp_ranks_W is not None or cp_ranks_R is not None:
			d = len(state_modes)
		# NOTE(review): static shapes required here - confirm batch_size is known at graph-build time
		input_dim = input_x.shape[-1].value
		batch_size = input_x.shape[0].value

		# define variables of forget gate (CP cores when ranks given, dense matrix otherwise)
		if cp_ranks_W is not None:
			l_W_f_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [input_modes[i], cp_ranks_W[k]]
					var_shape_B = [state_modes[i], cp_ranks_W[k + ktd_rank]]
					W_f_core_A = tf.get_variable('var_W_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_f_core_B = tf.get_variable('var_W_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_W_f_cores.append(W_f_core_A)
					l_W_f_cores.append(W_f_core_B)
		else:
			W_f = tf.get_variable('var_weight_forget', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if cp_ranks_R is not None:
			l_R_f_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [state_modes[i], cp_ranks_R[k + ktd_rank]]
					R_f_core_A = tf.get_variable('var_R_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_f_core_B = tf.get_variable('var_R_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_f_cores.append(R_f_core_A)
					l_R_f_cores.append(R_f_core_B)
		else:
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		# peephole and bias stay dense regardless of the KTD setting
		p_f = tf.get_variable('var_peephole_forget', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_f = tf.get_variable('var_bias_forget', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of input gate
		if cp_ranks_W is not None:
			l_W_i_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [input_modes[i], cp_ranks_W[k]]
					var_shape_B = [state_modes[i], cp_ranks_W[k + ktd_rank]]
					W_i_core_A = tf.get_variable('var_W_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_i_core_B = tf.get_variable('var_W_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_W_i_cores.append(W_i_core_A)
					l_W_i_cores.append(W_i_core_B)
		else:
			W_i = tf.get_variable('var_weight_input', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if cp_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [state_modes[i], cp_ranks_R[k + ktd_rank]]
					R_i_core_A = tf.get_variable('var_R_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_i_core_B = tf.get_variable('var_R_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_i_cores.append(R_i_core_A)
					l_R_i_cores.append(R_i_core_B)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_i = tf.get_variable('var_peephole_input', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_i = tf.get_variable('var_bias_input', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate (no peephole)
		if cp_ranks_W is not None:
			l_W_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [input_modes[i], cp_ranks_W[k]]
					var_shape_B = [state_modes[i], cp_ranks_W[k + ktd_rank]]
					W_z_core_A = tf.get_variable('var_W_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_z_core_B = tf.get_variable('var_W_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_W_z_cores.append(W_z_core_A)
					l_W_z_cores.append(W_z_core_B)
		else:
			W_z = tf.get_variable('var_weight_state', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if cp_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [state_modes[i], cp_ranks_R[k + ktd_rank]]
					R_z_core_A = tf.get_variable('var_R_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_z_core_B = tf.get_variable('var_R_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_z_cores.append(R_z_core_A)
					l_R_z_cores.append(R_z_core_B)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_state', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of output gate
		if cp_ranks_W is not None:
			l_W_o_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [input_modes[i], cp_ranks_W[k]]
					var_shape_B = [state_modes[i], cp_ranks_W[k + ktd_rank]]
					W_o_core_A = tf.get_variable('var_W_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_o_core_B = tf.get_variable('var_W_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_W_o_cores.append(W_o_core_A)
					l_W_o_cores.append(W_o_core_B)
		else:
			W_o = tf.get_variable('var_weight_output', [input_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		if cp_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [state_modes[i], cp_ranks_R[k + ktd_rank]]
					R_o_core_A = tf.get_variable('var_R_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_o_core_B = tf.get_variable('var_R_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_o_cores.append(R_o_core_A)
					l_R_o_cores.append(R_o_core_B)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, state_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_o = tf.get_variable('var_peephole_output', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_o = tf.get_variable('var_bias_output', [state_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variable of projection (always dense)
		P = tf.get_variable('var_weight_project', [state_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		
		# calculate forget gate (KTD contraction or dense matmul per side)
		if cp_ranks_W is not None:
			output_W_f = _ktd_gated_matmul(input_x, l_W_f_cores, input_modes, state_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_f')
		else:
			output_W_f = tf.matmul(input_x, W_f, name = 'W_f')
		if cp_ranks_R is not None:
			output_R_f = _ktd_gated_matmul(input_y, l_R_f_cores, output_modes, state_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name = 'R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate
		if cp_ranks_W is not None:
			output_W_i = _ktd_gated_matmul(input_x, l_W_i_cores, input_modes, state_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_i')
		else:
			output_W_i = tf.matmul(input_x, W_i, name = 'W_i')
		if cp_ranks_R is not None:
			output_R_i = _ktd_gated_matmul(input_y, l_R_i_cores, output_modes, state_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name = 'R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate
		if cp_ranks_W is not None:
			output_W_z = _ktd_gated_matmul(input_x, l_W_z_cores, input_modes, state_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_z')
		else:
			output_W_z = tf.matmul(input_x, W_z, name = 'W_z')
		if cp_ranks_R is not None:
			output_R_z = _ktd_gated_matmul(input_y, l_R_z_cores, output_modes, state_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name = 'R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate (peephole uses the NEW cell state)
		if cp_ranks_W is not None:
			output_W_o = _ktd_gated_matmul(input_x, l_W_o_cores, input_modes, state_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_o')
		else:
			output_W_o = tf.matmul(input_x, W_o, name = 'W_o')
		if cp_ranks_R is not None:
			output_R_o = _ktd_gated_matmul(input_y, l_R_o_cores, output_modes, state_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name = 'R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output
		output_y = tf.matmul(output_o * tf.nn.tanh(output_c), P)

	# dropout is applied to both the projected output and the cell state
	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def lstmp_layer(input_seq,
				hidden_dim,
				output_dim,
				tfv_train_phase = None,
				keep_prob = 0.9,
				initializer = tf.glorot_uniform_initializer,
				reverse = False,
				name_scope = None):
	""" LSTMP layer: unrolls a single LSTMP cell (_lstmp_cell) over the num_seq steps of the input
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension (not final) of weight matrix in LSTM unit(input dimension is input_dim)
		output_dim: dimension of the final output after projection
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed
		name_scope:
	returns:
		3rd-order tensor - [batch_size, output_dim, num_seq], always in the original time order
		(when reverse is True the per-step outputs are flipped back before concatenation)
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial cell state c and projected output y are all-zero tensors
		init_c = tf.zeros([batch_size, hidden_dim])
		init_y = tf.zeros([batch_size, output_dim])

		# unroll the recurrence: one shared cell scope applied num_seq times
		# NOTE(review): every iteration enters variable scope 'lstm_cell'; _lstmp_cell
		# must create its variables with reuse enabled (e.g. tf.AUTO_REUSE) or the
		# second step will raise a variable-exists error -- confirm against the cell
		l_outputs = []
		cur_c = init_c
		cur_y = init_y
		for step in range(num_seq):
			# walk the time axis backwards when reverse is requested
			idx = num_seq - step - 1 if reverse else step
			cur_x = tf.gather(input_seq, idx, axis = -1)
			cur_y, cur_c = _lstmp_cell(cur_x, cur_y, cur_c, hidden_dim, output_dim, weights_initializer = initializer, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	# restore chronological order before stacking along the time axis
	if reverse:
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def tt_lstmp_layer(input_seq,
				   hidden_dim,
				   output_dim,
				   input_modes,
				   hidden_modes,
				   output_modes,
				   tt_ranks_W = None,
				   tt_ranks_R = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   flag_share = False,
				   initializer = tf.glorot_uniform_initializer,
				   reverse = False,
				   name_scope = None):
	""" LSTMP layer in TT (tensor-train) format, refer to lstmp_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension (not final) of weight matrix in LSTM unit(input dimension is input_dim)
		output_dim: dimension of the final output after projection
		input_modes: factorization of input_dim
		hidden_modes: factorization of hidden_dim
		output_modes: factorization of output_dim
		tt_ranks_W: TT ranks of input matrices
		tt_ranks_R: TT ranks of recurrent matrices
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed
		name_scope:
	returns:
		3rd-order tensor - [batch_size, output_dim, num_seq], always in the original time order
	"""
	# NOTE(review): flag_share and initializer are accepted but never forwarded to
	# _tt_lstmp_cell -- verify whether the cell should receive them
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial cell state c and projected output y are all-zero tensors
		init_c = tf.zeros([batch_size, hidden_dim])
		init_y = tf.zeros([batch_size, output_dim])

		# unroll the recurrence: one shared cell scope applied num_seq times
		# NOTE(review): every iteration enters variable scope 'tt_lstm_cell'; the
		# cell must create its variables with reuse enabled (e.g. tf.AUTO_REUSE)
		# or the second step will raise a variable-exists error -- confirm
		l_outputs = []
		cur_c = init_c
		cur_y = init_y
		for step in range(num_seq):
			# walk the time axis backwards when reverse is requested
			idx = num_seq - step - 1 if reverse else step
			cur_x = tf.gather(input_seq, idx, axis = -1)
			cur_y, cur_c = _tt_lstmp_cell(cur_x, cur_y, cur_c, hidden_dim, output_dim, input_modes, hidden_modes, output_modes, tt_ranks_W, tt_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'tt_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	# restore chronological order before stacking along the time axis
	if reverse:
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def ktd_lstmp_layer(input_seq,
					hidden_dim,
					output_dim,
					input_modes,
					hidden_modes,
					output_modes,
					ktd_rank,
					cp_ranks_W = None,
					cp_ranks_R = None,
					tfv_train_phase = None,
					keep_prob = 0.9,
					flag_share = False,
					initializer = tf.glorot_uniform_initializer,
					reverse = False,
					name_scope = None):
	""" LSTMP layer in KTD format, refer to lstmp_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension (not final) of weight matrix in LSTM unit(input dimension is input_dim)
		output_dim: dimension of the final output after projection
		input_modes: factorization of input_dim
		hidden_modes: factorization of hidden_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed
		name_scope:
	returns:
		3rd-order tensor - [batch_size, output_dim, num_seq], always in the original time order
	"""
	# NOTE(review): flag_share and initializer are accepted but never forwarded to
	# _ktd_lstmp_cell -- verify whether the cell should receive them
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial cell state c and projected output y are all-zero tensors
		init_c = tf.zeros([batch_size, hidden_dim])
		init_y = tf.zeros([batch_size, output_dim])

		# unroll the recurrence: one shared cell scope applied num_seq times
		# NOTE(review): every iteration enters variable scope 'ktd_lstm_cell'; the
		# cell must create its variables with reuse enabled (e.g. tf.AUTO_REUSE)
		# or the second step will raise a variable-exists error -- confirm
		l_outputs = []
		cur_c = init_c
		cur_y = init_y
		for step in range(num_seq):
			# walk the time axis backwards when reverse is requested
			idx = num_seq - step - 1 if reverse else step
			cur_x = tf.gather(input_seq, idx, axis = -1)
			cur_y, cur_c = _ktd_lstmp_cell(cur_x, cur_y, cur_c, hidden_dim, output_dim, input_modes, hidden_modes, output_modes, ktd_rank, cp_ranks_W, cp_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ktd_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	# restore chronological order before stacking along the time axis
	if reverse:
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)
