import numpy as np
import tensorflow as tf


def _lstm_cell(input_x,
			   input_y,
			   input_c,
			   output_dim,
			   weights_initializer = tf.glorot_uniform_initializer,
			   weights_regularizer = None,
			   biases_initializer = tf.zeros_initializer,
			   biases_regularizer = None,
			   tfv_train_phase = None,
			   keep_prob = 0.9,
			   name_scope = None):
	""" single peephole LSTM cell, based on 'LSTM: A Search Space Odyssey', should NOT be referenced outside this script
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: cell state from the last time step - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		weights_initializer: initializer for input/recurrent weight matrices
		weights_regularizer: regularizer for input/recurrent weight matrices
		biases_initializer: initializer for peephole/bias vectors
		biases_regularizer: regularizer for peephole/bias vectors
		tfv_train_phase: scalar bool tensor, whether in training phase (dropout is active only when True)
		keep_prob: keeping probability of dropout
		name_scope: variable scope holding this cell's parameters
	returns:
		(output_y, output_c) after dropout, each of shape [batch_size, output_dim]
	"""
	# dropout keep probability: `keep_prob` while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	def _gate_params(gate, input_dim, with_peephole = True):
		""" create the (W, R, p, b) parameters of one gate; p is None when with_peephole is False """
		W = tf.get_variable('var_weight_%s' % gate, [input_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		R = tf.get_variable('var_recurrent_%s' % gate, [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p = tf.get_variable('var_peephole_%s' % gate, [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True) if with_peephole else None
		b = tf.get_variable('var_bias_%s' % gate, [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		return W, R, p, b

	with tf.variable_scope(name_scope):
		input_dim = input_x.shape[-1].value

		# define variables of the four gates (the state gate has no peephole)
		W_f, R_f, p_f, b_f = _gate_params('forget', input_dim)
		W_i, R_i, p_i, b_i = _gate_params('input', input_dim)
		W_z, R_z, _, b_z = _gate_params('state', input_dim, with_peephole = False)
		W_o, R_o, p_o, b_o = _gate_params('output', input_dim)

		# forget and input gates peek at the PREVIOUS cell state
		output_f = tf.nn.sigmoid(tf.matmul(input_x, W_f) + tf.matmul(input_y, R_f) + input_c * p_f + b_f)
		output_i = tf.nn.sigmoid(tf.matmul(input_x, W_i) + tf.matmul(input_y, R_i) + input_c * p_i + b_i)

		# candidate state (block input), no peephole
		output_z = tf.nn.tanh(tf.matmul(input_x, W_z) + tf.matmul(input_y, R_z) + b_z)

		# new cell state: keep part of the old state, add the gated candidate
		output_c = output_f * input_c + output_i * output_z

		# output gate peeks at the NEW cell state
		output_o = tf.nn.sigmoid(tf.matmul(input_x, W_o) + tf.matmul(input_y, R_o) + output_c * p_o + b_o)

		# current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _tt_lstm_cell(input_x,
				  input_y,
				  input_c,
				  output_dim,
				  input_modes,
				  output_modes,
				  tt_ranks_W,
				  tt_ranks_R = None,
				  weights_initializer = tf.glorot_uniform_initializer,
				  weights_regularizer = None,
				  biases_initializer = tf.zeros_initializer,
				  biases_regularizer = None,
				  tfv_train_phase = None,
				  keep_prob = 0.9,
				  name_scope = None):
	""" single LSTM cell whose weight matrices are stored in TT (tensor-train) format, refer to _lstm_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: cell state from the last time step - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim (prod(input_modes) == input_dim)
		output_modes: factorization of output_dim (prod(output_modes) == output_dim)
		tt_ranks_W: TT ranks of the input-to-hidden matrices (inner ranks only; boundary ranks are 1)
		tt_ranks_R: TT ranks of the recurrent matrices; if None, dense recurrent matrices are used
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: scalar bool tensor, whether in training phase (dropout is active only when True)
		keep_prob: keeping probability of dropout
		name_scope:
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout keep probability: `keep_prob` while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# TT gated matmul: multiply the (reshaped) input by the TT-factorized matrix, one core at a time
	def _tt_gated_matmul(input, cores, input_modes, output_modes, tt_ranks, d, batch_size, name):
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1}*r_{0}), note that r_{0}=1
		cur_inp = tf.reshape(input, [batch_size, input_modes[0], -1])
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# contraction between input and the ith core, output shape is (batch_size*n_{1}*...*n_{k-1}m_{k+1}...*m_{d}, n_{k}*r_{k})
			output = tf.matmul(cur_inp, cores[i], name = name + '_mal_core_%d' % (i + 1))
			# reshape output to (batch_size*n_{1}*...*n_{k-1}, m_{k+1}, m_{k+2}...*m_{d}, n_{k}, r_{k})
			if i == d - 1:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, output_modes[i], tt_ranks[i + 1]])
			else:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, output_modes[i], tt_ranks[i + 1]])
			# exchange m_{k+1} and n_{k}, reshape output to (batch_size*n_{1}*...*n_{k-1}n_{k}m_{k+2}...*m_{d}, m_{k+1}*r_{k})
			output = tf.transpose(output, [0, 3, 2, 1, 4])
			output = tf.reshape(output, [-1, output.shape[-2].value * output.shape[-1].value])
			if i != d - 1:
				cur_inp = tf.identity(output)
		output = tf.reshape(tf.squeeze(output), [batch_size, -1])
		return output

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input_x.shape[0].value
		# pad the rank lists with the boundary TT ranks r_{0} = r_{d} = 1
		l_tt_ranks_W = [1] + tt_ranks_W + [1]
		if tt_ranks_R is not None:
			l_tt_ranks_R = [1] + tt_ranks_R + [1]

		def _gate_params(gate, with_peephole = True):
			""" create one gate's parameters: TT cores of W, the recurrent part (TT cores, or a dense
			matrix when tt_ranks_R is None), and the peephole/bias vectors (peephole is None when
			with_peephole is False) """
			l_W_cores = []
			for i in range(d):
				# core shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
				var_shape = [l_tt_ranks_W[i] * input_modes[i], output_modes[i] * l_tt_ranks_W[i + 1]]
				l_W_cores.append(tf.get_variable('var_W_%s_core_%d' % (gate, i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True))
			if tt_ranks_R is not None:
				recurrent = []
				for i in range(d):
					# core shape is: (r_{k-1}*n_{k}, n_{k}*r_{k})
					var_shape = [l_tt_ranks_R[i] * output_modes[i], output_modes[i] * l_tt_ranks_R[i + 1]]
					recurrent.append(tf.get_variable('var_R_%s_core_%d' % (gate, i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True))
			else:
				recurrent = tf.get_variable('var_recurrent_%s' % gate, [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			p = tf.get_variable('var_peephole_%s' % gate, [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True) if with_peephole else None
			b = tf.get_variable('var_bias_%s' % gate, [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			return l_W_cores, recurrent, p, b

		def _recurrent_matmul(recurrent, name):
			""" input_y times the recurrent matrix: TT contraction, or a plain dense matmul """
			if tt_ranks_R is not None:
				return _tt_gated_matmul(input_y, recurrent, output_modes, output_modes, l_tt_ranks_R, d, batch_size, name)
			return tf.matmul(input_y, recurrent, name = name)

		# define variables of the four gates (the state gate has no peephole)
		l_W_f_cores, rec_f, p_f, b_f = _gate_params('forget')
		l_W_i_cores, rec_i, p_i, b_i = _gate_params('input')
		l_W_z_cores, rec_z, _, b_z = _gate_params('state', with_peephole = False)
		l_W_o_cores, rec_o, p_o, b_o = _gate_params('output')

		# calculate forget gate (peeks at the previous cell state)
		output_W_f = _tt_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size, 'W_f')
		output_f = tf.nn.sigmoid(output_W_f + _recurrent_matmul(rec_f, 'R_f') + input_c * p_f + b_f)

		# calculate input gate (peeks at the previous cell state)
		output_W_i = _tt_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size, 'W_i')
		output_i = tf.nn.sigmoid(output_W_i + _recurrent_matmul(rec_i, 'R_i') + input_c * p_i + b_i)

		# calculate state gate (candidate state, no peephole)
		output_W_z = _tt_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size, 'W_z')
		output_z = tf.nn.tanh(output_W_z + _recurrent_matmul(rec_z, 'R_z') + b_z)

		# calculate current cell state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate (peeks at the NEW cell state)
		output_W_o = _tt_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size, 'W_o')
		output_o = tf.nn.sigmoid(output_W_o + _recurrent_matmul(rec_o, 'R_o') + output_c * p_o + b_o)

		# calculate current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _tt_lstm_cell_share(input_x,
						input_y,
						input_c,
						output_dim,
						input_modes,
						output_modes,
						tt_ranks_W,
						tt_ranks_R=None,
						weights_initializer=tf.glorot_uniform_initializer,
						weights_regularizer=None,
						biases_initializer=tf.zeros_initializer,
						biases_regularizer=None,
						tfv_train_phase=None,
						keep_prob=0.9,
						name_scope=None):
	""" single LSTM cell in TT format with shared cores, refer to _lstm_cell / _tt_lstm_cell
	All input-to-hidden (W) TT cores except the FIRST one are created under the gate-agnostic
	name 'var_W_core_%d' inside a reuse=tf.AUTO_REUSE scope, so cores 2..d are shared by the
	four gates (only core 1 stays gate-specific).  The recurrent (R) cores and the dense
	recurrent fallback keep per-gate names and are therefore NOT shared across gates.
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: cell state from the last time step - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim (prod(input_modes) == input_dim)
		output_modes: factorization of output_dim (prod(output_modes) == output_dim)
		tt_ranks_W: TT ranks of input matrices (inner ranks only; boundary ranks are 1)
		tt_ranks_R: TT ranks of recurrent matrices; if None, dense recurrent matrices are used
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: scalar bool tensor, whether in training phase (dropout is active only when True)
		keep_prob: keeping probability of dropout
		name_scope:
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout keep probability: `keep_prob` while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# TT gated matmul definition (same contraction as _tt_lstm_cell's _tt_gated_matmul)
	def tt_gated_matmul(input, cores, input_modes, output_modes, tt_ranks, d, batch_size, name):
		# reshape input to (batch_size*m_{2}*...*m_{d}, m_{1}*r_{0}), note that r_{0}=1
		cur_inp = tf.reshape(input, [batch_size, input_modes[0], -1])
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# contraction between input and the ith core
			output = tf.matmul(cur_inp, cores[i], name=name + '_mal_core_%d' % (i + 1))
			if i == d - 1:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32), 1, -1,
											 output_modes[i], tt_ranks[i + 1]])
			else:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32),
											 input_modes[i + 1], -1, output_modes[i], tt_ranks[i + 1]])
			# exchange m_{k+1} and n_{k}
			output = tf.transpose(output, [0, 3, 2, 1, 4])
			output = tf.reshape(output, [-1, output.shape[-2].value * output.shape[-1].value])
			if i != d - 1:
				cur_inp = tf.identity(output)
		output = tf.reshape(tf.squeeze(output), [batch_size, -1])
		return output

	# AUTO_REUSE lets the gate-agnostic 'var_W_core_%d' variables be created once and reused
	with tf.variable_scope(name_scope,reuse=tf.AUTO_REUSE):
		d = len(input_modes)
		batch_size = input_x.shape[0].value
		# pad the rank lists with the boundary TT ranks r_{0} = r_{d} = 1
		l_tt_ranks_W = [1] + tt_ranks_W + [1]
		if tt_ranks_R is not None:
			l_tt_ranks_R = [1] + tt_ranks_R + [1]

		# define variables of forget gate
		l_W_f_cores = []
		for i in range(d):
			# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
			var_shape = [l_tt_ranks_W[i] * input_modes[i], output_modes[i] * l_tt_ranks_W[i + 1]]
			if i>0:
				# shared core: same name for every gate, so AUTO_REUSE maps them to one variable
				W_f_core = tf.get_variable('var_W_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			else:
				# first core stays gate-specific
				W_f_core = tf.get_variable('var_W_forget_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
									   regularizer=weights_regularizer, trainable=True)
			l_W_f_cores.append(W_f_core)
		if tt_ranks_R is not None:
			l_R_f_cores = []
			for i in range(d):
				# shape is: (r_{k-1}*n_{k}, n_{k}*r_{k})
				var_shape = [l_tt_ranks_R[i] * output_modes[i], output_modes[i] * l_tt_ranks_R[i + 1]]
				R_f_core = tf.get_variable('var_R_forget_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
				l_R_f_cores.append(R_f_core)
		else:
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of input gate
		l_W_i_cores = []
		for i in range(d):
			var_shape = [l_tt_ranks_W[i] * input_modes[i], output_modes[i] * l_tt_ranks_W[i + 1]]
			if i>0:
				# shared core (reused from the forget gate above)
				W_i_core = tf.get_variable('var_W_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			else:
				W_i_core = tf.get_variable('var_W_input_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
									   regularizer=weights_regularizer, trainable=True)
			l_W_i_cores.append(W_i_core)
		if tt_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_R[i] * output_modes[i], output_modes[i] * l_tt_ranks_R[i + 1]]
				R_i_core = tf.get_variable('var_R_input_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
				l_R_i_cores.append(R_i_core)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of state gate (no peephole)
		l_W_z_cores = []
		for i in range(d):
			var_shape = [l_tt_ranks_W[i] * input_modes[i], output_modes[i] * l_tt_ranks_W[i + 1]]
			if i>0:
				# shared core (reused from the forget gate above)
				W_z_core = tf.get_variable('var_W_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			else:
				W_z_core = tf.get_variable('var_W_state_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
									   regularizer=weights_regularizer, trainable=True)
			l_W_z_cores.append(W_z_core)
		if tt_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_R[i] * output_modes[i], output_modes[i] * l_tt_ranks_R[i + 1]]
				R_z_core = tf.get_variable('var_R_state_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
				l_R_z_cores.append(R_z_core)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of output gate
		l_W_o_cores = []
		for i in range(d):
			var_shape = [l_tt_ranks_W[i] * input_modes[i], output_modes[i] * l_tt_ranks_W[i + 1]]
			if i>0:
				# shared core (reused from the forget gate above)
				W_o_core = tf.get_variable('var_W_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			else:
				W_o_core = tf.get_variable('var_W_output_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
									   regularizer=weights_regularizer, trainable=True)
			l_W_o_cores.append(W_o_core)
		if tt_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				var_shape = [l_tt_ranks_R[i] * output_modes[i], output_modes[i] * l_tt_ranks_R[i + 1]]
				R_o_core = tf.get_variable('var_R_output_core_%d' % (i + 1), var_shape, initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
				l_R_o_cores.append(R_o_core)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# calculate forget gate (peeks at the previous cell state)
		output_W_f = tt_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size,
									 'W_f')
		if tt_ranks_R is not None:
			output_R_f = tt_gated_matmul(input_y, l_R_f_cores, output_modes, output_modes, l_tt_ranks_R, d, batch_size,
										 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate (peeks at the previous cell state)
		output_W_i = tt_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size,
									 'W_i')
		if tt_ranks_R is not None:
			output_R_i = tt_gated_matmul(input_y, l_R_i_cores, output_modes, output_modes, l_tt_ranks_R, d, batch_size,
										 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate (candidate state, no peephole)
		output_W_z = tt_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size,
									 'W_z')
		if tt_ranks_R is not None:
			output_R_z = tt_gated_matmul(input_y, l_R_z_cores, output_modes, output_modes, l_tt_ranks_R, d, batch_size,
										 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current cell state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate (peeks at the NEW cell state)
		output_W_o = tt_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, l_tt_ranks_W, d, batch_size,
									 'W_o')
		if tt_ranks_R is not None:
			output_R_o = tt_gated_matmul(input_y, l_R_o_cores, output_modes, output_modes, l_tt_ranks_R, d, batch_size,
										 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob=dropout_rate(keep_prob), name='dropout_y'), tf.nn.dropout(output_c, keep_prob=dropout_rate(keep_prob), name='dropout_c')


def _ht_lstm_cell(input_x,
				  input_y,
				  input_c,
				  output_dim,
				  input_modes,
				  output_modes,
				  ht_ranks_W,
				  ht_ranks_R=None,
				  weights_initializer=tf.glorot_uniform_initializer,
				  weights_regularizer=None,
				  biases_initializer=tf.zeros_initializer,
				  biases_regularizer=None,
				  tfv_train_phase=None,
				  keep_prob=0.9,
				  name_scope=None):
	""" single LSTM cell in HT (hierarchical Tucker) format, refer to _lstm_cell
	NOTE(review): ht_gated_matmul hardcodes exactly 4 modes (loops run dimin // 2 = 2 times and
	indices [2], [3] are used directly), so len(input_modes) must be 4 — there is no assert for
	this here (the _share variant checks rank length); confirm callers always pass 4 modes.
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: cell state from the last time step - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim (prod(input_modes) == input_dim)
		output_modes: factorization of output_dim (prod(output_modes) == output_dim)
		ht_ranks_W: HT ranks of input matrices
		ht_ranks_R: HT ranks of recurrent matrices; if None, dense recurrent matrices are used
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: scalar bool tensor, whether in training phase (dropout is active only when True)
		keep_prob: keeping probability of dropout
		name_scope:
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout keep probability: `keep_prob` while training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# Kronecker product of two 2-D tensors: [am, an] x [bm, bn] -> [am*bm, an*bn]
	def kron(a, b):
		[am, an] = a.shape.as_list()
		[bm, bn] = b.shape.as_list()
		a = tf.reshape(a, [am, 1, an, 1])
		b = tf.reshape(b, [1, bm, 1, bn])
		K = tf.reshape(tf.multiply(a, b), [am * bm, an * bn])
		return K

	# thin wrapper around tf.get_variable (keyword form)
	def get_var_wrap(name,
					 shape,
					 initializer,
					 regularizer,
					 trainable):

		return tf.get_variable(name,
							   shape=shape,
							   initializer=initializer,
							   regularizer=regularizer,
							   trainable=trainable)

	# HT gated matmul: reconstruct the dense [input_dim, output_dim] matrix from its HT factors
	# (leaf cores U1..U4, transfer matrices tb12/tb34, root blast), then do one dense matmul.
	# Creates its variables inside the given name_scope, so each gate gets its own factors.
	def ht_gated_matmul(inp,
			  inp_modes,  # n
			  out_modes,
			  matin_ranks,  # rk
			  matout_ranks,  # rk
			  cores_initializer=weights_initializer,
			  cores_regularizer=weights_regularizer,
			  trainable=True,
			  name_scope=None):

		with tf.variable_scope(name_scope):
			dimin = len(inp_modes)
			dimout = len(out_modes)
			matin_cores = []
			matout_cores = []
			batch_size = inp.shape[0].value
			inp = tf.reshape(inp, [batch_size, -1])

			cinit = cores_initializer
			creg = cores_regularizer
			# leaf cores U1, U2 over modes 1..2, then their Kronecker product
			for i in range(dimin // 2):
				matin_cores.append(get_var_wrap('mat_corein_%d' % (i + 1),
												shape=[inp_modes[i] * out_modes[i], matin_ranks[i]],
												initializer=cinit,
												regularizer=creg,
												trainable=trainable))

			matin_kron = kron(matin_cores[0], matin_cores[1])
			# leaf cores U3, U4 over modes 3..4, then their Kronecker product
			for i in range(2, 2 + dimout // 2):
				matout_cores.append(get_var_wrap('mat_coreout_%d' % (i + 1),
												 shape=[inp_modes[i] * out_modes[i], matout_ranks[i - 2]],
												 initializer=cinit,
												 regularizer=creg,
												 trainable=trainable))

			matout_kron = kron(matout_cores[0], matout_cores[1])
			# root transfer matrix [r12, r34]
			blast = get_var_wrap('mat_coreinlast',
								 shape=[matin_ranks[-1], matout_ranks[-1]],
								 initializer=cinit,
								 regularizer=creg,
								 trainable=trainable)

			# transfer matrix of the left branch [r1*r2, r12]
			tb12 = get_var_wrap('tb12',
								shape=[matin_ranks[0] * matin_ranks[1], matin_ranks[2]],
								initializer=cinit,
								regularizer=creg,
								trainable=trainable)
			# transfer matrix of the right branch [r3*r4, r34]
			tb34 = get_var_wrap('tb34',
								shape=[matout_ranks[0] * matout_ranks[1], matout_ranks[2]],
								initializer=cinit,
								regularizer=creg,
								trainable=trainable)
			# recover the left branch: (U1 (x) U2) * tb12 * blast
			matin_kron = tf.matmul(matin_kron, tb12)
			matin_kron = tf.matmul(matin_kron, blast)

			# recover the right branch and reorder its axes to (in, in, out, out, rank)
			matout_kron = tf.matmul(matout_kron, tb34)
			matout_kron = tf.reshape(matout_kron,
									 [inp_modes[2], out_modes[2], inp_modes[3], out_modes[3], matout_ranks[2]])
			matout_kron = tf.transpose(matout_kron, [0, 2, 1, 3, 4])
			matout_kron = tf.reshape(matout_kron,
									 [inp_modes[2] * inp_modes[3] * out_modes[2] * out_modes[3], matout_ranks[2]])
			matout_kron = tf.transpose(matout_kron, [1, 0])

			# [n1n2m1m2*1]
			mat_cores1 = tf.reshape(matin_kron,
									[inp_modes[0], out_modes[0], inp_modes[1], out_modes[1], matout_ranks[-1]])
			mat_cores1 = tf.transpose(mat_cores1, [0, 2, 1, 3, 4])
			mat_cores1 = tf.reshape(mat_cores1,
									[inp_modes[0] * inp_modes[1], out_modes[0] * out_modes[1], matout_ranks[-1]])
			mat_cores1 = tf.reshape(mat_cores1, [-1, matout_ranks[-1]])

			# contract the two branches into the full dense matrix [input_dim, output_dim]
			mat_cores = tf.matmul(mat_cores1, matout_kron)
			mat_cores = tf.reshape(mat_cores,
								   [inp_modes[0] * inp_modes[1], out_modes[0] * out_modes[1],
									inp_modes[2] * inp_modes[3],
									out_modes[2] * out_modes[3]])
			mat_cores = tf.transpose(mat_cores, [0, 2, 1, 3])
			mat_cores = tf.reshape(mat_cores, [inp_modes[0] * inp_modes[1] * inp_modes[2] * inp_modes[3],
											   out_modes[0] * out_modes[1] * out_modes[2] * out_modes[3]])

			out = tf.matmul(inp, mat_cores)

		return out

	with tf.variable_scope(name_scope):
		# peephole/bias vectors of the four gates (state gate has no peephole)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		b_z = tf.get_variable('var_bias_state', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		# calc forget gate (peeks at the previous cell state)
		output_W_f = ht_gated_matmul(input_x, input_modes, output_modes, ht_ranks_W, ht_ranks_W, name_scope='W_f')
		if ht_ranks_R is not None:
			output_R_f = ht_gated_matmul(input_y, output_modes, output_modes, ht_ranks_R, ht_ranks_R, name_scope= 'R_f')
		else:
			# dense recurrent fallback when no HT ranks are given
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer=weights_initializer, regularizer=weights_regularizer, trainable=True)
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calc input gate (peeks at the previous cell state)
		output_W_i = ht_gated_matmul(input_x,input_modes, output_modes, ht_ranks_W,ht_ranks_W, name_scope='W_i')
		if ht_ranks_R is not None:
			output_R_i = ht_gated_matmul(input_y, output_modes, output_modes, ht_ranks_R, ht_ranks_R, name_scope='R_i')
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer=weights_initializer, regularizer=weights_regularizer, trainable=True)
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calc state gate (candidate state, no peephole)
		output_W_z = ht_gated_matmul(input_x, input_modes, output_modes, ht_ranks_W,ht_ranks_W, name_scope='W_z')
		if ht_ranks_R is not None:
			output_R_z = ht_gated_matmul(input_y, output_modes, output_modes, ht_ranks_R, ht_ranks_R,  name_scope='R_z')
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer=weights_initializer, regularizer=weights_regularizer, trainable=True)
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# current cell state
		output_c = output_f * input_c + output_i * output_z

		# calc output gate (peeks at the NEW cell state)
		output_W_o = ht_gated_matmul(input_x, input_modes, output_modes, ht_ranks_W, ht_ranks_W,name_scope='W_o')
		if ht_ranks_R is not None:
			output_R_o = ht_gated_matmul(input_y, output_modes, output_modes, ht_ranks_R, ht_ranks_R, name_scope='R_o')
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer=weights_initializer, regularizer=weights_regularizer, trainable=True)
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _ht_lstm_cell_share(input_x,
						input_y,
						input_c,
						output_dim,
						input_modes,
						output_modes,
						ht_ranks_W,
						ht_ranks_R=None,
						weights_initializer=tf.glorot_uniform_initializer,
						weights_regularizer=None,
						biases_initializer=tf.zeros_initializer,
						biases_regularizer=None,
						tfv_train_phase=None,
						keep_prob=0.9,
						name_scope=None):
	""" single LSTM cell in HT (Hierarchical Tucker) format where the transfer
	cores are SHARED across the four gates; only the leading core (w_f / w_i /
	w_z / w_o) is gate-specific. Refer to _lstm_cell for the gate equations.

	NOTE(review): the contraction logic below indexes modes [0..3] and
	ht_ranks_W[0..2], i.e. it appears to assume exactly 4 input/output modes
	and 3 HT ranks - confirm before calling with other factorizations.

	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ht_ranks_W: HT ranks of input matrices
		ht_ranks_R: HT ranks of recurrent matrices
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		(dropout(output_y), dropout(output_c)) - the cell output and new state,
		each [batch_size, output_dim], with dropout active only in train phase.
	"""
	# NOTE(review): the last message says "TT ranks" but this cell is in HT format.
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'
	assert len(ht_ranks_W) == len(input_modes) - 1,'The number of TT ranks must be matching to the tensor modes.'

	# dropout definition: keep_prob applies during training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# Kronecker product of two 2-D tensors: (am, an) x (bm, bn) -> (am*bm, an*bn)
	def kron(a, b):

		[am, an] = a.shape.as_list()
		[bm, bn] = b.shape.as_list()
		a = tf.reshape(a, [am, 1, an, 1])
		b = tf.reshape(b, [1, bm, 1, bn])
		K = tf.reshape(tf.multiply(a, b), [am * bm, an * bn])

		return K

	# Multiply `inp` [batch_size, prod(inp_modes)] by the HT-factorized matrix
	# assembled from `cores`. Expected layout of `cores` (7 entries):
	#   cores[0], cores[1]: leaf cores U1, U2  (input half)
	#   cores[2], cores[3]: leaf cores U3, U4  (output half)
	#   cores[4]: top transfer matrix [r12, r34]
	#   cores[5]: transfer matrix tb12 [r1*r2, r12]
	#   cores[6]: transfer matrix tb34 [r3*r4, r34]
	# NOTE(review): matin_ranks, cores_initializer, cores_regularizer and
	# trainable are accepted but never used inside this helper; no variables
	# are created here (the cores are passed in pre-built).
	def ht_gated_matmul(inp,
			  inp_modes,  # n
			  out_modes,
			  cores,
			  matin_ranks,  # rk
			  matout_ranks,  # rk
			  cores_initializer=weights_initializer,
			  cores_regularizer=weights_regularizer,
			  trainable=True,
			  name_scope=None):

		with tf.variable_scope(name_scope):
			matin_cores=[]
			matin_cores.append(cores[0])
			matin_cores.append(cores[1])
			matout_cores = []
			matout_cores.append(cores[2])
			matout_cores.append(cores[3])
			batch_size = inp.shape[0].value
			inp = tf.reshape(inp, [batch_size, -1])

			# kron U1, U2
			matin_kron = kron(matin_cores[0], matin_cores[1])
			# kron U3, U4
			matout_kron = kron(matout_cores[0], matout_cores[1])
			# [r12,r34]
			blast = cores[4]

			# [r1r2,r12]
			tb12 = cores[5]
			# [r3r4,r34]
			tb34 = cores[6]
			# recover: contract the input half with its transfer matrices
			matin_kron = tf.matmul(matin_kron, tb12)
			matin_kron = tf.matmul(matin_kron, blast)

			# contract the output half, then permute its mode axes so that
			# input modes come before output modes within the flattened axis
			matout_kron = tf.matmul(matout_kron, tb34)
			matout_kron = tf.reshape(matout_kron,
									 [inp_modes[2], out_modes[2], inp_modes[3], out_modes[3], matout_ranks[2]])
			matout_kron = tf.transpose(matout_kron, [0, 2, 1, 3, 4])
			matout_kron = tf.reshape(matout_kron,
									 [inp_modes[2] * inp_modes[3] * out_modes[2] * out_modes[3], matout_ranks[2]])
			matout_kron = tf.transpose(matout_kron, [1, 0])

			# [n1n2m1m2*1] - same mode permutation for the input half
			mat_cores1 = tf.reshape(matin_kron,
									[inp_modes[0], out_modes[0], inp_modes[1], out_modes[1], matout_ranks[-1]])
			mat_cores1 = tf.transpose(mat_cores1, [0, 2, 1, 3, 4])
			mat_cores1 = tf.reshape(mat_cores1,
									[inp_modes[0] * inp_modes[1], out_modes[0] * out_modes[1], matout_ranks[-1]])
			mat_cores1 = tf.reshape(mat_cores1, [-1, matout_ranks[-1]])

			# contract the two halves over the top rank, then rearrange into the
			# full (prod(inp_modes), prod(out_modes)) weight matrix
			mat_cores = tf.matmul(mat_cores1, matout_kron)
			mat_cores = tf.reshape(mat_cores,
								   [inp_modes[0] * inp_modes[1], out_modes[0] * out_modes[1],
									inp_modes[2] * inp_modes[3],
									out_modes[2] * out_modes[3]])
			mat_cores = tf.transpose(mat_cores, [0, 2, 1, 3])
			mat_cores = tf.reshape(mat_cores, [inp_modes[0] * inp_modes[1] * inp_modes[2] * inp_modes[3],
											   out_modes[0] * out_modes[1] * out_modes[2] * out_modes[3]])

			out = tf.matmul(inp, mat_cores)

		return out

	with tf.variable_scope(name_scope):
		# gate-specific leading cores (the only non-shared weights)
		w_f=tf.get_variable('mat_coref1',
					 [input_modes[0] * output_modes[0], ht_ranks_W[0]],
					 initializer=weights_initializer,
					 regularizer=weights_regularizer,
					 trainable=True)
		w_i = tf.get_variable('mat_corei1',
					   [input_modes[0] * output_modes[0], ht_ranks_W[0]],
					   initializer=weights_initializer,
					   regularizer=weights_regularizer,
					   trainable=True)
		w_z = tf.get_variable('mat_corez1',
					   [input_modes[0] * output_modes[0], ht_ranks_W[0]],
					   initializer=weights_initializer,
					   regularizer=weights_regularizer,
					   trainable=True)
		w_o = tf.get_variable('mat_coreo1',
					   [input_modes[0] * output_modes[0], ht_ranks_W[0]],
					   initializer=weights_initializer,
					   regularizer=weights_regularizer,
					   trainable=True)
		# shared tail of the core list: U2, U3, U4, blast, tb12, tb34
		cores = []
		cores.append(tf.get_variable('mat_coreu2',
										 [input_modes[1] * output_modes[1], ht_ranks_W[1]],
										 initializer=weights_initializer,
										 regularizer=weights_regularizer,
										 trainable=True))
		# kron U3, U4
		for i in range(2, 2 + len(output_modes) // 2):
			matout_cores=tf.get_variable('mat_coreout_%d' % (i + 1),
										 [input_modes[i] * output_modes[i], ht_ranks_W[i - 2]],
										 initializer=weights_initializer,
										 regularizer=weights_regularizer,
										 trainable=True)
			cores.append(matout_cores)
		# [r12,r34]
		blast = tf.get_variable('mat_coreinlast', [ht_ranks_W[-1], ht_ranks_W[-1]], initializer=weights_initializer, regularizer=weights_regularizer, trainable=True)
		cores.append(blast)
		# [r1r2,r12]
		tb12 = tf.get_variable('tb12',
						[ht_ranks_W[0] * ht_ranks_W[1], ht_ranks_W[2]],
						initializer=weights_initializer,
						regularizer=weights_regularizer,
						trainable=True)
		cores.append(tb12)
		# [r3r4,r34]
		# NOTE(review): tb34 is given the same shape as tb12
		# (ht_ranks_W[0]*ht_ranks_W[1], ht_ranks_W[2]); the comment suggests it
		# should involve the ranks of U3/U4 - confirm this is intended.
		tb34 = tf.get_variable('tb34',
						[ht_ranks_W[0] * ht_ranks_W[1], ht_ranks_W[2]],
						initializer=weights_initializer,
						regularizer=weights_regularizer,
						trainable=True)
		cores.append(tb34)
		# per-gate core lists: gate-specific leading core + shared tail
		cores_f=[w_f]+cores
		cores_i=[w_i]+cores
		cores_z = [w_z] + cores
		cores_o = [w_o] + cores
		# peephole and bias vectors (per gate; state gate has no peephole)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		b_z = tf.get_variable('var_bias_state', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)

		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer=biases_initializer,regularizer=biases_regularizer, trainable=True)
		# NOTE(review): cores_R stays empty, yet it is passed to ht_gated_matmul
		# whenever ht_ranks_R is not None; that path will fail with an
		# IndexError on cores[0]. Recurrent HT cores appear never to be built -
		# confirm whether ht_ranks_R is supported or should be rejected.
		cores_R=[]
		# calc forget gate
		output_W_f = ht_gated_matmul(input_x, input_modes, output_modes, cores_f,ht_ranks_W, ht_ranks_W, name_scope='W_f')
		if ht_ranks_R is not None:
			output_R_f = ht_gated_matmul(input_y, output_modes, output_modes, cores_R,ht_ranks_R, ht_ranks_R, name_scope= 'R_f')
		else:
			# dense fallback for the recurrent weight
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer=weights_initializer,
							  regularizer=weights_regularizer, trainable=True)
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calc input gate
		output_W_i = ht_gated_matmul(input_x,input_modes, output_modes,cores_i, ht_ranks_W,ht_ranks_W, name_scope='W_i')
		if ht_ranks_R is not None:
			output_R_i = ht_gated_matmul(input_y, output_modes, output_modes, cores_R, ht_ranks_R, ht_ranks_R, name_scope='R_i')
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer=weights_initializer,
							  regularizer=weights_regularizer, trainable=True)
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calc state gate (candidate values; tanh, no peephole)
		output_W_z = ht_gated_matmul(input_x, input_modes, output_modes,cores_z, ht_ranks_W,ht_ranks_W, name_scope='W_z')
		if ht_ranks_R is not None:
			output_R_z = ht_gated_matmul(input_y, output_modes, output_modes, cores_R, ht_ranks_R, ht_ranks_R,  name_scope='R_z')
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer=weights_initializer,
							  regularizer=weights_regularizer, trainable=True)
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# current state: c_t = f_t * c_{t-1} + i_t * z_t
		output_c = output_f * input_c + output_i * output_z

		# calc output gate (peephole reads the NEW state output_c)
		output_W_o = ht_gated_matmul(input_x, input_modes, output_modes,cores_o, ht_ranks_W, ht_ranks_W,name_scope='W_o')
		if ht_ranks_R is not None:
			output_R_o = ht_gated_matmul(input_y, output_modes, output_modes, cores_R, ht_ranks_R, ht_ranks_R, name_scope='R_o')
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer=weights_initializer,
							  regularizer=weights_regularizer, trainable=True)
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# current output: y_t = o_t * tanh(c_t)
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _tr_lstm_cell(input_x,
				  input_y,
				  input_c,
				  output_dim,
				  input_modes,
				  output_modes,
				  tr_ranks_Win,
				  tr_ranks_Wout,
				  tr_ranks_R=None,
				  weights_initializer=tf.glorot_uniform_initializer,
				  weights_regularizer=None,
				  biases_initializer=tf.zeros_initializer,
				  biases_regularizer=None,
				  tfv_train_phase=None,
				  keep_prob=0.9,
				  name_scope=None):
	""" single LSTM cell in TR (tensor ring) format, refer to _lstm_cell for
	the gate equations. The input-to-hidden and (optionally) recurrent weight
	matrices of each gate are stored as tensor-ring cores and contracted with
	the input on the fly instead of being materialized as dense matrices.
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		tr_ranks_Win: TR ranks of input modes
		tr_ranks_Wout: TR ranks of output modes
		tr_ranks_R: TR ranks of output matrices; if None, dense recurrent
			matrices are used instead
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		(dropout(output_y), dropout(output_c)) - the cell output and new state,
		each [batch_size, output_dim], with dropout active only in train phase.
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'

	# dropout definition: keep_prob applies during training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# TR gated matmul definition: contract `inp` [batch, prod(in_modes)] with
	# the TR-decomposed matrix given by `in_cores` (one per input mode) and
	# `out_cores` (one per output mode). Unlike the original version this does
	# NOT mutate the caller's core lists (reshapes go through local names).
	def tr_gated_matmul(inp, in_cores, out_cores, in_modes, out_modes, tr_ranks_in, tr_ranks_out, d_inp, d_outp, batch_size, name):
		# bring mode 1 to the last axis so the first core can absorb it
		cur_inp = tf.reshape(inp, [batch_size, in_modes[0], -1])
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])

		# first input core: (r0*m1, r1) -> (m1, r0*r1)
		first_core = tf.reshape(in_cores[0], [tr_ranks_in[0], in_modes[0], tr_ranks_in[1]])
		first_core = tf.transpose(first_core, [1, 0, 2])
		first_core = tf.reshape(first_core, [in_modes[0], -1])
		output = tf.matmul(cur_inp, first_core)
		output = tf.reshape(output, [batch_size, in_modes[1], -1, tr_ranks_in[0], tr_ranks_in[1]])
		output = tf.transpose(output, [0, 2, 3, 1, 4])
		output = tf.reshape(output, [-1, in_modes[1] * tr_ranks_in[1]])
		cur_inp = output

		# absorb the remaining input modes one core at a time
		for i in range(1, d_inp):
			output = tf.matmul(cur_inp, in_cores[i], name=name + '_mal_core_%d' % (i + 1))
			if i == d_inp - 1:
				# all input modes consumed; keep only (batch*r0, r_last)
				output = tf.reshape(output, [batch_size, tr_ranks_in[0], tr_ranks_in[i + 1]])
				output = tf.reshape(output, [-1, output.shape[-1].value])
			else:
				output = tf.reshape(output, [batch_size, in_modes[i + 1], -1, tr_ranks_in[0], tr_ranks_in[i + 1]])
				output = tf.transpose(output, [0, 2, 3, 1, 4])
				output = tf.reshape(output, [-1, output.shape[-2].value * output.shape[-1].value])
			cur_inp = output

		# last output core: (r_{d-1}, m_d*r_d) -> (r_{d-1}*r_d, m_d)
		last_core = tf.reshape(out_cores[d_outp - 1], [tr_ranks_out[d_outp - 1], out_modes[d_outp - 1], tr_ranks_out[d_outp]])
		last_core = tf.transpose(last_core, [0, 2, 1])
		last_core = tf.reshape(last_core, [-1, out_modes[d_outp - 1]])

		# emit the output modes one core at a time
		for i in range(d_outp - 1):
			output = tf.matmul(cur_inp, out_cores[i])
			output = tf.reshape(output, [batch_size, tr_ranks_in[0], -1, out_modes[i], tr_ranks_out[i + 1]])
			if i == d_outp - 2:
				# prepare to close the ring against the last core
				output = tf.transpose(output, [0, 2, 3, 1, 4])
				output = tf.reshape(output, [-1, tr_ranks_in[0] * tr_ranks_out[i + 1]])
			else:
				output = tf.reshape(output, [-1, output.shape[-1].value])
			cur_inp = output
		output = tf.matmul(cur_inp, last_core)

		# [batch_size, prod(out_modes)]
		return tf.reshape(tf.squeeze(output), [batch_size, -1])

	with tf.variable_scope(name_scope):
		d_in = len(input_modes)
		d_out = len(output_modes)
		batch_size = input_x.shape[0].value

		# ---- per-gate parameter factories (variable names match the original
		# layout, so checkpoints remain compatible) ----

		def _make_W_cores(gate):
			""" create the TR cores of the input-to-hidden matrix of one gate """
			in_cores = []
			for k in range(d_in):
				# in-core shape is: (r_{k-1}*m_{k}, r_{k})
				in_cores.append(tf.get_variable('var_W_%s_in_core_%d' % (gate, k + 1),
												[tr_ranks_Win[k] * input_modes[k], tr_ranks_Win[k + 1]],
												initializer=weights_initializer,
												regularizer=weights_regularizer, trainable=True))
			out_cores = []
			for k in range(d_out):
				# out-core shape is: (r_{k-1}, n_{k}*r_{k})
				out_cores.append(tf.get_variable('var_W_%s_out_core_%d' % (gate, k + 1),
												 [tr_ranks_Wout[k], output_modes[k] * tr_ranks_Wout[k + 1]],
												 initializer=weights_initializer,
												 regularizer=weights_regularizer, trainable=True))
			return in_cores, out_cores

		def _make_R_cores(gate):
			""" create the TR cores of the (square) recurrent matrix of one gate """
			in_cores = []
			for k in range(d_out):
				in_cores.append(tf.get_variable('var_R_%s_in_core_%d' % (gate, k + 1),
												[tr_ranks_R[k] * output_modes[k], tr_ranks_R[k + 1]],
												initializer=weights_initializer,
												regularizer=weights_regularizer, trainable=True))
			out_cores = []
			for k in range(d_out):
				out_cores.append(tf.get_variable('var_R_%s_out_core_%d' % (gate, k + 1),
												 [tr_ranks_R[k], output_modes[k] * tr_ranks_R[k + 1]],
												 initializer=weights_initializer,
												 regularizer=weights_regularizer, trainable=True))
			return in_cores, out_cores

		def _make_dense_R(var_name):
			""" dense recurrent matrix, used when tr_ranks_R is None """
			return tf.get_variable(var_name, [output_dim, output_dim], initializer=weights_initializer,
								   regularizer=weights_regularizer, trainable=True)

		def _make_vec(var_name):
			""" peephole / bias vector of length output_dim """
			return tf.get_variable(var_name, [output_dim], initializer=biases_initializer,
								   regularizer=biases_regularizer, trainable=True)

		# define variables of forget gate
		l_W_f_in_cores, l_W_f_out_cores = _make_W_cores('forget')
		if tr_ranks_R is not None:
			l_R_f_in_cores, l_R_f_out_cores = _make_R_cores('forget')
		else:
			R_f = _make_dense_R('var_recurrent_forget')
		p_f = _make_vec('var_peephole_forget')
		b_f = _make_vec('var_bias_forget')

		# define variables of input gate
		l_W_i_in_cores, l_W_i_out_cores = _make_W_cores('input')
		if tr_ranks_R is not None:
			l_R_i_in_cores, l_R_i_out_cores = _make_R_cores('input')
		else:
			R_i = _make_dense_R('var_recurrent_input')
		p_i = _make_vec('var_peephole_input')
		b_i = _make_vec('var_bias_input')

		# define variables of state gate (no peephole)
		l_W_z_in_cores, l_W_z_out_cores = _make_W_cores('state')
		if tr_ranks_R is not None:
			l_R_z_in_cores, l_R_z_out_cores = _make_R_cores('state')
		else:
			R_z = _make_dense_R('var_recurrent_state')
		b_z = _make_vec('var_bias_state')

		# define variables of output gate
		l_W_o_in_cores, l_W_o_out_cores = _make_W_cores('output')
		if tr_ranks_R is not None:
			l_R_o_in_cores, l_R_o_out_cores = _make_R_cores('output')
		else:
			R_o = _make_dense_R('var_recurrent_output')
		p_o = _make_vec('var_peephole_output')
		b_o = _make_vec('var_bias_output')

		# calculate forget gate: f_t = sigmoid(W_f x + R_f y + p_f . c + b_f)
		output_W_f = tr_gated_matmul(input_x, l_W_f_in_cores, l_W_f_out_cores, input_modes, output_modes,
									 tr_ranks_Win, tr_ranks_Wout, d_in, d_out, batch_size, 'W_f')
		if tr_ranks_R is not None:
			output_R_f = tr_gated_matmul(input_y, l_R_f_in_cores, l_R_f_out_cores, output_modes, output_modes,
										 tr_ranks_R, tr_ranks_R, d_out, d_out, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate
		output_W_i = tr_gated_matmul(input_x, l_W_i_in_cores, l_W_i_out_cores, input_modes, output_modes,
									 tr_ranks_Win, tr_ranks_Wout, d_in, d_out, batch_size, 'W_i')
		if tr_ranks_R is not None:
			output_R_i = tr_gated_matmul(input_y, l_R_i_in_cores, l_R_i_out_cores, output_modes, output_modes,
										 tr_ranks_R, tr_ranks_R, d_out, d_out, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate (candidate values; tanh, no peephole)
		output_W_z = tr_gated_matmul(input_x, l_W_z_in_cores, l_W_z_out_cores, input_modes, output_modes,
									 tr_ranks_Win, tr_ranks_Wout, d_in, d_out, batch_size, 'W_z')
		if tr_ranks_R is not None:
			output_R_z = tr_gated_matmul(input_y, l_R_z_in_cores, l_R_z_out_cores, output_modes, output_modes,
										 tr_ranks_R, tr_ranks_R, d_out, d_out, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state: c_t = f_t * c_{t-1} + i_t * z_t
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate (peephole reads the NEW state output_c)
		output_W_o = tr_gated_matmul(input_x, l_W_o_in_cores, l_W_o_out_cores, input_modes, output_modes,
									 tr_ranks_Win, tr_ranks_Wout, d_in, d_out, batch_size, 'W_o')
		if tr_ranks_R is not None:
			output_R_o = tr_gated_matmul(input_y, l_R_o_in_cores, l_R_o_out_cores, output_modes, output_modes,
										 tr_ranks_R, tr_ranks_R, d_out, d_out, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output: y_t = o_t * tanh(c_t)
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob=dropout_rate(keep_prob), name='dropout_y'), tf.nn.dropout(output_c, keep_prob=dropout_rate(keep_prob), name='dropout_c')


def _tr_lstm_cell_share(input_x,
						input_y,
						input_c,
						output_dim,
						input_modes,
						output_modes,
						tr_ranks_Win,
						tr_ranks_Wout,
						tr_ranks_R=None,
						weights_initializer=tf.glorot_uniform_initializer,
						weights_regularizer=None,
						biases_initializer=tf.zeros_initializer,
						biases_regularizer=None,
						tfv_train_phase=None,
						keep_prob=0.9,
						name_scope=None):
	""" single LSTM cell in TR format, refer to _lstm_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		tr_ranks_Win: TR ranks of input modes
		tr_ranks_Wout: TR ranks of output modes
		tr_ranks_R: TR ranks of output matrices
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'

	# dropout defination
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# TR gated matmul defination
	def tr_gated_matmul(input, in_cores, out_cores, input_modes, output_modes, tr_ranks_in, tr_ranks_out,d_in,d_out, batch_size, name):
		cur_inp = tf.reshape(input, [batch_size, input_modes[0], -1])
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])

		in_cores[0] = tf.reshape(in_cores[0], [tr_ranks_in[0], input_modes[0], tr_ranks_in[1]])
		in_cores[0] = tf.transpose(in_cores[0], [1, 0, 2])
		in_cores[0] = tf.reshape(in_cores[0], [input_modes[0], -1])
		output = tf.matmul(cur_inp, in_cores[0])
		output = tf.reshape(output, [batch_size, input_modes[1], -1, tr_ranks_in[0], tr_ranks_in[1]])
		output = tf.transpose(output, [0, 2, 3, 1, 4])
		output = tf.reshape(output, [-1, input_modes[1] * tr_ranks_in[1]])
		cur_inp = tf.identity(output)

		for i in range(1, d_in):
			output = tf.matmul(cur_inp, in_cores[i], name=name + '_mal_core_%d' % (i + 1))
			if i == d_in - 1:
				output = tf.reshape(output, [batch_size, tr_ranks_in[0], tr_ranks_in[i + 1]])
				output = tf.reshape(output, [-1, output.shape[-1].value])
			else:
				output = tf.reshape(output, [batch_size, input_modes[i + 1], -1, tr_ranks_in[0], tr_ranks_in[i + 1]])
				output = tf.transpose(output, [0, 2, 3, 1, 4])
				output = tf.reshape(output, [-1, output.shape[-2].value * output.shape[-1].value])
			cur_inp = tf.identity(output)

		out_cores[d_out - 1] = tf.reshape(out_cores[d_out - 1], [tr_ranks_out[d_out - 1], output_modes[d_out - 1], tr_ranks_out[d_out]])
		out_cores[d_out - 1] = tf.transpose(out_cores[d_out - 1], [0, 2, 1])
		out_cores[d_out - 1] = tf.reshape(out_cores[d_out - 1], [-1, output_modes[d_out - 1]])

		for i in range(d_out - 1):
			output = tf.matmul(cur_inp, out_cores[i])
			output = tf.reshape(output, [batch_size, tr_ranks_in[0], -1, output_modes[i], tr_ranks_out[i + 1]])
			if i == d_out - 2:
				output = tf.transpose(output, [0, 2, 3, 1, 4])
				output = tf.reshape(output, [-1, tr_ranks_in[0] * tr_ranks_out[i + 1]])
			else:
				output = tf.reshape(output, [-1, output.shape[-1].value])
			cur_inp = tf.identity(output)
		output = tf.matmul(cur_inp, out_cores[d_out - 1])

		output = tf.reshape(tf.squeeze(output), [batch_size, -1])
		return output

	with tf.variable_scope(name_scope):
		d_in = len(input_modes)
		d_out = len(output_modes)
		batch_size = input_x.shape[0].value
		
		# define variables of forget gate
		l_W_f_in_cores = []
		l_W_f_out_cores = []
		for i in range(d_in):
			# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
			var_in_shape = [tr_ranks_Win[i] * input_modes[i], tr_ranks_Win[i + 1]]
			if  i==1:
				W_f_in_core = tf.get_variable('var_W_forget_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			else:
				W_f_in_core = tf.get_variable('var_W_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			l_W_f_in_cores.append(W_f_in_core)
		for i in range(d_out):
			var_out_shape = [tr_ranks_Wout[i], output_modes[i] * tr_ranks_Wout[i + 1]]
			W_f_out_core = tf.get_variable('var_W_out_core_%d' % (i + 1), var_out_shape,
										   initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			l_W_f_out_cores.append(W_f_out_core)
		if tr_ranks_R is not None:
			l_R_f_in_cores = []
			l_R_f_out_cores = []
			for i in range(d_in):
				# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
				var_in_shape = [tr_ranks_R[i] * output_modes[i], tr_ranks_R[i + 1]]
				R_f_in_core = tf.get_variable('var_R_forget_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
				l_R_f_in_cores.append(R_f_in_core)
			for i in range(d_out):
				var_out_shape = [tr_ranks_R[i], output_modes[i] * tr_ranks_R[i + 1]]
				R_f_out_core = tf.get_variable('var_R_forget_out_core_%d' % (i + 1), var_out_shape,
											   initializer=weights_initializer,
											   regularizer=weights_regularizer, trainable=True)
				l_R_f_out_cores.append(R_f_out_core)
		else:
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of input gate
		l_W_i_in_cores = []
		l_W_i_out_cores = []
		for i in range(d_in):
			# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
			var_in_shape = [tr_ranks_Win[i] * input_modes[i], tr_ranks_Win[i + 1]]
			if i==1:
				W_i_in_core = tf.get_variable('var_W_input_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			else:
				W_i_in_core = tf.get_variable('var_W_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			l_W_i_in_cores.append(W_i_in_core)
		for i in range(d_out):
			var_out_shape = [tr_ranks_Wout[i], output_modes[i] * tr_ranks_Wout[i + 1]]
			W_i_out_core = tf.get_variable('var_W_out_core_%d' % (i + 1), var_out_shape,
										   initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			l_W_i_out_cores.append(W_i_out_core)
		if tr_ranks_R is not None:
			l_R_i_in_cores = []
			l_R_i_out_cores = []
			for i in range(d_in):
				# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
				var_in_shape = [tr_ranks_R[i] * output_modes[i], tr_ranks_R[i + 1]]
				R_i_in_core = tf.get_variable('var_R_input_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
				l_R_i_in_cores.append(R_i_in_core)
			for i in range(d_out):
				var_out_shape = [tr_ranks_R[i], output_modes[i] * tr_ranks_R[i + 1]]
				R_i_out_core = tf.get_variable('var_R_input_out_core_%d' % (i + 1), var_out_shape,
											   initializer=weights_initializer,
											   regularizer=weights_regularizer, trainable=True)
				l_R_i_out_cores.append(R_i_out_core)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of state gate
		l_W_z_in_cores = []
		l_W_z_out_cores = []
		for i in range(d_in):
			# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
			var_in_shape = [tr_ranks_Win[i] * input_modes[i], tr_ranks_Win[i + 1]]
			if i==1:
				W_z_in_core = tf.get_variable('var_W_state_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			else:
				W_z_in_core = tf.get_variable('var_W_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			l_W_z_in_cores.append(W_z_in_core)
		for i in range(d_out):
			var_out_shape = [tr_ranks_Wout[i], output_modes[i] * tr_ranks_Wout[i + 1]]
			W_z_out_core = tf.get_variable('var_W_out_core_%d' % (i + 1), var_out_shape,
										   initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			l_W_z_out_cores.append(W_z_out_core)
		if tr_ranks_R is not None:
			l_R_z_in_cores = []
			l_R_z_out_cores = []
			for i in range(d_in):
				# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
				var_in_shape = [tr_ranks_R[i] * output_modes[i], tr_ranks_R[i + 1]]
				R_z_in_core = tf.get_variable('var_R_state_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
				l_R_z_in_cores.append(R_z_in_core)
			for i in range(d_out):
				var_out_shape = [tr_ranks_R[i], output_modes[i] * tr_ranks_R[i + 1]]
				R_z_out_core = tf.get_variable('var_R_state_out_core_%d' % (i + 1), var_out_shape,
											   initializer=weights_initializer,
											   regularizer=weights_regularizer, trainable=True)
				l_R_z_out_cores.append(R_z_out_core)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of output gate
		l_W_o_in_cores = []
		l_W_o_out_cores = []
		for i in range(d_in):
			# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
			var_in_shape = [tr_ranks_Win[i] * input_modes[i], tr_ranks_Win[i + 1]]
			if i==1:
				W_o_in_core = tf.get_variable('var_W_output_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			else:
				W_o_in_core = tf.get_variable('var_W_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
			l_W_o_in_cores.append(W_o_in_core)
		for i in range(d_out):
			var_out_shape = [tr_ranks_Wout[i], output_modes[i] * tr_ranks_Wout[i + 1]]
			W_o_out_core = tf.get_variable('var_W_out_core_%d' % (i + 1), var_out_shape,
										   initializer=weights_initializer,
										   regularizer=weights_regularizer, trainable=True)
			l_W_o_out_cores.append(W_o_out_core)
		if tr_ranks_R is not None:
			l_R_o_in_cores = []
			l_R_o_out_cores = []
			for i in range(d_in):
				# shape is: (r_{k-1}*m_{k}, n_{k}*r_{k})
				var_in_shape = [tr_ranks_R[i] * output_modes[i], tr_ranks_R[i + 1]]
				R_o_in_core = tf.get_variable('var_R_output_in_core_%d' % (i + 1), var_in_shape,
											  initializer=weights_initializer,
											  regularizer=weights_regularizer, trainable=True)
				l_R_o_in_cores.append(R_o_in_core)
			for i in range(d_out):
				var_out_shape = [tr_ranks_R[i], output_modes[i] * tr_ranks_R[i + 1]]
				R_o_out_core = tf.get_variable('var_R_output_out_core_%d' % (i + 1), var_out_shape,
											   initializer=weights_initializer,
											   regularizer=weights_regularizer, trainable=True)
				l_R_o_out_cores.append(R_o_out_core)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# calculate forget gate
		output_W_f = tr_gated_matmul(input_x, l_W_f_in_cores, l_W_f_out_cores, input_modes, output_modes,
									 tr_ranks_Win,tr_ranks_Wout, d_in,d_out, batch_size,
									 'W_f')
		if tr_ranks_R is not None:
			output_R_f = tr_gated_matmul(input_y, l_R_f_in_cores, l_R_f_out_cores, output_modes, output_modes,
										 tr_ranks_R,tr_ranks_R, d_in,d_out, batch_size,
										 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate
		output_W_i = tr_gated_matmul(input_x, l_W_i_in_cores, l_W_i_out_cores, input_modes, output_modes,
									 tr_ranks_Win,tr_ranks_Wout, d_in,d_out, batch_size,
									 'W_i')
		if tr_ranks_R is not None:
			output_R_i = tr_gated_matmul(input_y, l_R_i_in_cores, l_R_i_out_cores, output_modes, output_modes,
										 tr_ranks_R,tr_ranks_R, d_in,d_out, batch_size,
										 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate
		output_W_z = tr_gated_matmul(input_x, l_W_z_in_cores, l_W_z_out_cores, input_modes, output_modes,
									 tr_ranks_Win,tr_ranks_Wout, d_in,d_out,  batch_size,
									 'W_z')
		if tr_ranks_R is not None:
			output_R_z = tr_gated_matmul(input_y, l_R_z_in_cores, l_R_z_out_cores, output_modes, output_modes,
										 tr_ranks_R,tr_ranks_R, d_in,d_out, batch_size,
										 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate
		output_W_o = tr_gated_matmul(input_x, l_W_o_in_cores, l_W_o_out_cores, input_modes, output_modes,
									 tr_ranks_Win, tr_ranks_Wout, d_in, d_out, batch_size,
									 'W_o')
		if tr_ranks_R is not None:
			output_R_o = tr_gated_matmul(input_y, l_R_o_in_cores, l_R_o_out_cores, output_modes, output_modes,
										 tr_ranks_R,tr_ranks_R, d_in,d_out, batch_size,
										 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob=dropout_rate(keep_prob), name='dropout_y'), tf.nn.dropout(output_c, keep_prob=dropout_rate(keep_prob), name='dropout_c')


def _btd_lstm_cell(input_x,
				   input_y,
				   input_c,
				   output_dim,
				   input_modes,
				   output_modes,
				   cp_rank,
				   tk_ranks_W,
				   tk_ranks_R = None,
				   weights_initializer = tf.glorot_uniform_initializer,
				   weights_regularizer = None,
				   biases_initializer = tf.zeros_initializer,
				   biases_regularizer = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   name_scope = None):
	""" single LSTM cell in BTD (Block-Term Decomposition) format, refer to _lstm_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		cp_rank: CP rank in BTD
		tk_ranks_W: Tucker ranks of input matrices
		tk_ranks_R: Tucker ranks of recurrent matrices; if None, the recurrent
			matrices are kept dense with shape [output_dim, output_dim]
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		tuple (dropout(output_y), dropout(output_c)) - the cell output and the
		new cell state, each of shape [batch_size, output_dim]
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout definition: with a train-phase flag the keep probability is
	# interpolated to 1.0 at test time (p at train time); without the flag,
	# dropout is effectively disabled (rate is always 1.0)
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# BTD gated matmul definition: contracts `input` [batch_size, prod(input_modes)]
	# with the BTD-factorized weight described by `cores`, producing
	# [batch_size, prod(output_modes)]
	def _btd_gated_matmul(input, cores, input_modes, output_modes, cp_rank, tk_ranks, d, batch_size, name):
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1})
		cur_inp = tf.identity(tf.reshape(input, [batch_size, input_modes[0], -1]))
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# accumulate corresponding ith cores in each Tucker
			# NOTE(review): the cp_rank cores (and the kernels below) are summed
			# BEFORE the contraction, whereas _btd_lstm_cell_share contracts each
			# CP term separately and sums the outputs — confirm this cheaper
			# factorized form is the intended model.
			cur_core = cores[cp_rank * i]
			for k in range(1, cp_rank):
				cur_core = tf.add(cur_core, cores[cp_rank * i + k])
			# contraction between input and the ith core, output shape is (batch_size*n_{1}r_{1}*...*n_{k-1}r_{k-1}m_{k+1}...*m_{d}, n_{k}r_{k})
			output = tf.matmul(cur_inp, cur_core, name = name + '_mal_core_%d' % (i + 1))
			# reshape output to (batch_size*n_{1}r_{1}*...*n_{k-1}r_{k-1}, m_{k+1}, m_{k+2}...*m_{d}, n_{k}r_{k})
			if i == d - 1:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32) * np.prod(np.array(tk_ranks[0:i]), dtype = np.int32), 1, -1, output_modes[i] * tk_ranks[i]])
			else:
				output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32) * np.prod(np.array(tk_ranks[0:i]), dtype = np.int32), input_modes[i + 1], -1, output_modes[i] * tk_ranks[i]])
			# exchange m_{k+1} and n_{k}r_{k}, reshape output to (batch_size*n_{1}r_{1}*...*n_{k}r_{k}m_{k+2}...*m_{d}, m_{k+1})
			output = tf.transpose(output, [0, 3, 2, 1])
			output = tf.reshape(output, [-1, output.shape[-1].value])
			if i != d - 1:
				cur_inp = tf.identity(output)
		# reshape and transpose to (batch_size, n_{1}n_{2}...n_{d}, r_{1}r_{2}...r_{d})
		l_n_and_r = []
		l_transposes = []
		for i in range (d):
			l_n_and_r.append(output_modes[i])
			l_n_and_r.append(tk_ranks[i])
			l_transposes.append(i * 2 + 1)
			l_transposes.append((i + 1) * 2)
		output = tf.reshape(tf.squeeze(output), [batch_size] + l_n_and_r)
		output = tf.transpose(output, [0] + l_transposes)
		output = tf.reshape(output, [batch_size, np.prod(np.array(output_modes[0:]), dtype = np.int32), np.prod(np.array(tk_ranks[0:]), dtype = np.int32)])
		# accumulate corresponding kernels in each Tucker
		# (kernels are the last cp_rank entries of `cores`)
		cur_kernel = cores[-1]
		for k in range(1, cp_rank):
			cur_kernel = tf.add(cur_kernel, cores[-1 - k])
		# contraction between output and the dth-order kernel
		output = tf.einsum('bnr,r->bn', output, cur_kernel)
		return output

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input_x.shape[0].value

		# define variables of forget gate
		# list layout: cores[cp_rank*i + j] is the jth CP copy of the ith mode
		# core; the last cp_rank entries are the Tucker kernels
		l_W_f_cores = []
		for i in range(d):
			# shape is: (m_{k}, n_{k}*r_{k})
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				W_f_core = tf.get_variable('var_W_forget_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_f_cores.append(W_f_core)
		# Tucker kernel with shape (r_{1}*r_{2}*...*r_{d})
		for k in range(cp_rank):
			W_f_kernel = tf.get_variable('var_W_forget_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_W[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_f_cores.append(W_f_kernel)
		if tk_ranks_R is not None:
			l_R_f_cores = []
			for i in range(d):
				# shape is: (n_{k}, n_{k}*r_{k})
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_f_core = tf.get_variable('var_R_forget_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_f_cores.append(R_f_core)
			# Tucker kernel with shape (r_{1}*r_{2}*...*r_{d})
			for k in range(cp_rank):
				R_f_kernel = tf.get_variable('var_R_forget_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_R[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_f_cores.append(R_f_kernel)
		else:
			# dense recurrent matrix when no Tucker ranks are given
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of input gate
		l_W_i_cores = []
		for i in range(d):
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				W_i_core = tf.get_variable('var_W_input_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_i_cores.append(W_i_core)
		for k in range(cp_rank):
			W_i_kernel = tf.get_variable('var_W_input_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_W[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_i_cores.append(W_i_kernel)
		if tk_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_i_core = tf.get_variable('var_R_input_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_i_cores.append(R_i_core)
			for k in range(cp_rank):
				R_i_kernel = tf.get_variable('var_R_input_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_R[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_i_cores.append(R_i_kernel)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate (no peephole)
		l_W_z_cores = []
		for i in range(d):
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				W_z_core = tf.get_variable('var_W_state_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_z_cores.append(W_z_core)
		for k in range(cp_rank):
			W_z_kernel = tf.get_variable('var_W_state_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_W[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_z_cores.append(W_z_kernel)
		if tk_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_z_core = tf.get_variable('var_R_state_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_z_cores.append(R_z_core)
			for k in range(cp_rank):
				R_z_kernel = tf.get_variable('var_R_state_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_R[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_z_cores.append(R_z_kernel)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of output gate
		l_W_o_cores = []
		for i in range(d):
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				W_o_core = tf.get_variable('var_W_output_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_o_cores.append(W_o_core)
		for k in range(cp_rank):
			W_o_kernel = tf.get_variable('var_W_output_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_W[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_o_cores.append(W_o_kernel)
		if tk_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_o_core = tf.get_variable('var_R_output_core_%d%d' % ((i + 1), (j + 1)), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_o_cores.append(R_o_core)
			for k in range(cp_rank):
				R_o_kernel = tf.get_variable('var_R_output_kernel_%d' % (k + 1), [np.prod(np.array(tk_ranks_R[0:]), dtype = np.int32)], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_o_cores.append(R_o_kernel)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# calculate forget gate: f = sigmoid(W_f x + R_f y + p_f * c + b_f)
		output_W_f = _btd_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d, batch_size, 'W_f')
		if tk_ranks_R is not None:
			output_R_f = _btd_gated_matmul(input_y, l_R_f_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name = 'R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate: i = sigmoid(W_i x + R_i y + p_i * c + b_i)
		output_W_i = _btd_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d, batch_size, 'W_i')
		if tk_ranks_R is not None:
			output_R_i = _btd_gated_matmul(input_y, l_R_i_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name = 'R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate: z = tanh(W_z x + R_z y + b_z)
		output_W_z = _btd_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d, batch_size, 'W_z')
		if tk_ranks_R is not None:
			output_R_z = _btd_gated_matmul(input_y, l_R_z_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name = 'R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state: c' = f * c + i * z
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate: o = sigmoid(W_o x + R_o y + p_o * c' + b_o)
		# (peephole uses the NEW state output_c)
		output_W_o = _btd_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d, batch_size, 'W_o')
		if tk_ranks_R is not None:
			output_R_o = _btd_gated_matmul(input_y, l_R_o_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name = 'R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output: y' = o * tanh(c')
		output_y = output_o * tf.nn.tanh(output_c)

	# apply train-time dropout to both the output and the state before returning
	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _btd_lstm_cell_share(input_x,
						 input_y,
						 input_c,
						 output_dim,
						 input_modes,
						 output_modes,
						 cp_rank,
						 tk_ranks_W,
						 tk_ranks_R=None,
						 weights_initializer=tf.glorot_uniform_initializer,
						 weights_regularizer=None,
						 biases_initializer=tf.zeros_initializer,
						 biases_regularizer=None,
						 tfv_train_phase=None,
						 keep_prob=0.9,
						 name_scope=None):
	""" single LSTM cell in BTD format with weights partially SHARED across the
	four gates (mode-i>0 input cores and input kernels use gate-independent
	variable names), refer to _lstm_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		cp_rank: CP rank in BTD
		tk_ranks_W: Tucker ranks of input matrices
		tk_ranks_R: Tucker ranks of recurrent matrices; if None, the recurrent
			matrices are kept dense with shape [output_dim, output_dim]
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	returns:
		tuple (dropout(output_y), dropout(output_c)) - the cell output and the
		new cell state, each of shape [batch_size, output_dim]
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout definition: with a train-phase flag the keep probability is
	# interpolated to 1.0 at test time (p at train time); without the flag,
	# dropout is effectively disabled (rate is always 1.0)
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# BTD gated matmul definition: contracts `input` [batch_size, prod(input_modes)]
	# with each of the cp_rank Tucker terms separately and sums the resulting
	# outputs (true block-term sum, unlike _btd_lstm_cell which sums the cores
	# before contracting); result is [batch_size, prod(output_modes)]
	def _btd_gated_matmul(input, cores, input_modes, output_modes, cp_rank, tk_ranks, d, batch_size, name):
		for c in range(cp_rank):
			cur_inp = tf.identity(tf.reshape(input, [batch_size, input_modes[0], -1]))
			cur_inp = tf.transpose(cur_inp, [0, 2, 1])
			cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
			for i in range(d):
				# pick the ith core of the cth Tucker term
				cur_core = cores[cp_rank * i + c]
				# contraction between input and the ith core, output shape is (batch_size*n_{1}r_{1}*...*n_{k-1}r_{k-1}m_{k+1}...*m_{d}, n_{k}r_{k})
				output = tf.matmul(cur_inp, cur_core, name=name + '_mal_core_%d%d' % ((i + 1), (c + 1)))
				# reshape output to (batch_size*n_{1}r_{1}*...*n_{k-1}r_{k-1}, m_{k+1}, m_{k+2}...*m_{d}, n_{k}r_{k})
				if i == d - 1:
					output = tf.reshape(output, [
						batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32) * np.prod(
							np.array(tk_ranks[0:i]), dtype=np.int32), 1, -1, output_modes[i] * tk_ranks[i]])
				else:
					output = tf.reshape(output, [
						batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32) * np.prod(
							np.array(tk_ranks[0:i]), dtype=np.int32), input_modes[i + 1], -1,
						output_modes[i] * tk_ranks[i]])
				# exchange m_{k+1} and n_{k}r_{k}, reshape output to (batch_size*n_{1}r_{1}*...*n_{k}r_{k}m_{k+2}...*m_{d}, m_{k+1})
				output = tf.transpose(output, [0, 3, 2, 1])
				output = tf.reshape(output, [-1, output.shape[-1].value])
				if i != d - 1:
					cur_inp = tf.identity(output)
			# reshape and transpose to (batch_size, n_{1}n_{2}...n_{d}, r_{1}r_{2}...r_{d})
			l_n_and_r = []
			l_transposes = []
			for i in range(d):
				l_n_and_r.append(output_modes[i])
				l_n_and_r.append(tk_ranks[i])
				l_transposes.append(i * 2 + 1)
				l_transposes.append((i + 1) * 2)
			output = tf.reshape(tf.squeeze(output), [batch_size] + l_n_and_r)
			output = tf.transpose(output, [0] + l_transposes)
			output = tf.reshape(output, [batch_size, np.prod(np.array(output_modes[0:]), dtype=np.int32),
										 np.prod(np.array(tk_ranks[0:]), dtype=np.int32)])
			# each CP term uses its own Tucker kernel (no summation across terms);
			# kernels are stored after all d*cp_rank cores
			cur_kernel = cores[cp_rank * d + c]
			# contraction between output and the dth-order kernel
			output = tf.einsum('bnr,r->bn', output, cur_kernel)
			# accumulate the cp_rank term outputs
			if c == 0:
				output1 = output
			else:
				output1 = tf.add(output1, output)
		return output1

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input_x.shape[0].value

		# define variables of forget gate
		# list layout: cores[cp_rank*i + j] is the jth CP copy of the ith mode
		# core; the last cp_rank entries are the Tucker kernels
		l_W_f_cores = []
		for i in range(d):
			# shape is: (m_{k}, n_{k}*r_{k})
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				if i==0:
					# only the first-mode core is gate-specific
					W_f_core = tf.get_variable('var_W_forget_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
				else:
					# NOTE(review): cores for modes i>0 (and the input kernels
					# below) are requested under gate-independent names
					# ('var_W_core_*', 'var_W_kernel_*') so the four gates share
					# them; re-requesting the same name in one scope needs reuse
					# enabled (e.g. tf.AUTO_REUSE) — confirm the caller sets it.
					W_f_core = tf.get_variable('var_W_core_%d%d' % ((i + 1), (j + 1)), var_shape,
										initializer=weights_initializer, regularizer=weights_regularizer,
										   trainable=True)
				l_W_f_cores.append(W_f_core)
		# Tucker kernel with shape (r_{1}*r_{2}*...*r_{d}), shared across gates
		for k in range(cp_rank):
			W_f_kernel = tf.get_variable('var_W_kernel_%d' % (k + 1),
										 [np.prod(np.array(tk_ranks_W[0:]), dtype=np.int32)],
										 initializer=weights_initializer, regularizer=weights_regularizer,
										 trainable=True)
			l_W_f_cores.append(W_f_kernel)
		if tk_ranks_R is not None:
			l_R_f_cores = []
			for i in range(d):
				# shape is: (n_{k}, n_{k}*r_{k}); recurrent cores are NOT shared
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_f_core = tf.get_variable('var_R_forget_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
					l_R_f_cores.append(R_f_core)
			# Tucker kernel with shape (r_{1}*r_{2}*...*r_{d})
			for k in range(cp_rank):
				R_f_kernel = tf.get_variable('var_R_forget_kernel_%d' % (k + 1),
											 [np.prod(np.array(tk_ranks_R[0:]), dtype=np.int32)],
											 initializer=weights_initializer, regularizer=weights_regularizer,
											 trainable=True)
				l_R_f_cores.append(R_f_kernel)
		else:
			# dense recurrent matrix when no Tucker ranks are given
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of input gate
		l_W_i_cores = []
		for i in range(d):
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				if i==0:
					W_i_core = tf.get_variable('var_W_input_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
				else:
					# shared across gates (see note in the forget-gate section)
					W_i_core = tf.get_variable('var_W_core_%d%d' % ((i + 1), (j + 1)), var_shape,
										   initializer=weights_initializer, regularizer=weights_regularizer,
										   trainable=True)
				l_W_i_cores.append(W_i_core)
		for k in range(cp_rank):
			W_i_kernel = tf.get_variable('var_W_kernel_%d' % (k + 1),
										 [np.prod(np.array(tk_ranks_W[0:]), dtype=np.int32)],
										 initializer=weights_initializer, regularizer=weights_regularizer,
										 trainable=True)
			l_W_i_cores.append(W_i_kernel)
		if tk_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_i_core = tf.get_variable('var_R_input_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
					l_R_i_cores.append(R_i_core)
			for k in range(cp_rank):
				R_i_kernel = tf.get_variable('var_R_input_kernel_%d' % (k + 1),
											 [np.prod(np.array(tk_ranks_R[0:]), dtype=np.int32)],
											 initializer=weights_initializer, regularizer=weights_regularizer,
											 trainable=True)
				l_R_i_cores.append(R_i_kernel)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of state gate (no peephole)
		l_W_z_cores = []
		for i in range(d):
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				if i==0:
					W_z_core = tf.get_variable('var_W_state_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
				else:
					# shared across gates (see note in the forget-gate section)
					W_z_core = tf.get_variable('var_W_core_%d%d' % ((i + 1), (j + 1)), var_shape,
										   initializer=weights_initializer, regularizer=weights_regularizer,
										   trainable=True)
				l_W_z_cores.append(W_z_core)
		for k in range(cp_rank):
			W_z_kernel = tf.get_variable('var_W_kernel_%d' % (k + 1),
										 [np.prod(np.array(tk_ranks_W[0:]), dtype=np.int32)],
										 initializer=weights_initializer, regularizer=weights_regularizer,
										 trainable=True)
			l_W_z_cores.append(W_z_kernel)
		if tk_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_z_core = tf.get_variable('var_R_state_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
					l_R_z_cores.append(R_z_core)
			for k in range(cp_rank):
				R_z_kernel = tf.get_variable('var_R_state_kernel_%d' % (k + 1),
											 [np.prod(np.array(tk_ranks_R[0:]), dtype=np.int32)],
											 initializer=weights_initializer, regularizer=weights_regularizer,
											 trainable=True)
				l_R_z_cores.append(R_z_kernel)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of output gate
		l_W_o_cores = []
		for i in range(d):
			var_shape = [input_modes[i], output_modes[i] * tk_ranks_W[i]]
			for j in range(cp_rank):
				if i==0:
					W_o_core = tf.get_variable('var_W_output_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
				else:
					# shared across gates (see note in the forget-gate section)
					W_o_core = tf.get_variable('var_W_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
				l_W_o_cores.append(W_o_core)
		for k in range(cp_rank):
			W_o_kernel = tf.get_variable('var_W_kernel_%d' % (k + 1),
										 [np.prod(np.array(tk_ranks_W[0:]), dtype=np.int32)],
										 initializer=weights_initializer, regularizer=weights_regularizer,
										 trainable=True)
			l_W_o_cores.append(W_o_kernel)
		if tk_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				var_shape = [output_modes[i], output_modes[i] * tk_ranks_R[i]]
				for j in range(cp_rank):
					R_o_core = tf.get_variable('var_R_output_core_%d%d' % ((i + 1), (j + 1)), var_shape,
											   initializer=weights_initializer, regularizer=weights_regularizer,
											   trainable=True)
					l_R_o_cores.append(R_o_core)
			for k in range(cp_rank):
				R_o_kernel = tf.get_variable('var_R_output_kernel_%d' % (k + 1),
											 [np.prod(np.array(tk_ranks_R[0:]), dtype=np.int32)],
											 initializer=weights_initializer, regularizer=weights_regularizer,
											 trainable=True)
				l_R_o_cores.append(R_o_kernel)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# calculate forget gate: f = sigmoid(W_f x + R_f y + p_f * c + b_f)
		output_W_f = _btd_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d,
									   batch_size, 'W_f')
		if tk_ranks_R is not None:
			output_R_f = _btd_gated_matmul(input_y, l_R_f_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d,
										   batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate: i = sigmoid(W_i x + R_i y + p_i * c + b_i)
		output_W_i = _btd_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d,
									   batch_size, 'W_i')
		if tk_ranks_R is not None:
			output_R_i = _btd_gated_matmul(input_y, l_R_i_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d,
										   batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate: z = tanh(W_z x + R_z y + b_z)
		output_W_z = _btd_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d,
									   batch_size, 'W_z')
		if tk_ranks_R is not None:
			output_R_z = _btd_gated_matmul(input_y, l_R_z_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d,
										   batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state: c' = f * c + i * z
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate: o = sigmoid(W_o x + R_o y + p_o * c' + b_o)
		# (peephole uses the NEW state output_c)
		output_W_o = _btd_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, cp_rank, tk_ranks_W, d,
									   batch_size, 'W_o')
		if tk_ranks_R is not None:
			output_R_o = _btd_gated_matmul(input_y, l_R_o_cores, output_modes, output_modes, cp_rank, tk_ranks_R, d,
										   batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output: y' = o * tanh(c')
		output_y = output_o * tf.nn.tanh(output_c)

	# apply train-time dropout to both the output and the state before returning
	return tf.nn.dropout(output_y, keep_prob=dropout_rate(keep_prob), name='dropout_y'), tf.nn.dropout(output_c, keep_prob=dropout_rate(keep_prob), name='dropout_c')


def _ktd_lstm_cell(input_x,
				   input_y,
				   input_c,
				   output_dim,
				   input_modes,
				   output_modes,
				   ktd_rank,
				   cp_ranks_W,
				   cp_ranks_R = None,
				   weights_initializer = tf.glorot_uniform_initializer,
				   weights_regularizer = None,
				   biases_initializer = tf.zeros_initializer,
				   biases_regularizer = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   name_scope = None):
	""" single LSTM cell whose input (W_*) and, optionally, recurrent (R_*) weight
	matrices are stored in KTD (Kronecker tensor decomposition) format; gate
	equations follow _lstm_cell ('LSTM: A Search Space Odyssey', with peepholes)
	params:
		input_x: input from previous layer - [batch_size, input_dim]; batch_size must be
			statically known (input_x.shape[0].value is read below)
		input_y: output from the last time step - [batch_size, output_dim], is 0 for the initial time
		input_c: cell state from the last time step - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output, must equal np.prod(output_modes)
		input_modes: factorization of input_dim - one mode m_{i} per tensor order
		output_modes: factorization of output_dim - one mode n_{i} per tensor order,
			must have the same length d as input_modes
		ktd_rank: number K of Kronecker terms in the decomposition
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n);
			when None, the recurrent matrices R_f/R_i/R_z/R_o are kept dense [output_dim, output_dim]
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training; when None, dropout is disabled (keep prob forced to 1.0)
		keep_prob: keeping probability of dropout applied to both returned tensors
		name_scope: variable scope under which all cell variables are created
	returns:
		(output_y, output_c): current output and current cell state, each
		[batch_size, output_dim], both with dropout applied
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout definition: during training the given keep prob p is used as-is;
	# otherwise the lambda collapses to a constant 1.0, i.e. no dropout at inference
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KTD gated matmul definition
	def _ktd_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size, name):
		""" contract input [batch_size, prod(input_modes)] with the KTD cores and
		return [batch_size, prod(output_modes)], i.e. input @ W with W implicitly
		reconstructed from its factors, one order i at a time
		params:
			input: tensor of shape [batch_size, prod(input_modes)]
			cores: flat list of 2*ktd_rank*d factor matrices ordered as
				A_{i,k}, B_{i,k} for i in range(d), k in range(ktd_rank),
				optionally followed by a final kernel vector when d is odd
			cp_ranks_in / cp_ranks_out: the ktd_rank CP ranks r_{k}^{A} / r_{k}^{B}
			d: tensor order, len(input_modes)
			name: unused at present (kept for symmetry with the dense tf.matmul branch)
		"""
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1})
		cur_inp = tf.identity(tf.reshape(input, [batch_size, input_modes[0], -1]))
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# recover and concat all K pairs matrices
			l_matrices = []
			for k in range(ktd_rank):
				# recover (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B}) to (m_{i}, n_{i}, r_{k}^{A}*r_{k}^{B})
				# via a broadcasted outer product over the two rank axes
				matrix_A = tf.reshape(cores[2 * ktd_rank * i + 2 * k], [input_modes[i], 1, cp_ranks_in[k], 1])
				matrix_B = tf.reshape(cores[2 * ktd_rank * i + 2 * k + 1], [1, output_modes[i], 1, cp_ranks_out[k]])
				recover = tf.reshape(tf.multiply(matrix_A, matrix_B), [input_modes[i], output_modes[i], cp_ranks_in[k] * cp_ranks_out[k]])
				l_matrices.append(recover)
			# reshape the ith CP core to (m_{i}, r^{A}*r^{B}, n_{i}); after the concat
			# the middle axis has size sum_{k} r_{k}^{A}*r_{k}^{B}
			cur_core = tf.transpose(tf.concat(l_matrices, axis = -1), [0, 2, 1])
			# contraction between input and the ith core, only m_{i} if i is even and m_{i}*r^{A}*r^{B} when i is odd:
			# even steps introduce the rank axis, the following odd step consumes it again
			if i % 2 == 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, r^{A}*r^{B}, n_{i})
				output = tf.einsum('bm,mrn->brn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1}, r^{A}*r^{B})
				if i == d - 1:
					# last order: there is no m_{i+1} left, keep a singleton axis instead
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, cur_core.shape[1], output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, cur_core.shape[1], output_modes[i]])
				# move the freshly produced n_{i} axis into the flattened batch part
				output = tf.transpose(output, [0, 4, 2, 1, 3])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1, cur_core.shape[1]])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1], cur_core.shape[1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
			if i % 2 != 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, n_{i})
				output = tf.einsum('bmr,mrn->bn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1})
				if i == d - 1:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, output_modes[i]])
				output = tf.transpose(output, [0, 3, 2, 1])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
		# if the value of order is odd, there is still a vector whose dimension is r^{A}*r^{B} shall be contracted
		if d % 2 != 0:
			# NOTE(review): cores[-1] is rank-1 while tf.matmul expects both operands of
			# rank >= 2, and its length np.prod(all 2K ranks) matches the concatenated
			# rank axis sum_k r_k^A*r_k^B only when ktd_rank == 1 — confirm this path
			# is exercised with odd d and ktd_rank > 1
			output = tf.matmul(tf.squeeze(output), cores[-1])
		# reshape to (batch_size, n_{1}*...*n_{d})
		output = tf.reshape(output, [batch_size, -1])
		return output

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		# requires a statically known batch dimension (None would break the reshapes above)
		batch_size = input_x.shape[0].value

		# define variables of forget gate: per order i and Kronecker term k, one pair
		# of CP factor matrices (A on the input modes, B on the output modes)
		l_W_f_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_f_core_A = tf.get_variable('var_W_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_f_core_B = tf.get_variable('var_W_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_f_cores.append(W_f_core_A)
				l_W_f_cores.append(W_f_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B}), consumed by the odd-d tail of _ktd_gated_matmul
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_f_core_kernel = tf.get_variable('var_W_forget_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_f_cores.append(W_f_core_kernel)
		if cp_ranks_R is not None:
			# recurrent weights in KTD format as well
			l_R_f_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_f_core_A = tf.get_variable('var_R_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_f_core_B = tf.get_variable('var_R_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_f_cores.append(R_f_core_A)
					l_R_f_cores.append(R_f_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_f_core_kernel = tf.get_variable('var_R_forget_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_f_cores.append(R_f_core_kernel)
		else:
			# dense fallback recurrent matrix, same as in _lstm_cell
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		# peephole and bias stay dense vectors regardless of the decomposition
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of input gate (same layout as the forget gate)
		l_W_i_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_i_core_A = tf.get_variable('var_W_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_i_core_B = tf.get_variable('var_W_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_i_cores.append(W_i_core_A)
				l_W_i_cores.append(W_i_core_B)
		if d % 2 != 0:
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_i_core_kernel = tf.get_variable('var_W_input_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_i_cores.append(W_i_core_kernel)
		if cp_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_i_core_A = tf.get_variable('var_R_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_i_core_B = tf.get_variable('var_R_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_i_cores.append(R_i_core_A)
					l_R_i_cores.append(R_i_core_B)
			if d % 2 != 0:
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_i_core_kernel = tf.get_variable('var_R_input_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_i_cores.append(R_i_core_kernel)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate (no peephole)
		l_W_z_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_z_core_A = tf.get_variable('var_W_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_z_core_B = tf.get_variable('var_W_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_z_cores.append(W_z_core_A)
				l_W_z_cores.append(W_z_core_B)
		if d % 2 != 0:
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_z_core_kernel = tf.get_variable('var_W_state_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_z_cores.append(W_z_core_kernel)
		if cp_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_z_core_A = tf.get_variable('var_R_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_z_core_B = tf.get_variable('var_R_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_z_cores.append(R_z_core_A)
					l_R_z_cores.append(R_z_core_B)
			if d % 2 != 0:
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_z_core_kernel = tf.get_variable('var_R_state_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_z_cores.append(R_z_core_kernel)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of output gate
		l_W_o_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_o_core_A = tf.get_variable('var_W_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_o_core_B = tf.get_variable('var_W_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_o_cores.append(W_o_core_A)
				l_W_o_cores.append(W_o_core_B)
		if d % 2 != 0:
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_o_core_kernel = tf.get_variable('var_W_output_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_o_cores.append(W_o_core_kernel)
		if cp_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_o_core_A = tf.get_variable('var_R_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_o_core_B = tf.get_variable('var_R_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_o_cores.append(R_o_core_A)
					l_R_o_cores.append(R_o_core_B)
			if d % 2 != 0:
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_o_core_kernel = tf.get_variable('var_R_output_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_o_cores.append(R_o_core_kernel)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# calculate forget gate: f = sigmoid(x W_f + y R_f + c ∘ p_f + b_f)
		output_W_f = _ktd_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_f')
		if cp_ranks_R is not None:
			output_R_f = _ktd_gated_matmul(input_y, l_R_f_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name = 'R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate: i = sigmoid(x W_i + y R_i + c ∘ p_i + b_i)
		output_W_i = _ktd_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_i')
		if cp_ranks_R is not None:
			output_R_i = _ktd_gated_matmul(input_y, l_R_i_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name = 'R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate (block input): z = tanh(x W_z + y R_z + b_z)
		output_W_z = _ktd_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_z')
		if cp_ranks_R is not None:
			output_R_z = _ktd_gated_matmul(input_y, l_R_z_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name = 'R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state: c' = f ∘ c + i ∘ z
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate: o = sigmoid(x W_o + y R_o + c' ∘ p_o + b_o);
		# note the output-gate peephole uses the NEW state output_c
		output_W_o = _ktd_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_o')
		if cp_ranks_R is not None:
			output_R_o = _ktd_gated_matmul(input_y, l_R_o_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name = 'R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output: y' = o ∘ tanh(c')
		output_y = output_o * tf.nn.tanh(output_c)

	# dropout is applied to both the emitted output and the emitted state
	return tf.nn.dropout(output_y, keep_prob = dropout_rate(keep_prob), name = 'dropout_y'), tf.nn.dropout(output_c, keep_prob = dropout_rate(keep_prob), name = 'dropout_c')


def _ktd_lstm_cell_share(input_x,
						 input_y,
						 input_c,
						 output_dim,
						 input_modes,
						 output_modes,
						 ktd_rank,
						 cp_ranks_W,
						 cp_ranks_R=None,
						 weights_initializer=tf.glorot_uniform_initializer,
						 weights_regularizer=None,
						 biases_initializer=tf.zeros_initializer,
						 biases_regularizer=None,
						 tfv_train_phase=None,
						 keep_prob=0.9,
						 name_scope=None):
	""" single LSTM cell in KTD format, refer to _lstm_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_y: input from the last time - [batch_size, output_dim], is 0 for the initial time
		input_c: state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n)
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		name_scope:
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout defination
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KTD gated matmul defination
	def _ktd_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size,
						  name):
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1})
		cur_inp = tf.identity(tf.reshape(input, [batch_size, input_modes[0], -1]))
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# recover and concat all K pairs matrices
			l_matrices = []
			for k in range(ktd_rank):
				# recover (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B}) to (m_{i}, n_{i}, r_{k}^{A}*r_{k}^{B})
				matrix_A = tf.reshape(cores[2 * ktd_rank * i + 2 * k], [input_modes[i], 1, cp_ranks_in[k], 1])
				matrix_B = tf.reshape(cores[2 * ktd_rank * i + 2 * k + 1], [1, output_modes[i], 1, cp_ranks_out[k]])
				recover = tf.reshape(tf.multiply(matrix_A, matrix_B),
									 [input_modes[i], output_modes[i], cp_ranks_in[k] * cp_ranks_out[k]])
				l_matrices.append(recover)
			# reshape the ith CP core to (m_{i}, r^{A}*r^{B}, n_{i})
			cur_core = tf.transpose(tf.concat(l_matrices, axis=-1), [0, 2, 1])
			# contraction between input and the ith core, only m_{i} if i is even and m_{i}*r^{A}*r^{B} when i is odd
			if i % 2 == 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, r^{A}*r^{B}, n_{i})
				output = tf.einsum('bm,mrn->brn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1}, r^{A}*r^{B})
				if i == d - 1:
					output = tf.reshape(output,
										[batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32), 1, -1,
										 cur_core.shape[1], output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32),
												 input_modes[i + 1], -1, cur_core.shape[1], output_modes[i]])
				output = tf.transpose(output, [0, 4, 2, 1, 3])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1, cur_core.shape[1]])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1], cur_core.shape[1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
			if i % 2 != 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, n_{i})
				output = tf.einsum('bmr,mrn->bn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1})
				if i == d - 1:
					output = tf.reshape(output,
										[batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32), 1, -1,
										 output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype=np.int32),
												 input_modes[i + 1], -1, output_modes[i]])
				output = tf.transpose(output, [0, 3, 2, 1])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
		# if the value of order is odd, there is still a vector whose dimension is r^{A}*r^{B} shall be contracted
		if d % 2 != 0:
			output = tf.matmul(tf.squeeze(output), cores[-1])
		# reshape to (batch_size, n_{1}*...*n_{d})
		output = tf.reshape(output, [batch_size, -1])
		return output

	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input_x.shape[0].value

		# define variables of forget gate
		l_W_f_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					W_f_core_A = tf.get_variable('var_W_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_f_core_B = tf.get_variable('var_W_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				else:
					W_f_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_f_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				l_W_f_cores.append(W_f_core_A)
				l_W_f_cores.append(W_f_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype=np.int32)]
			W_f_core_kernel = tf.get_variable('var_W_forget_core_kernel', var_shape_kernel,
											  initializer=weights_initializer, regularizer=weights_regularizer,
											  trainable=True)
			l_W_f_cores.append(W_f_core_kernel)
		if cp_ranks_R is not None:
			l_R_f_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_f_core_A = tf.get_variable('var_R_forget_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					R_f_core_B = tf.get_variable('var_R_forget_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					l_R_f_cores.append(R_f_core_A)
					l_R_f_cores.append(R_f_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype=np.int32)]
				R_f_core_kernel = tf.get_variable('var_R_forget_core_kernel', var_shape_kernel,
												  initializer=weights_initializer, regularizer=weights_regularizer,
												  trainable=True)
				l_R_f_cores.append(R_f_core_kernel)
		else:
			R_f = tf.get_variable('var_recurrent_forget', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_f = tf.get_variable('var_peephole_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_f = tf.get_variable('var_bias_forget', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of input gate
		l_W_i_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					W_i_core_A = tf.get_variable('var_W_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_i_core_B = tf.get_variable('var_W_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				else:
					W_i_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_i_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				l_W_i_cores.append(W_i_core_A)
				l_W_i_cores.append(W_i_core_B)
		if d % 2 != 0:
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype=np.int32)]
			W_i_core_kernel = tf.get_variable('var_W_input_core_kernel', var_shape_kernel,
											  initializer=weights_initializer, regularizer=weights_regularizer,
											  trainable=True)
			l_W_i_cores.append(W_i_core_kernel)
		if cp_ranks_R is not None:
			l_R_i_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_i_core_A = tf.get_variable('var_R_input_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					R_i_core_B = tf.get_variable('var_R_input_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					l_R_i_cores.append(R_i_core_A)
					l_R_i_cores.append(R_i_core_B)
			if d % 2 != 0:
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype=np.int32)]
				R_i_core_kernel = tf.get_variable('var_R_input_core_kernel', var_shape_kernel,
												  initializer=weights_initializer, regularizer=weights_regularizer,
												  trainable=True)
				l_R_i_cores.append(R_i_core_kernel)
		else:
			R_i = tf.get_variable('var_recurrent_input', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_i = tf.get_variable('var_peephole_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_i = tf.get_variable('var_bias_input', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of state gate (no peephole)
		l_W_z_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					W_z_core_A = tf.get_variable('var_W_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_z_core_B = tf.get_variable('var_W_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				else:
					W_z_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_z_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				l_W_z_cores.append(W_z_core_A)
				l_W_z_cores.append(W_z_core_B)
		if d % 2 != 0:
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype=np.int32)]
			W_z_core_kernel = tf.get_variable('var_W_state_core_kernel', var_shape_kernel,
											  initializer=weights_initializer, regularizer=weights_regularizer,
											  trainable=True)
			l_W_z_cores.append(W_z_core_kernel)
		if cp_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_z_core_A = tf.get_variable('var_R_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					R_z_core_B = tf.get_variable('var_R_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					l_R_z_cores.append(R_z_core_A)
					l_R_z_cores.append(R_z_core_B)
			if d % 2 != 0:
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype=np.int32)]
				R_z_core_kernel = tf.get_variable('var_R_state_core_kernel', var_shape_kernel,
												  initializer=weights_initializer, regularizer=weights_regularizer,
												  trainable=True)
				l_R_z_cores.append(R_z_core_kernel)
		else:
			R_z = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		b_z = tf.get_variable('var_bias_state', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# define variables of output gate
		l_W_o_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					W_o_core_A = tf.get_variable('var_W_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_o_core_B = tf.get_variable('var_W_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				else:
					W_o_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					W_o_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
				l_W_o_cores.append(W_o_core_A)
				l_W_o_cores.append(W_o_core_B)
		if d % 2 != 0:
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype=np.int32)]
			W_o_core_kernel = tf.get_variable('var_W_output_core_kernel', var_shape_kernel,
											  initializer=weights_initializer, regularizer=weights_regularizer,
											  trainable=True)
			l_W_o_cores.append(W_o_core_kernel)
		if cp_ranks_R is not None:
			l_R_o_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_o_core_A = tf.get_variable('var_R_output_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					R_o_core_B = tf.get_variable('var_R_output_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B,
												 initializer=weights_initializer, regularizer=weights_regularizer,
												 trainable=True)
					l_R_o_cores.append(R_o_core_A)
					l_R_o_cores.append(R_o_core_B)
			if d % 2 != 0:
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype=np.int32)]
				R_o_core_kernel = tf.get_variable('var_R_output_core_kernel', var_shape_kernel,
												  initializer=weights_initializer, regularizer=weights_regularizer,
												  trainable=True)
				l_R_o_cores.append(R_o_core_kernel)
		else:
			R_o = tf.get_variable('var_recurrent_output', [output_dim, output_dim], initializer=weights_initializer,
								  regularizer=weights_regularizer, trainable=True)
		p_o = tf.get_variable('var_peephole_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)
		b_o = tf.get_variable('var_bias_output', [output_dim], initializer=biases_initializer,
							  regularizer=biases_regularizer, trainable=True)

		# calculate forget gate
		output_W_f = _ktd_gated_matmul(input_x, l_W_f_cores, input_modes, output_modes, ktd_rank,
									   cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_f')
		if cp_ranks_R is not None:
			output_R_f = _ktd_gated_matmul(input_y, l_R_f_cores, output_modes, output_modes, ktd_rank,
										   cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_f')
		else:
			output_R_f = tf.matmul(input_y, R_f, name='R_f')
		output_f = tf.nn.sigmoid(output_W_f + output_R_f + input_c * p_f + b_f)

		# calculate input gate
		output_W_i = _ktd_gated_matmul(input_x, l_W_i_cores, input_modes, output_modes, ktd_rank,
									   cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_i')
		if cp_ranks_R is not None:
			output_R_i = _ktd_gated_matmul(input_y, l_R_i_cores, output_modes, output_modes, ktd_rank,
										   cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_i')
		else:
			output_R_i = tf.matmul(input_y, R_i, name='R_i')
		output_i = tf.nn.sigmoid(output_W_i + output_R_i + input_c * p_i + b_i)

		# calculate state gate
		output_W_z = _ktd_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, ktd_rank,
									   cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_z')
		if cp_ranks_R is not None:
			output_R_z = _ktd_gated_matmul(input_y, l_R_z_cores, output_modes, output_modes, ktd_rank,
										   cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_y, R_z, name='R_z')
		output_z = tf.nn.tanh(output_W_z + output_R_z + b_z)

		# calculate current state
		output_c = output_f * input_c + output_i * output_z

		# calculate output gate
		output_W_o = _ktd_gated_matmul(input_x, l_W_o_cores, input_modes, output_modes, ktd_rank,
									   cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_o')
		if cp_ranks_R is not None:
			output_R_o = _ktd_gated_matmul(input_y, l_R_o_cores, output_modes, output_modes, ktd_rank,
										   cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_o')
		else:
			output_R_o = tf.matmul(input_y, R_o, name='R_o')
		output_o = tf.nn.sigmoid(output_W_o + output_R_o + output_c * p_o + b_o)

		# calculate current output
		output_y = output_o * tf.nn.tanh(output_c)

	return tf.nn.dropout(output_y, keep_prob=dropout_rate(keep_prob), name='dropout_y'), tf.nn.dropout(output_c, keep_prob=dropout_rate(keep_prob), name='dropout_c')


def lstm_layer(input_seq,
			   hidden_dim,
			   tfv_train_phase = None,
			   keep_prob = 0.9,
			   initializer = tf.glorot_uniform_initializer,
			   reverse = False,
			   name_scope = None):
	""" LSTM layer, there are num_seq (the last dim of input) LSTM cells (_lstm_cell) in a LSTM layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit (input dimension is input_dim)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed (processed back-to-front)
	returns:
		3rd-order tensor [batch_size, hidden_dim, num_seq]; outputs are always in
		chronological (input) order, even when reverse is True
		name_scope:
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states c (cell state) and y (output) start from zeros
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# unroll the recurrence over the num_seq time steps
		l_outputs = []
		for i in range(num_seq):
			# when reversed, feed the sequence from the last step to the first
			step = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, step, axis = -1)
			# NOTE(review): every step enters the same 'lstm_cell' variable scope;
			# weight sharing across steps depends on the surrounding reuse settings - verify
			cur_y, cur_c = _lstm_cell(cur_x, cur_y, cur_c, hidden_dim, weights_initializer = initializer, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# restore chronological order so output step t aligns with input step t
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def tt_lstm_layer(input_seq,
				  hidden_dim,
				  input_modes,
				  output_modes,
				  tt_ranks_W,
				  tt_ranks_R = None,
				  tfv_train_phase = None,
				  keep_prob = 0.9,
				  flag_share = False,
				  initializer = tf.glorot_uniform_initializer,
				  reverse = False,
				  name_scope = None):
	""" LSTM layer in TT format, refer to lstm_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit (input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		tt_ranks_W: TT ranks of input matrices
		tt_ranks_R: TT ranks of recurrent matrices
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed (processed back-to-front)
		name_scope:
	returns:
		3rd-order tensor [batch_size, hidden_dim, num_seq]; outputs are always in
		chronological (input) order, even when reverse is True
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, are zeros
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq LSTM cells
		l_outputs = []
		for i in range(num_seq):
			# honor the `reverse` flag (previously ignored): feed the last step first
			step = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, step, axis = -1)
			# NOTE(review): `initializer` is accepted but not forwarded to the cell - confirm intent
			if flag_share:
				cur_y, cur_c = _tt_lstm_cell_share(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, tt_ranks_W, tt_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'tt_lstm_cell')
			else:
				cur_y, cur_c = _tt_lstm_cell(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, tt_ranks_W, tt_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'tt_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# restore chronological order so outputs align with the input sequence
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def ht_lstm_layer(input_seq,
				  hidden_dim,
				  input_modes = None,
				  output_modes = None,
				  ht_ranks_W = None,
				  ht_ranks_R = None,
				  tfv_train_phase = None,
				  keep_prob = 0.9,
				  flag_share = False,
				  initializer = tf.glorot_uniform_initializer,
				  reverse = False,
				  name_scope = None):
	""" LSTM layer in HT format, refer to lstm_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit (input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ht_ranks_W: HT ranks of input matrices
		ht_ranks_R: HT ranks of recurrent matrices
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed (processed back-to-front)
		name_scope:
	returns:
		3rd-order tensor [batch_size, hidden_dim, num_seq]; outputs are always in
		chronological (input) order, even when reverse is True
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, are zeros
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq LSTM cells
		l_outputs = []
		for i in range(num_seq):
			# honor the `reverse` flag (previously ignored): feed the last step first
			step = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, step, axis = -1)
			# NOTE(review): `initializer` is accepted but not forwarded to the cell - confirm intent
			if flag_share:
				cur_y, cur_c = _ht_lstm_cell_share(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, ht_ranks_W, ht_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ht_lstm_cell')
			else:
				cur_y, cur_c = _ht_lstm_cell(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, ht_ranks_W, ht_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ht_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# restore chronological order so outputs align with the input sequence
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def tr_lstm_layer(input_seq,
				  hidden_dim,
				  input_modes = None,
				  output_modes = None,
				  tr_ranks_W = None,
				  tr_ranks_W2 = None,
				  tr_ranks_R = None,
				  tfv_train_phase = None,
				  keep_prob = 0.9,
				  flag_share = False,
				  initializer = tf.glorot_uniform_initializer,
				  reverse = False,
				  name_scope = None):
	""" LSTM layer in TR format, refer to lstm_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit (input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		tr_ranks_W: TR ranks of input modes
		tr_ranks_W2: TR ranks of output modes
		tr_ranks_R: TR ranks of recurrent matrices
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed (processed back-to-front)
		name_scope:
	returns:
		3rd-order tensor [batch_size, hidden_dim, num_seq]; outputs are always in
		chronological (input) order, even when reverse is True
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, are zeros
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq LSTM cells
		l_outputs = []
		for i in range(num_seq):
			# honor the `reverse` flag (previously ignored): feed the last step first
			step = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, step, axis = -1)
			# NOTE(review): `initializer` is accepted but not forwarded to the cell - confirm intent
			if flag_share:
				cur_y, cur_c = _tr_lstm_cell_share(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, tr_ranks_W, tr_ranks_W2,
									   tr_ranks_R, tfv_train_phase=tfv_train_phase, keep_prob=keep_prob, name_scope='tr_lstm_cell')
			else:
				cur_y, cur_c = _tr_lstm_cell(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, tr_ranks_W, tr_ranks_W2,
								 tr_ranks_R, tfv_train_phase=tfv_train_phase, keep_prob=keep_prob, name_scope='tr_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# restore chronological order so outputs align with the input sequence
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def btd_lstm_layer(input_seq,
				   hidden_dim,
				   input_modes,
				   output_modes,
				   cp_rank,
				   tk_ranks_W,
				   tk_ranks_R = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   flag_share = False,
				   initializer = tf.glorot_uniform_initializer,
				   reverse = False,
				   name_scope = None):
	""" LSTM layer in BTD format, refer to lstm_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit (input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		cp_rank: CP rank in BTD
		tk_ranks_W: Tucker ranks of input matrices
		tk_ranks_R: Tucker ranks of recurrent matrices
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed (processed back-to-front)
		name_scope:
	returns:
		3rd-order tensor [batch_size, hidden_dim, num_seq]; outputs are always in
		chronological (input) order, even when reverse is True
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, are zeros
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq LSTM cells
		l_outputs = []
		for i in range(num_seq):
			# honor the `reverse` flag (previously ignored): feed the last step first
			step = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, step, axis = -1)
			# NOTE(review): `initializer` is accepted but not forwarded to the cell - confirm intent
			if flag_share:
				cur_y, cur_c = _btd_lstm_cell_share(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, cp_rank, tk_ranks_W, tk_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'btd_lstm_cell')
			else:
				cur_y, cur_c = _btd_lstm_cell(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, cp_rank, tk_ranks_W, tk_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'btd_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# restore chronological order so outputs align with the input sequence
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)


def ktd_lstm_layer(input_seq,
				   hidden_dim,
				   input_modes,
				   output_modes,
				   ktd_rank,
				   cp_ranks_W,
				   cp_ranks_R = None,
				   tfv_train_phase = None,
				   keep_prob = 0.9,
				   flag_share = False,
				   initializer = tf.glorot_uniform_initializer,
				   reverse = False,
				   name_scope = None):
	""" LSTM layer in KTD format, refer to lstm_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of LSTM units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in LSTM unit (input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share weights
		initializer: weights initializer
		reverse: True if the sequence of LSTM is reversed (processed back-to-front)
		name_scope:
	returns:
		3rd-order tensor [batch_size, hidden_dim, num_seq]; outputs are always in
		chronological (input) order, even when reverse is True
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, c and y, are zeros
		cur_c = tf.zeros([batch_size, hidden_dim])
		cur_y = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq LSTM cells
		l_outputs = []
		for i in range(num_seq):
			# honor the `reverse` flag (previously ignored): feed the last step first
			step = num_seq - i - 1 if reverse else i
			cur_x = tf.gather(input_seq, step, axis = -1)
			# NOTE(review): `initializer` is accepted but not forwarded to the cell - confirm intent
			if flag_share:
				cur_y, cur_c = _ktd_lstm_cell_share(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, ktd_rank, cp_ranks_W, cp_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ktd_lstm_cell')
			else:
				cur_y, cur_c = _ktd_lstm_cell(cur_x, cur_y, cur_c, hidden_dim, input_modes, output_modes, ktd_rank, cp_ranks_W, cp_ranks_R, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ktd_lstm_cell')
			l_outputs.append(tf.expand_dims(cur_y, -1))

	if reverse:
		# restore chronological order so outputs align with the input sequence
		l_outputs.reverse()
	return tf.concat(l_outputs, axis = -1)
