import numpy as np
import tensorflow as tf


def _gru_cell(input_x,
			  input_h,
			  output_dim,
			  weights_initializer = tf.glorot_uniform_initializer,
			  weights_regularizer = None,
			  biases_initializer = tf.zeros_initializer,
			  biases_regularizer = None,
			  tfv_train_phase = None,
			  keep_prob = 0.9,
			  name_scope = None):
	""" single GRU cell, should NOT be referenced outside this script
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_h: input/state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		weights_initializer: initializer for the input (W) and recurrent (R) matrices
		weights_regularizer: regularizer for the W and R matrices
		biases_initializer: initializer for the bias vectors
		biases_regularizer: regularizer for the bias vectors
		tfv_train_phase: flag of whether is training; when None, dropout is disabled
		keep_prob: keeping probability of dropout (applied only while training)
		name_scope: variable scope the cell's weights live in
	returns:
		the new hidden state - [batch_size, output_dim], with dropout applied
	"""
	# dropout keep-probability: keep_prob during training, 1.0 (no-op) at
	# inference; with no train-phase flag, dropout is disabled entirely
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# reuse = tf.AUTO_REUSE so the per-timestep calls from gru_layer share one
	# set of weights; without it the second call under the same scope raises
	# 'Variable ... already exists' in TF1 graph mode
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		input_dim = input_x.shape[-1].value

		# define variables of reset gate
		W_r = tf.get_variable('var_weight_reset', [input_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		R_r = tf.get_variable('var_recurrent_reset', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_r = tf.get_variable('var_bias_reset', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of update gate
		W_z = tf.get_variable('var_weight_update', [input_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		R_z = tf.get_variable('var_recurrent_update', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_update', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate
		W_h = tf.get_variable('var_weight_state', [input_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		R_h = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_h = tf.get_variable('var_bias_state', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# calculate the reset gate
		output_r = tf.nn.sigmoid(tf.matmul(input_x, W_r) + tf.matmul(input_h, R_r) + b_r)

		# calculate the update gate
		output_z = tf.nn.sigmoid(tf.matmul(input_x, W_z) + tf.matmul(input_h, R_z) + b_z)

		# calculate the candidate state (reset gate modulates the recurrent input)
		output_h = tf.nn.tanh(tf.matmul(input_x, W_h) + tf.matmul(output_r * input_h, R_h) + b_h)

		# current output: convex combination of the previous state and the
		# candidate state; plain broadcasting replaces the original
		# tf.constant of ones, so a statically-known batch size is not needed
		output = (1.0 - output_z) * input_h + output_z * output_h

	return tf.nn.dropout(output, keep_prob = dropout_rate(keep_prob), name = 'dropout')


def _ktd_gru_cell(input_x,
				  input_h,
				  output_dim,
				  input_modes,
				  output_modes,
				  ktd_rank,
				  cp_ranks_W,
				  cp_ranks_R = None,
				  weights_initializer = tf.glorot_uniform_initializer,
				  weights_regularizer = None,
				  biases_initializer = tf.zeros_initializer,
				  biases_regularizer = None,
				  tfv_train_phase = None,
				  keep_prob = 0.9,
				  name_scope = None):
	""" single GRU cell in KTD format, refer to _gru_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_h: input/state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n);
			when None, the recurrent matrices fall back to dense [output_dim, output_dim] variables
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training; when None, dropout is disabled
		keep_prob: keeping probability of dropout (applied only while training)
		name_scope: variable scope the cell's weights live in
	returns:
		the new hidden state - [batch_size, output_dim], with dropout applied
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout keep-probability: keep_prob during training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KTD gated matmul: contracts the (reshaped) input with the d CP cores,
	# two modes at a time, instead of materializing the full weight matrix
	def _ktd_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size, name):
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1})
		cur_inp = tf.identity(tf.reshape(input, [batch_size, input_modes[0], -1]))
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# recover and concat all K pairs matrices
			l_matrices = []
			for k in range(ktd_rank):
				# recover (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B}) to (m_{i}, n_{i}, r_{k}^{A}*r_{k}^{B})
				matrix_A = tf.reshape(cores[2 * ktd_rank * i + 2 * k], [input_modes[i], 1, cp_ranks_in[k], 1])
				matrix_B = tf.reshape(cores[2 * ktd_rank * i + 2 * k + 1], [1, output_modes[i], 1, cp_ranks_out[k]])
				recover = tf.reshape(tf.multiply(matrix_A, matrix_B), [input_modes[i], output_modes[i], cp_ranks_in[k] * cp_ranks_out[k]])
				l_matrices.append(recover)
			# reshape the ith CP core to (m_{i}, r^{A}*r^{B}, n_{i})
			cur_core = tf.transpose(tf.concat(l_matrices, axis = -1), [0, 2, 1])
			# contraction between input and the ith core, only m_{i} if i is even and m_{i}*r^{A}*r^{B} when i is odd
			if i % 2 == 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, r^{A}*r^{B}, n_{i})
				output = tf.einsum('bm,mrn->brn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1}, r^{A}*r^{B})
				if i == d - 1:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, cur_core.shape[1], output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, cur_core.shape[1], output_modes[i]])
				output = tf.transpose(output, [0, 4, 2, 1, 3])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1, cur_core.shape[1]])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1], cur_core.shape[1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
			if i % 2 != 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, n_{i})
				output = tf.einsum('bmr,mrn->bn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1})
				if i == d - 1:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, output_modes[i]])
				output = tf.transpose(output, [0, 3, 2, 1])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
		# if the value of order is odd, there is still a vector whose dimension is r^{A}*r^{B} shall be contracted
		if d % 2 != 0:
			# NOTE(review): cores[-1] is declared rank-1 while tf.matmul expects
			# rank>=2 operands, and its length prod(cp_ranks) equals the core's
			# concatenated r-dimension sum_k(r_A_k*r_B_k) only when ktd_rank == 1
			# - confirm intended usage for odd d and ktd_rank > 1
			output = tf.matmul(tf.squeeze(output), cores[-1])
		# reshape to (batch_size, n_{1}*...*n_{d})
		output = tf.reshape(output, [batch_size, -1])
		return output

	# reuse = tf.AUTO_REUSE so the per-timestep calls from ktd_gru_layer share
	# one set of weights; without it the second call under the same scope
	# raises 'Variable ... already exists' in TF1 graph mode
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		d = len(input_modes)
		batch_size = input_x.shape[0].value

		# define variables of reset gate
		l_W_r_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_r_core_A = tf.get_variable('var_W_reset_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_r_core_B = tf.get_variable('var_W_reset_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_r_cores.append(W_r_core_A)
				l_W_r_cores.append(W_r_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_r_core_kernel = tf.get_variable('var_W_reset_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_r_cores.append(W_r_core_kernel)
		if cp_ranks_R is not None:
			l_R_r_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_r_core_A = tf.get_variable('var_R_reset_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_r_core_B = tf.get_variable('var_R_reset_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_r_cores.append(R_r_core_A)
					l_R_r_cores.append(R_r_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_r_core_kernel = tf.get_variable('var_R_reset_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_r_cores.append(R_r_core_kernel)
		else:
			# dense fallback for the recurrent matrix when cp_ranks_R is None
			R_r = tf.get_variable('var_recurrent_reset', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_r = tf.get_variable('var_bias_reset', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of update gate
		l_W_z_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_z_core_A = tf.get_variable('var_W_update_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_z_core_B = tf.get_variable('var_W_update_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_z_cores.append(W_z_core_A)
				l_W_z_cores.append(W_z_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_z_core_kernel = tf.get_variable('var_W_update_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_z_cores.append(W_z_core_kernel)
		if cp_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_z_core_A = tf.get_variable('var_R_update_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_z_core_B = tf.get_variable('var_R_update_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_z_cores.append(R_z_core_A)
					l_R_z_cores.append(R_z_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_z_core_kernel = tf.get_variable('var_R_update_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_z_cores.append(R_z_core_kernel)
		else:
			R_z = tf.get_variable('var_recurrent_update', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_update', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate
		l_W_h_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				W_h_core_A = tf.get_variable('var_W_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				W_h_core_B = tf.get_variable('var_W_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_h_cores.append(W_h_core_A)
				l_W_h_cores.append(W_h_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_h_core_kernel = tf.get_variable('var_W_state_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_h_cores.append(W_h_core_kernel)
		if cp_ranks_R is not None:
			l_R_h_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_h_core_A = tf.get_variable('var_R_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_h_core_B = tf.get_variable('var_R_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_h_cores.append(R_h_core_A)
					l_R_h_cores.append(R_h_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_h_core_kernel = tf.get_variable('var_R_state_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_h_cores.append(R_h_core_kernel)
		else:
			R_h = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_h = tf.get_variable('var_bias_state', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# calculate the reset gate
		output_W_r = _ktd_gated_matmul(input_x, l_W_r_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_r')
		if cp_ranks_R is not None:
			output_R_r = _ktd_gated_matmul(input_h, l_R_r_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_r')
		else:
			output_R_r = tf.matmul(input_h, R_r, name = 'R_r')
		output_r = tf.nn.sigmoid(output_W_r + output_R_r + b_r)

		# calculate the update gate
		output_W_z = _ktd_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_z')
		if cp_ranks_R is not None:
			output_R_z = _ktd_gated_matmul(input_h, l_R_z_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_h, R_z, name = 'R_z')
		output_z = tf.nn.sigmoid(output_W_z + output_R_z + b_z)

		# calculate the state (reset gate modulates the recurrent input)
		output_W_h = _ktd_gated_matmul(input_x, l_W_h_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_h')
		if cp_ranks_R is not None:
			output_R_h = _ktd_gated_matmul(output_r * input_h, l_R_h_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_h')
		else:
			output_R_h = tf.matmul(output_r * input_h, R_h, name = 'R_h')
		output_h = tf.nn.tanh(output_W_h + output_R_h + b_h)

		# current output: convex combination of the previous state and the
		# candidate state; broadcasting replaces the original tf.constant of
		# ones, so a statically-known batch size is not needed here
		output = (1.0 - output_z) * input_h + output_z * output_h

	return tf.nn.dropout(output, keep_prob = dropout_rate(keep_prob), name = 'dropout')


def _ktd_gru_cell_share(input_x,
						input_h,
						output_dim,
						input_modes,
						output_modes,
						ktd_rank,
						cp_ranks_W,
						cp_ranks_R = None,
						weights_initializer = tf.glorot_uniform_initializer,
						weights_regularizer = None,
						biases_initializer = tf.zeros_initializer,
						biases_regularizer = None,
						tfv_train_phase = None,
						keep_prob = 0.9,
						name_scope = None):
	""" single GRU cell in KTD format with the input-side cores for modes
	i > 0 shared across the three gates (only the first-mode cores are
	gate-specific), refer to _gru_cell
	params:
		input_x: input from previous layer - [batch_size, input_dim]
		input_h: input/state from the last time - [batch_size, output_dim], is 0 for the initial time
		output_dim: dimension of the output
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n);
			when None, the recurrent matrices fall back to dense [output_dim, output_dim] variables
		weights_initializer:
		weights_regularizer:
		biases_initializer:
		biases_regularizer:
		tfv_train_phase: flag of whether is training; when None, dropout is disabled
		keep_prob: keeping probability of dropout (applied only while training)
		name_scope: variable scope the cell's weights live in
	returns:
		the new hidden state - [batch_size, output_dim], with dropout applied
	"""
	assert input_x.get_shape()[-1].value == np.prod(input_modes), 'Input modes must be the factors of input tensor.'
	assert output_dim == np.prod(output_modes), 'Output modes must be the factors of output tensor.'
	assert len(input_modes) == len(output_modes), 'Modes of input and output must be equal.'

	# dropout keep-probability: keep_prob during training, 1.0 (no-op) otherwise
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# KTD gated matmul: contracts the (reshaped) input with the d CP cores,
	# two modes at a time, instead of materializing the full weight matrix
	def _ktd_gated_matmul(input, cores, input_modes, output_modes, ktd_rank, cp_ranks_in, cp_ranks_out, d, batch_size, name):
		# reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1})
		cur_inp = tf.identity(tf.reshape(input, [batch_size, input_modes[0], -1]))
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])
		for i in range(d):
			# recover and concat all K pairs matrices
			l_matrices = []
			for k in range(ktd_rank):
				# recover (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B}) to (m_{i}, n_{i}, r_{k}^{A}*r_{k}^{B})
				matrix_A = tf.reshape(cores[2 * ktd_rank * i + 2 * k], [input_modes[i], 1, cp_ranks_in[k], 1])
				matrix_B = tf.reshape(cores[2 * ktd_rank * i + 2 * k + 1], [1, output_modes[i], 1, cp_ranks_out[k]])
				recover = tf.reshape(tf.multiply(matrix_A, matrix_B), [input_modes[i], output_modes[i], cp_ranks_in[k] * cp_ranks_out[k]])
				l_matrices.append(recover)
			# reshape the ith CP core to (m_{i}, r^{A}*r^{B}, n_{i})
			cur_core = tf.transpose(tf.concat(l_matrices, axis = -1), [0, 2, 1])
			# contraction between input and the ith core, only m_{i} if i is even and m_{i}*r^{A}*r^{B} when i is odd
			if i % 2 == 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, r^{A}*r^{B}, n_{i})
				output = tf.einsum('bm,mrn->brn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1}, r^{A}*r^{B})
				if i == d - 1:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, cur_core.shape[1], output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, cur_core.shape[1], output_modes[i]])
				output = tf.transpose(output, [0, 4, 2, 1, 3])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1, cur_core.shape[1]])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1], cur_core.shape[1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
			if i % 2 != 0:
				# contraction output shape is (batch_size*n_{1}*...*n_{i-1}m_{i+1}*...*m_{d}, n_{i})
				output = tf.einsum('bmr,mrn->bn', cur_inp, cur_core)
				# reshape to (batch_size*n_{1}*...*n_{i-1}*n_{i}m_{i+2}*...*m_{d}, m_{i+1})
				if i == d - 1:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), 1, -1, output_modes[i]])
				else:
					output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), input_modes[i + 1], -1, output_modes[i]])
				output = tf.transpose(output, [0, 3, 2, 1])
				if i == d - 1:
					output = tf.reshape(output, [-1, 1])
				else:
					output = tf.reshape(output, [-1, input_modes[i + 1]])
				if i != d - 1:
					cur_inp = tf.identity(output)
		# if the value of order is odd, there is still a vector whose dimension is r^{A}*r^{B} shall be contracted
		if d % 2 != 0:
			# NOTE(review): cores[-1] is declared rank-1 while tf.matmul expects
			# rank>=2 operands, and its length prod(cp_ranks) equals the core's
			# concatenated r-dimension sum_k(r_A_k*r_B_k) only when ktd_rank == 1
			# - confirm intended usage for odd d and ktd_rank > 1
			output = tf.matmul(tf.squeeze(output), cores[-1])
		# reshape to (batch_size, n_{1}*...*n_{d})
		output = tf.reshape(output, [batch_size, -1])
		return output

	# reuse = tf.AUTO_REUSE is required here: the shared 'var_W_core_*'
	# variables (modes i > 0) are requested by all three gates, so without
	# AUTO_REUSE the second gate's tf.get_variable call would raise
	# 'Variable ... already exists'; it also lets per-timestep calls from
	# ktd_gru_layer share one set of weights
	with tf.variable_scope(name_scope, reuse = tf.AUTO_REUSE):
		d = len(input_modes)
		batch_size = input_x.shape[0].value

		# define variables of reset gate
		l_W_r_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					# first-mode cores are gate-specific
					W_r_core_A = tf.get_variable('var_W_reset_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_r_core_B = tf.get_variable('var_W_reset_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				else:
					# remaining cores are shared across the three gates
					W_r_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_r_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_r_cores.append(W_r_core_A)
				l_W_r_cores.append(W_r_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_r_core_kernel = tf.get_variable('var_W_reset_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_r_cores.append(W_r_core_kernel)
		if cp_ranks_R is not None:
			l_R_r_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_r_core_A = tf.get_variable('var_R_reset_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_r_core_B = tf.get_variable('var_R_reset_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_r_cores.append(R_r_core_A)
					l_R_r_cores.append(R_r_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_r_core_kernel = tf.get_variable('var_R_reset_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_r_cores.append(R_r_core_kernel)
		else:
			# dense fallback for the recurrent matrix when cp_ranks_R is None
			R_r = tf.get_variable('var_recurrent_reset', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_r = tf.get_variable('var_bias_reset', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of update gate
		l_W_z_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					W_z_core_A = tf.get_variable('var_W_update_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_z_core_B = tf.get_variable('var_W_update_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				else:
					# reuses the shared cores created for the reset gate
					W_z_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_z_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_z_cores.append(W_z_core_A)
				l_W_z_cores.append(W_z_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_z_core_kernel = tf.get_variable('var_W_update_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_z_cores.append(W_z_core_kernel)
		if cp_ranks_R is not None:
			l_R_z_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_z_core_A = tf.get_variable('var_R_update_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_z_core_B = tf.get_variable('var_R_update_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_z_cores.append(R_z_core_A)
					l_R_z_cores.append(R_z_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_z_core_kernel = tf.get_variable('var_R_update_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_z_cores.append(R_z_core_kernel)
		else:
			R_z = tf.get_variable('var_recurrent_update', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_z = tf.get_variable('var_bias_update', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# define variables of state gate
		l_W_h_cores = []
		for i in range(d):
			for k in range(ktd_rank):
				# shapes are (m_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
				var_shape_A = [input_modes[i], cp_ranks_W[k]]
				var_shape_B = [output_modes[i], cp_ranks_W[k + ktd_rank]]
				if i == 0:
					W_h_core_A = tf.get_variable('var_W_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_h_core_B = tf.get_variable('var_W_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				else:
					# reuses the shared cores created for the reset gate
					W_h_core_A = tf.get_variable('var_W_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					W_h_core_B = tf.get_variable('var_W_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_W_h_cores.append(W_h_core_A)
				l_W_h_cores.append(W_h_core_B)
		if d % 2 != 0:
			# kernel vector with shape (r^{A}*r^{B})
			var_shape_kernel = [np.prod(np.array(cp_ranks_W[0:]), dtype = np.int32)]
			W_h_core_kernel = tf.get_variable('var_W_state_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
			l_W_h_cores.append(W_h_core_kernel)
		if cp_ranks_R is not None:
			l_R_h_cores = []
			for i in range(d):
				for k in range(ktd_rank):
					# shapes are (n_{i}, r_{k}^{A}) and (n_{i}, r_{k}^{B})
					var_shape_A = [output_modes[i], cp_ranks_R[k]]
					var_shape_B = [output_modes[i], cp_ranks_R[k + ktd_rank]]
					R_h_core_A = tf.get_variable('var_R_state_core_A_%d%d' % ((i + 1), (k + 1)), var_shape_A, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					R_h_core_B = tf.get_variable('var_R_state_core_B_%d%d' % ((i + 1), (k + 1)), var_shape_B, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
					l_R_h_cores.append(R_h_core_A)
					l_R_h_cores.append(R_h_core_B)
			if d % 2 != 0:
				# kernel vector with shape (r^{A}*r^{B})
				var_shape_kernel = [np.prod(np.array(cp_ranks_R[0:]), dtype = np.int32)]
				R_h_core_kernel = tf.get_variable('var_R_state_core_kernel', var_shape_kernel, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
				l_R_h_cores.append(R_h_core_kernel)
		else:
			R_h = tf.get_variable('var_recurrent_state', [output_dim, output_dim], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		b_h = tf.get_variable('var_bias_state', [output_dim], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)

		# calculate the reset gate
		output_W_r = _ktd_gated_matmul(input_x, l_W_r_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_r')
		if cp_ranks_R is not None:
			output_R_r = _ktd_gated_matmul(input_h, l_R_r_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_r')
		else:
			output_R_r = tf.matmul(input_h, R_r, name = 'R_r')
		output_r = tf.nn.sigmoid(output_W_r + output_R_r + b_r)

		# calculate the update gate
		output_W_z = _ktd_gated_matmul(input_x, l_W_z_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_z')
		if cp_ranks_R is not None:
			output_R_z = _ktd_gated_matmul(input_h, l_R_z_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_z')
		else:
			output_R_z = tf.matmul(input_h, R_z, name = 'R_z')
		output_z = tf.nn.sigmoid(output_W_z + output_R_z + b_z)

		# calculate the state (reset gate modulates the recurrent input)
		output_W_h = _ktd_gated_matmul(input_x, l_W_h_cores, input_modes, output_modes, ktd_rank, cp_ranks_W[0:ktd_rank], cp_ranks_W[ktd_rank:], d, batch_size, 'W_h')
		if cp_ranks_R is not None:
			output_R_h = _ktd_gated_matmul(output_r * input_h, l_R_h_cores, output_modes, output_modes, ktd_rank, cp_ranks_R[0:ktd_rank], cp_ranks_R[ktd_rank:], d, batch_size, 'R_h')
		else:
			output_R_h = tf.matmul(output_r * input_h, R_h, name = 'R_h')
		output_h = tf.nn.tanh(output_W_h + output_R_h + b_h)

		# current output: convex combination of the previous state and the
		# candidate state; broadcasting replaces the original tf.constant of
		# ones, so a statically-known batch size is not needed here
		output = (1.0 - output_z) * input_h + output_z * output_h

	return tf.nn.dropout(output, keep_prob = dropout_rate(keep_prob), name = 'dropout')


def gru_layer(input_seq,
			  hidden_dim,
			  tfv_train_phase = None,
			  keep_prob = 0.9,
			  initializer = tf.glorot_uniform_initializer,
			  reverse = False,
			  name_scope = None):
	""" GRU layer, there are num_seq (the last dim of input) GRU cells (_gru_cell) in a GRU layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of GRU units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in GRU unit(input dimension is input_dim)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		initializer: weights initializer, forwarded to every GRU cell
		reverse: True to run the recurrence backwards over the sequence;
			outputs are re-ordered so that they still align index-for-index
			with input_seq
		name_scope:
	returns:
		hidden states of all steps - [batch_size, hidden_dim, num_seq]
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, h
		init_h = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq GRU cells; previously the `reverse`
		# and `initializer` arguments were accepted but silently ignored -
		# they are now honored (defaults reproduce the old behavior)
		step_order = range(num_seq - 1, -1, -1) if reverse else range(num_seq)
		l_outputs = []
		cur_h = init_h
		for i in step_order:
			cur_x = tf.gather(input_seq, i, axis = -1)
			cur_h = _gru_cell(cur_x, cur_h, hidden_dim, weights_initializer = initializer, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'gru_cell')
			l_outputs.append(tf.expand_dims(cur_h, -1))
		if reverse:
			# restore original time order so output step i matches input step i
			l_outputs.reverse()

	return tf.concat(l_outputs, axis = -1)


def ktd_gru_layer(input_seq,
				  hidden_dim,
				  input_modes,
				  output_modes,
				  ktd_rank,
				  cp_ranks_W,
				  cp_ranks_R = None,
				  tfv_train_phase = None,
				  keep_prob = 0.9,
				  flag_share = False,
				  initializer = tf.glorot_uniform_initializer,
				  reverse = False,
				  name_scope = None):
	""" GRU layer in KTD format, refer to gru_layer
	params:
		input_seq: input sequence, 3rd-order tensor normally - [batch_size, input_dim, num_seq], num_seq is the number of GRU units
		hidden_dim: dimension of hidden layer, i.e., output dimension of weight matrix in GRU unit(input dimension is input_dim)
		input_modes: factorization of input_dim
		output_modes: factorization of output_dim
		ktd_rank: KTD rank
		cp_ranks_W: totally 2*ktd_rank CP ranks at the input side (top ktd_rank ranks along m, last ktd_rank ranks along n)
		cp_ranks_R: totally 2*ktd_rank CP ranks at the recurrent side (top ktd_rank ranks along n, last ktd_rank ranks along n)
		tfv_train_phase: flag of whether is training
		keep_prob: keeping probability of dropout
		flag_share: whether to share the input-side cores across gates (_ktd_gru_cell_share vs _ktd_gru_cell)
		initializer: weights initializer, forwarded to every GRU cell
		reverse: True to run the recurrence backwards over the sequence;
			outputs are re-ordered so that they still align index-for-index
			with input_seq
		name_scope:
	returns:
		hidden states of all steps - [batch_size, hidden_dim, num_seq]
	"""
	with tf.variable_scope(name_scope):
		batch_size = input_seq.shape[0].value
		num_seq = input_seq.shape[-1].value

		# initial states, h
		init_h = tf.zeros([batch_size, hidden_dim])

		# sequential data into num_seq GRU cells; previously the `reverse`
		# and `initializer` arguments were accepted but silently ignored -
		# they are now honored (defaults reproduce the old behavior)
		step_order = range(num_seq - 1, -1, -1) if reverse else range(num_seq)
		l_outputs = []
		cur_h = init_h
		for i in step_order:
			cur_x = tf.gather(input_seq, i, axis = -1)
			if flag_share:
				cur_h = _ktd_gru_cell_share(cur_x, cur_h, hidden_dim, input_modes, output_modes, ktd_rank, cp_ranks_W, cp_ranks_R, weights_initializer = initializer, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ktd_gru_cell')
			else:
				cur_h = _ktd_gru_cell(cur_x, cur_h, hidden_dim, input_modes, output_modes, ktd_rank, cp_ranks_W, cp_ranks_R, weights_initializer = initializer, tfv_train_phase = tfv_train_phase, keep_prob = keep_prob, name_scope = 'ktd_gru_cell')
			l_outputs.append(tf.expand_dims(cur_h, -1))
		if reverse:
			# restore original time order so output step i matches input step i
			l_outputs.reverse()

	return tf.concat(l_outputs, axis = -1)
