import numpy as np
import tensorflow as tf
import t3f

import Operators


# Fully-connected layer
def linear(input,
		   output_size,
		   weights_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
		   weights_regularizer = None,
		   biases_initializer = tf.zeros_initializer,
		   biases_regularizer = None,
		   name_scope = None):
	""" Fully-connected layer.

	Args:
		input: input tensor, 2-D - [batch_size, input_size]
		output_size: number of output units
		weights_initializer: initializer for the weight matrix
		weights_regularizer: regularizer for the weight matrix
		biases_initializer: initializer for the bias vector; pass None to omit the bias term
		biases_regularizer: regularizer for the bias vector
		name_scope: variable scope name for this layer
	"""
	with tf.variable_scope(name_scope):
		in_features = input.get_shape()[-1].value
		weights = tf.get_variable('var_weights', [in_features, output_size], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		result = tf.matmul(input, weights, name = 'output_mal')

		# The bias term is optional: a None initializer disables it.
		if biases_initializer is not None:
			biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			result = tf.add(result, biases, name = 'output_add')

	return result


# TT fully-connected layer
def tt_linear(input,
			  output_shape,
			  tt_ranks = None,
			  weights_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
			  weights_regularizer = None,
			  biases_initializer = tf.zeros_initializer,
			  biases_regularizer = None,
			  name_scope = None):
	""" TT fully-connected layer.

	Args:
		input: input tensor as a list of TT-cores - [[batch_size, input_core_1], [batch_size, input_core_2], ...]
			(each core is assumed 5-D with the input mode at axis 3 - TODO confirm against Operators.c_product)
		output_shape: mode sizes of the output TT tensor, same length as the number of input cores
		tt_ranks: list of TT-ranks for the weights, length = number of cores - 1;
			if None, every rank is set to the smallest (input mode * output mode) over all cores
		weights_initializer: initializer for the weight TT-cores
		weights_regularizer: regularizer for the weight TT-cores
		biases_initializer: initializer for the biases; pass None to omit the bias term
		biases_regularizer: regularizer for the biases
		name_scope: variable scope name for this layer
	"""
	with tf.variable_scope(name_scope):
		n_dim = len(input)

		# Determine tt_ranks automatically when not supplied.
		if tt_ranks is None:
			# Smallest feasible rank: min over cores of (input mode * output mode).
			# BUGFIX: use plain Python int arithmetic here - the previous
			# np.uint8 cast silently wrapped any product > 255 and produced
			# a wrong (tiny) rank.
			modes = [input[i].shape[3].value * output_shape[i] for i in range(n_dim)]
			v = int(min(modes))
			tt_ranks = [v] * (n_dim - 1)
		tt_ranks = [1] + tt_ranks + [1]

		# One trainable weight TT-core per input core.
		output = []
		for i in range(n_dim):
			var_shape = [tt_ranks[i], input[i].shape[3].value, output_shape[i], tt_ranks[i + 1]]
			tfv_weights = tf.get_variable('var_weights_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)

			# Core-wise contraction of the input core with the weight core.
			i_output = Operators.c_product(input[i], tfv_weights, name = 'output_c_product_%d' % (i + 1))
			if biases_initializer is not None:
				tfv_biases = tf.get_variable('var_biases_%d' % (i + 1), [i_output.shape[-1].value], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
				i_output = tf.add(i_output, tfv_biases, name = 'output_add_%d' % (i + 1))
			output.append(i_output)

	return output


# TT activation (ReLU) layer
def tt_relu(input,
			name_scope = None):
	""" Element-wise ReLU applied independently to every TT-core.

	Args:
		input: input tensor as a list of TT-cores - [[batch_size, input_core_1], [batch_size, input_core_2], ...]
		name_scope: variable scope name for this layer
	"""
	with tf.variable_scope(name_scope):
		output = [tf.nn.relu(core, name = 'output_relu_%d' % (idx + 1))
				  for idx, core in enumerate(input)]

	return output


# TT dropout layer
def tt_dropout(input,
			   keep_prob,
			   name_scope = None):
	""" Dropout applied independently to every TT-core.

	Args:
		input: input tensor as a list of TT-cores - [[batch_size, input_core_1], [batch_size, input_core_2], ...]
		keep_prob: probability that each element is kept
		name_scope: variable scope name for this layer
	"""
	with tf.variable_scope(name_scope):
		output = [tf.nn.dropout(core, keep_prob, name = 'output_dropout_%d' % (idx + 1))
				  for idx, core in enumerate(input)]

	return output


# TT rounding layer
def tt_rounding(input,
				max_tt_rank = None,
				name_scope = None):
	""" TT rounding (rank truncation) layer via truncated SVD.

	Args:
		input: input tensor as a list of TT-cores - [[batch_size, input_core_1], [batch_size, input_core_2], ...]
		max_tt_rank: target maximal TT-rank (scalar); if None it is chosen as
			the smallest (shape[2] * shape[3]) unfolding size over all cores,
			i.e. the largest rank that is still feasible for every core
		name_scope: variable scope name for this layer
	"""
	print('Warning! The rounding algorithm is hardly to train so avoid to use this function!')
	with tf.variable_scope(name_scope):
		n_dim = len(input)

		# Determine max_tt_rank when not supplied.
		if max_tt_rank is None:
			modes = []
			for i in range(n_dim):
				modes.append(input[i].shape[2].value * input[i].shape[3].value)
			# BUGFIX: plain Python min/int - the previous np.uint8 cast
			# silently wrapped any product > 255 and produced a wrong rank.
			max_tt_rank = int(min(modes))

		# Rank reduction via truncated SVD, pinned to the CPU.
		with tf.device('/cpu:0'):
			# NOTE: a right-to-left TT orthogonalization sweep (row-wise QR)
			# used to precede the SVD but was disabled because the QR gradient
			# is not implemented.

			# Truncated SVD of each core's left unfolding.
			l_u = []
			l_s = []
			l_v = []
			for i in range(n_dim - 1):
				matrix_shape = [input[i].shape[0].value, input[i].shape[1].value * input[i].shape[2].value * input[i].shape[3].value, input[i].shape[4].value]
				u, s, v = Operators.truancated_svd(tf.reshape(input[i], matrix_shape), max_tt_rank, name = 'truancated_svd_%d' % (i + 1))
				u = tf.reshape(u, [u.shape[0].value, input[i].shape[1].value, input[i].shape[2].value, input[i].shape[3].value, max_tt_rank])
				l_u.append(u)
				l_s.append(s)
				l_v.append(v)

			# Reassemble the cores: each core absorbs s^T * v^T... actually
			# v^T * s^T from its left neighbour's SVD on its left rank axis.
			output = []
			output.append(l_u[0])
			for i in range(1, n_dim - 1):
				right_matrix = tf.einsum('bmn,bnk->bmk', tf.transpose(l_v[i - 1], [0, 2, 1]), tf.transpose(l_s[i - 1], [0, 2, 1]))
				cur_core = tf.einsum('brmnt,brk->bkmnt', l_u[i], right_matrix)
				output.append(cur_core)
			right_matrix = tf.einsum('bmn,bnk->bmk', tf.transpose(l_v[-1], [0, 2, 1]), tf.transpose(l_s[-1], [0, 2, 1]))
			last_core = tf.einsum('brmnt,brk->bkmnt', input[-1], right_matrix)
			output.append(last_core)

	return output


# TT rank-reduction layer
def tt_reduced_rank(input,
					max_tt_rank = None,
					name_scope = None):
	""" TT rank-reduction layer using trainable reduction matrices.

	Args:
		input: input tensor as a list of TT-cores - [[batch_size, input_core_1], [batch_size, input_core_2], ...]
		max_tt_rank: target maximal TT-rank (scalar); if None it is chosen as
			the smallest (shape[2] * shape[3]) unfolding size over all cores,
			i.e. the largest rank that is still feasible for every core
		name_scope: variable scope name for this layer
	"""
	with tf.variable_scope(name_scope):
		n_dim = len(input)

		# Determine max_tt_rank when not supplied.
		if max_tt_rank is None:
			modes = []
			for i in range(n_dim):
				modes.append(input[i].shape[2].value * input[i].shape[3].value)
			# BUGFIX: plain Python min/int - the previous np.uint8 cast
			# silently wrapped any product > 255 and produced a wrong rank.
			max_tt_rank = int(min(modes))

		# One trainable reduction matrix per inner rank axis: the first core
		# is reduced only on its right rank, the last only on its left rank,
		# middle cores on both.
		output = []
		reducer_initializer = tf.contrib.layers.xavier_initializer(uniform = False)
		for i in range(n_dim):
			if i == 0:
				var_shape = [input[i].shape[-1].value, max_tt_rank]
				tfv_reducer = tf.get_variable('var_reducer_%d' % (i * 2 + 1), var_shape, initializer = reducer_initializer, trainable = True)
				i_output = tf.einsum('bimnj,jk->bimnk', input[i], tfv_reducer)
				output.append(i_output)
			elif i == n_dim - 1:
				var_shape = [input[i].shape[1].value, max_tt_rank]
				tfv_reducer = tf.get_variable('var_reducer_%d' % (i * 2), var_shape, initializer = reducer_initializer, trainable = True)
				i_output = tf.einsum('bimnj,ik->bkmnj', input[i], tfv_reducer)
				output.append(i_output)
			else:
				var_shape_left = [input[i].shape[1].value, max_tt_rank]
				tfv_reducer_left = tf.get_variable('var_reducer_%d' % (i * 2), var_shape_left, initializer = reducer_initializer, trainable = True)
				var_shape_right = [input[i].shape[-1].value, max_tt_rank]
				tfv_reducer_right = tf.get_variable('var_reducer_%d' % (i * 2 + 1), var_shape_right, initializer = reducer_initializer, trainable = True)
				i_output = tf.einsum('bimnj,ik->bkmnj', input[i], tfv_reducer_left)
				i_output = tf.einsum('bimnj,jk->bimnk', i_output, tfv_reducer_right)
				output.append(i_output)

	return output


# TT contraction layer
def tt_contract(input,
				name_scope = None):
	""" Contract all TT-cores into a single flat tensor.

	Sequentially combines neighbouring cores with Operators.strong_kronecker,
	then flattens everything but the batch dimension.

	Args:
		input: input tensor as a list of TT-cores - [[batch_size, input_core_1], [batch_size, input_core_2], ...]
		name_scope: variable scope name for this layer
	"""
	with tf.variable_scope(name_scope):
		result = input[0]
		for idx, core in enumerate(input[1:], start = 2):
			result = Operators.strong_kronecker(result, core, name = 'output_strong_kronecker_%d' % idx)
		result = tf.reshape(result, [result.shape[0].value, -1])

	return result
