import numpy as np
import tensorflow as tf
import t3f


# Vector-to-TT conversion layer
def vectors_to_tt(input,
				  modes,
				  tt_ranks,
				  name_scope = None):
	""" Convert a vector into TT (tensor-train) cores.

	Args:
		input: input vector.
		modes: factorization modes of the input dimension; their product
			must equal the size of the input vector.
		tt_ranks: preset TT ranks; len(tt_ranks) + 1 must equal len(modes).
		name_scope: name of this layer.

	Returns:
		Tuple of TT cores of the decomposed input.
	"""
	with tf.variable_scope(name_scope):
		# Treat the vector as a TT matrix with `modes` as the row modes;
		# None lets t3f choose the column modes automatically.
		tt_shape = [modes, None]
		tt_input = t3f.to_tt_matrix(input, tt_shape, [1] + tt_ranks + [1])

	# Use the public `tt_cores` property instead of reaching into the
	# private `_tt_cores` attribute of the t3f TensorTrain object.
	return tt_input.tt_cores


# Fully-connected layer
def linear(input,
		   output_size,
		   weights_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
		   weights_regularizer = None,
		   biases_initializer = tf.zeros_initializer,
		   biases_regularizer = None,
		   name_scope = None):
	""" Plain fully-connected (dense) layer.

	Args:
		input: input tensor, 2-D - [batch_size, input_size].
		output_size: output dimensionality.
		weights_initializer: initializer for the weight matrix.
		weights_regularizer: regularizer for the weight matrix.
		biases_initializer: initializer for the biases (None disables bias).
		biases_regularizer: regularizer for the biases.
		name_scope: name of this layer.

	Returns:
		Output tensor of shape [batch_size, output_size].
	"""
	with tf.variable_scope(name_scope):
		in_dim = input.shape[-1].value
		weight_mat = tf.get_variable('var_weights', [in_dim, output_size], initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)
		result = tf.matmul(input, weight_mat, name = 'output_mal')

		# Bias is optional: skipped entirely when no initializer is given.
		if biases_initializer is not None:
			bias_vec = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			result = tf.add(result, bias_vec, name = 'output_add')

	return result


# TT fully-connected layer
def tt_linear(input,
			  output_size,
			  input_modes,
			  output_modes,
			  tt_ranks,
			  weights_initializer = tf.contrib.layers.xavier_initializer(uniform = False),
			  weights_regularizer = None,
			  biases_initializer = tf.zeros_initializer,
			  biases_regularizer = None,
			  tfv_train_phase = None,
			  name_scope = None):
	""" TT (tensor-train) fully-connected layer.

	Args:
		input: input tensor, rank 2 - [batch_size, input_size].
		output_size: output dimensionality.
		input_modes: factorization modes of input_size; their product must
			equal input_size.
		output_modes: factorization modes of output_size; their product must
			equal output_size.
		tt_ranks: preset TT ranks; len(tt_ranks) + 1 must equal
			len(input_modes) (== len(output_modes)).
		weights_initializer: initializer for the TT weight cores.
		weights_regularizer: regularizer for the TT weight cores.
		biases_initializer: initializer for the biases (None disables bias).
		biases_regularizer: regularizer for the biases.
		tfv_train_phase: training-phase flag (currently unused by this layer).
		name_scope: name of this layer.

	Returns:
		Output tensor of shape [batch_size, output_size].
	"""
	with tf.variable_scope(name_scope):
		d = len(input_modes)
		batch_size = input.shape[0].value
		l_tt_ranks = [1] + tt_ranks + [1]

		# Work on a padded local copy so the caller's `input_modes` list is
		# NOT mutated (the original code appended to the argument in place,
		# which broke any reuse of the same modes list by the caller).
		l_input_modes = list(input_modes) + [1]

		# Reshape input to (batch_size*m_{2}*m_{3}*...*m_{d}, m_{1}*r_{0}); note r_{0}=1.
		cur_inp = tf.reshape(input, [batch_size, l_input_modes[0], -1])
		cur_inp = tf.transpose(cur_inp, [0, 2, 1])
		cur_inp = tf.reshape(cur_inp, [-1, cur_inp.shape[-1].value])

		# Define the TT weight cores and contract them with the input chain-wise.
		for i in range(d):
			# Core shape: (r_{k-1}*m_{k}, n_{k}*r_{k})
			var_shape = [l_tt_ranks[i] * l_input_modes[i], output_modes[i] * l_tt_ranks[i + 1]]
			tfv_weight_core = tf.get_variable('var_weight_core_%d' % (i + 1), var_shape, initializer = weights_initializer, regularizer = weights_regularizer, trainable = True)

			# Multiply the current input by this core; result shape:
			# (batch_size*n_{1}*...*n_{k-1}*m_{k+1}*...*m_{d}, n_{k}*r_{k})
			output = tf.matmul(cur_inp, tfv_weight_core, name = 'output_mal_core_%d' % (i + 1))

			# Reshape to (batch_size*n_{1}*...*n_{k-1}, m_{k+1}, m_{k+2}*...*m_{d}, n_{k}, r_{k})
			output = tf.reshape(output, [batch_size * np.prod(np.array(output_modes[0:i]), dtype = np.int32), l_input_modes[i + 1], -1, output_modes[i], l_tt_ranks[i + 1]])

			# Swap m_{k+1} and n_{k}; reshape to
			# (batch_size*n_{1}*...*n_{k}*m_{k+2}*...*m_{d}, m_{k+1}*r_{k})
			output = tf.transpose(output, [0, 3, 2, 1, 4])
			output = tf.reshape(output, [-1, output.shape[-2].value * output.shape[-1].value])
			if i != d - 1:
				# Plain rebinding suffices; the tf.identity copy was unnecessary.
				cur_inp = output

		output = tf.reshape(tf.squeeze(output), [batch_size, -1])

		# Add biases if requested.
		if biases_initializer is not None:
			tfv_biases = tf.get_variable('var_biases', [output_size], initializer = biases_initializer, regularizer = biases_regularizer, trainable = True)
			output = tf.add(output, tfv_biases, name = 'output_add')

	return output


# TT-accelerated fully-connected layer
def acc_linear(input,
			   input_modes,
			   output_modes,
			   tt_ranks,
			   name_scope = None):
	""" TT-accelerated fully-connected layer.

	Operates directly on TT cores (as produced by vectors_to_tt) instead of
	a dense input tensor: each input core is contracted with a learned
	weight core, then the merged cores are contracted into a single vector.

	Args:
		input: input TT cores.
		input_modes: factorization modes of the input dimension.
		output_modes: factorization modes of the output dimension.
		tt_ranks: preset TT ranks; len(tt_ranks) + 1 must equal
			len(input_modes) (== len(output_modes)).
		name_scope: name of this layer.

	Returns:
		Output tensor (with bias added) of size prod(output_modes).
	"""
	with tf.variable_scope(name_scope):
		num_cores = len(input_modes)
		full_ranks = [1] + tt_ranks + [1]

		# Create one weight core per input core and contract each pair.
		merged_cores = []
		for idx in range(num_cores):
			# Weight core shape: (r_{k-1}*m_{k}, n_{k}*r_{k})
			core_shape = [full_ranks[idx] * input_modes[idx], output_modes[idx] * full_ranks[idx + 1]]
			tfv_weight_core = tf.get_variable('var_weight_core_%d' % (idx + 1), core_shape, initializer = tf.contrib.layers.xavier_initializer(uniform = False))

			# Contract the input core with the weight core over mode m.
			inp_core = tf.squeeze(input[idx], axis = [2])
			wgt_core = tf.reshape(tfv_weight_core, [full_ranks[idx], input_modes[idx], output_modes[idx], full_ranks[idx + 1]])
			merged = tf.einsum('imj,pmnq->ipnjq', inp_core, wgt_core)
			merged_cores.append(tf.reshape(merged, [merged.shape[0].value * merged.shape[1].value, -1, merged.shape[-2].value * merged.shape[-1].value]))

		# Contract the merged cores left-to-right into a single vector.
		output = merged_cores[0]
		for idx in range(1, num_cores):
			output = tf.einsum('imj,jnk->imnk', output, merged_cores[idx])
			output = tf.reshape(output, [output.shape[0].value, -1, output.shape[-1].value])
		output = tf.squeeze(output, name = 'output_mal')

		# Bias term.
		tfv_biases = tf.get_variable('var_biases', [output.shape[0].value], initializer = tf.zeros_initializer)
		output = tf.add(output, tfv_biases, name = 'output_add')

	return output
