import tensorflow as tf


# 行QR分解
def row_qr(inp_matrix,
		   name):
	""" Row-wise QR decomposition.

	Decomposes the input matrix G as G = R * Q, where R is triangular and
	Q has orthonormal rows.  Implemented by transposing, applying the
	standard column QR (G^T = Q' R'), and transposing both factors back.

	Args:
		inp_matrix: input matrix G, [r, n] or [batch_size, r, n].
		name: name for this op's scope.

	Returns:
		out_r: triangular factor, [r, r] or [batch_size, r, r].
		out_q: row-orthonormal factor, [r, n] or [batch_size, r, n].
	"""
	with tf.name_scope(name, 'row_qr'):
		# matrix_transpose swaps the last two axes for any rank >= 2, so it
		# covers both the batched ([B, r, n]) and plain ([r, n]) cases
		# without the explicit rank branching.
		t_matrix = tf.matrix_transpose(inp_matrix)

		# Column QR of G^T: G^T = Q' R'  =>  G = R'^T * Q'^T.
		out_q, out_r = tf.qr(t_matrix)

		# Transpose back to recover the row-QR factors.
		out_q = tf.matrix_transpose(out_q)
		out_r = tf.matrix_transpose(out_r)

	return out_r, out_q


# 截断SVD
def truancated_svd(inp_matrix,
		rank,
		name):
	""" Truncated SVD.

	Computes the full SVD and keeps only the leading `rank` singular
	triplets, so that inp_matrix ~= out_u @ out_s @ out_v.

	NOTE: the function name is kept as 'truancated_svd' (sic) for
	backward compatibility with existing callers.

	Args:
		inp_matrix: input matrix G, [m, n] or [batch_size, m, n].
		rank: number of singular components to keep.
		name: name for this op's scope.

	Returns:
		out_u: left basis U, [m, rank] or [batch_size, m, rank].
		out_s: diagonal singular-value matrix, [rank, rank] or
			[batch_size, rank, rank].
		out_v: right basis V^T, [rank, n] or [batch_size, rank, n].
	"""
	with tf.name_scope(name, 'truancated_svd'):
		# Full-size SVD first; tf.svd returns V (not V^T).
		s, u, v = tf.svd(inp_matrix)

		# Strided slicing with an ellipsis handles both the batched and
		# unbatched layouts and, unlike tf.slice with static `.value`
		# sizes, tolerates unknown (None) static dimensions.
		out_u = u[..., :rank]
		out_s = tf.matrix_diag(s)[..., :rank, :rank]
		# Transpose V so its rows are the right singular vectors, then
		# keep the leading `rank` rows.
		out_v = tf.matrix_transpose(v)[..., :rank, :]

	return out_u, out_s, out_v


# C-Product(核缩并积)
def c_product(inp_tensor_left,
			  inp_tensor_right,
			  name):
	""" C-Product (core contraction product).

	Takes two 4th-order core tensors (the left one carrying an extra
	batch mode) and contracts mode 3 of the left factor with mode 1 of
	the right factor via einsum, then merges the paired rank modes.

	Args:
		inp_tensor_left: tensor A, [batch_size, r_1^(A), m, n, r_2^(A)].
		inp_tensor_right: tensor B, [r_1^(B), n, l, r_2^(B)].
		name: name for this op's scope.

	Returns:
		output_tensor: tensor C,
			[batch_size, r_1^(A)*r_1^(B), m, l, r_2^(A)*r_2^(B)].
	"""
	assert len(inp_tensor_left.shape) == 5 and len(inp_tensor_right.shape) == 4, 'Input tensors must be 4th-order except the mode of batch_size.'
	assert inp_tensor_left.shape[3].value == inp_tensor_right.shape[1].value, 'Mode 3 of the left tensor must match mode 1 of the right tensor for contraction.'

	with tf.name_scope(name, 'c_product'):
		# Contract over n; keep the rank pairs (i,p) and (j,q) adjacent so
		# a single reshape can merge them.
		output_tensor = tf.einsum('bimnj,pnlq->bipmljq', inp_tensor_left, inp_tensor_right)
		shp = output_tensor.shape
		# -1 for the batch mode so a statically-unknown batch_size is
		# supported; the remaining modes are static core dimensions.
		out_shape = [-1,
			  shp[1].value * shp[2].value,
			  shp[3].value,
			  shp[4].value,
			  shp[5].value * shp[6].value]
		output_tensor = tf.reshape(output_tensor, out_shape)

	return output_tensor


# 强克罗内克积
def strong_kronecker(inp_tensor_left,
					 inp_tensor_right,
					 name):
	""" Strong Kronecker product.

	Takes two batched 4th-order core tensors and contracts the last mode
	of the left factor with the first (non-batch) mode of the right
	factor via einsum, then merges the paired spatial modes.

	Args:
		inp_tensor_left: tensor A, [batch_size, r_1, m_1, n_1, r_2].
		inp_tensor_right: tensor B, [batch_size, r_2, m_2, n_2, r_3].
		name: name for this op's scope.

	Returns:
		output_tensor: tensor C, [batch_size, r_1, m_1*m_2, n_1*n_2, r_3].
	"""
	assert len(inp_tensor_left.shape) == 5 and len(inp_tensor_right.shape) == 5, 'Input tensors must be 4th-order except the mode of batch_size.'
	assert inp_tensor_left.shape[-1].value == inp_tensor_right.shape[1].value, 'The last mode of the left tensor must match mode 1 of the right tensor for contraction.'

	# Bug fix: the default scope name was 'c_product' (copy-paste from the
	# function above); it now correctly defaults to 'strong_kronecker'.
	with tf.name_scope(name, 'strong_kronecker'):
		# Contract over the shared rank mode j; keep the spatial pairs
		# (m,p) and (n,q) adjacent so a single reshape can merge them.
		output_tensor = tf.einsum('bimnj,bjpqk->bimpnqk', inp_tensor_left, inp_tensor_right)
		shp = output_tensor.shape
		# -1 for the batch mode so a statically-unknown batch_size is
		# supported; the remaining modes are static core dimensions.
		out_shape = [-1,
			   shp[1].value,
			   shp[2].value * shp[3].value,
			   shp[4].value * shp[5].value,
			   shp[6].value]
		output_tensor = tf.reshape(output_tensor, out_shape)

	return output_tensor
