import math
import numpy as np
import tensorflow as tf
import t3f


def riemannian_sgd(ph_input_weight, tt_shape, tt_rank, lr, layer_name):
	"""Build graph ops that fit a TT-matrix variable to a weight tensor.

	Constructs a Riemannian SGD step that drives a TT variable toward the
	TT representation of ``ph_input_weight``.

	Returns:
		A tuple ``(tt_cores, [loss, train_op])`` — the cores of the TT
		variable being optimized plus the fetches to run each step.
	"""
	# svd should be executed on cpu
	with tf.device('/cpu:0'):
		with tf.variable_scope('riemannian' + '_' + layer_name):
			# Pad with the mandatory boundary TT-ranks of 1.
			padded_ranks = [1] + tt_rank + [1]

			# Flatten the 4-D weight into the matrix form the TT
			# decomposition expects.
			rows = np.prod(tt_shape[0])
			cols = np.prod(tt_shape[1])
			weight_matrix = tf.reshape(
				tf.transpose(ph_input_weight, [0, 2, 1, 3]), [rows, cols])

			# Exact TT representation of the input; rank is capped at the
			# full rank of the matrix.
			tt_target = t3f.to_tt_matrix(weight_matrix, tt_shape, np.min((rows, cols)))

			# TT variable that will be driven toward tt_target.
			glorot = t3f.glorot_initializer(tt_shape, tt_rank=padded_ranks)
			tt_manifold = t3f.get_variable('manifold', initializer=glorot)

			# One Riemannian SGD step: project the Euclidean gradient onto
			# the tangent space at tt_manifold, step, then round back to
			# the rank budget.
			euclidean_grad = tt_manifold - tt_target
			tangent_grad = t3f.riemannian.project(euclidean_grad, tt_manifold)
			updated = t3f.round(tt_manifold - lr * tangent_grad, max_tt_rank=padded_ranks)
			train_step = t3f.assign(tt_manifold, updated)

			loss = 0.5 * t3f.frobenius_norm_squared(tt_manifold - tt_target)

		return list(tt_manifold._tt_cores), [loss, train_step.op]


def run_manifold_ops(sess, l_tts, l_ops, dict_feeder, tol=0.1, max_steps=None):
	"""Run the training ops until the loss stops improving, then fetch cores.

	Args:
		sess: a session object; anything exposing ``run(fetches, feed_dict)``.
		l_tts: fetches for the TT-core data returned after convergence.
		l_ops: a ``[loss_tensor, train_op]`` pair executed each iteration.
		dict_feeder: feed dict supplied to every training step.
		tol: convergence threshold — stop once the absolute change in loss
			between consecutive steps falls below this value (default 0.1,
			matching the previous hard-coded behavior).
		max_steps: optional hard cap on iterations; ``None`` (the default)
			keeps the original unbounded loop, but a finite value guards
			against a non-converging loss looping forever.

	Returns:
		The evaluated data fetched via ``l_tts``.
	"""
	pre_loss = 0.0
	step = 0
	while True:
		loss, _ = sess.run(l_ops, dict_feeder)
		print('Current loss: %lf.' % loss)
		step += 1
		# Converged: loss changed by less than tol since the previous step.
		if abs(loss - pre_loss) < tol:
			break
		pre_loss = loss
		# Safety valve: bail out if a step budget was given and exhausted.
		if max_steps is not None and step >= max_steps:
			break

	return sess.run(l_tts)
