# This script compresses an already-trained network model into TT format.
import os
import imp
import math

import tensorflow as tf
import numpy as np

import Operators


# Global model/program parameters, exposed as TF1 command-line flags.
flags = tf.app.flags
FLAGS = flags.FLAGS

# Model selection: path to the Python module defining the network for a specific dataset.
flags.DEFINE_string('flag_net_module', './ModuleMnist.py', 'Module selection with specific dataset.')

# Global parameter: location of the TensorFlow log/checkpoint directory.
flags.DEFINE_string('flag_log_dir', './log', 'Directory to put log files.')


# Run the TT Riemannian optimization until the loss change converges.
def run_vars_and_ops(sess, l_tts, l_ops, dict_feeder, name_cur_var):
	"""Iterate the optimization ops until the normalized loss change drops below 1e-2.

	Args:
		sess: session whose run() executes ops / fetches tensors.
		l_tts: TT-core tensors to fetch once converged.
		l_ops: fetches yielding (loss, norm, train-op result) per step.
		dict_feeder: feed dict supplying the data placeholder(s).
		name_cur_var: variable name used only in the progress printout.

	Returns:
		The evaluated TT-core data for l_tts.
	"""
	prev_loss = 0.0
	converged = False
	while not converged:
		cur_loss, cur_norm, _ = sess.run(l_ops, dict_feeder)
		abs_err = math.fabs(cur_loss - prev_loss)
		rel_err = abs_err / cur_norm
		prev_loss = cur_loss
		print('Error of %s is %f and the absolute error is %f' % (name_cur_var, rel_err, abs_err))
		converged = rel_err < 1e-2
	return sess.run(l_tts)


# Compress a single original variable into its TT-format counterpart.
def compress_var(sess, var_value, var_name, n_dim):
	"""Transfer one original variable into the TT network's variables.

	The outermost 'linear_out' layer is copied verbatim; weight matrices
	('var_weights') and bias vectors ('var_biases') are factorized into
	n_dim TT cores via Riemannian optimization, and the optimized cores
	are assigned into the TT graph's variables.

	Args:
		sess: tf.Session on the TT-network graph.
		var_value: numpy array holding the original variable's value.
		var_name: '/'-separated original variable name
			(e.g. 'scope/layer_x/var_weights:0' — assumed to have at least
			3 components; TODO confirm against network.get_network_names()).
		n_dim: tensor order, i.e. number of TT cores per variable.

	Returns:
		None; all updates happen through tf.assign side effects.
	"""
	l_var_subnames = var_name.split('/')
	l_var_tt_names = []   # names of the TT-core variables matching this variable
	l_full_shape = []     # per-core shape, padded below to 4 entries
	tt_shape = []
	max_rank = 0
	ttm_rank = 0

	# The outermost linear layer is kept uncompressed: copy its value verbatim
	# into the variable of the same name under the '<scope>_tt' prefix.
	if l_var_subnames[1] == 'linear_out':
		var_tt_name = l_var_subnames[0] + '_tt' + '/' + l_var_subnames[1] + '/' + l_var_subnames[2]
		for var in tf.global_variables():
			if var.name == var_tt_name:
				sess.run(tf.assign(var, var_value))
				return None

	# Weight matrix
	elif l_var_subnames[-1].split(':')[0] == 'var_weights':
		# Collect the corresponding TT-core variable names and shapes.
		for i in range(n_dim):
			var_tt_name = l_var_subnames[0] + '_tt' + '/' + l_var_subnames[1] + '/' + 'var_weights_%d' % (i + 1) + ':' + l_var_subnames[-1].split(':')[-1]
			l_var_tt_names.append(var_tt_name)
			core_shape = tf.get_default_graph().get_tensor_by_name(var_tt_name).shape
			# Boundary cores are stored 3-D in the graph; pad with a unit rank
			# (leading for the first core, trailing for the last) to make all 4-D.
			if len(core_shape) < 4:
				if i == 0:
					core_shape = [1, core_shape[0].value, core_shape[1].value, core_shape[2].value]
				else:
					core_shape = [core_shape[0].value, core_shape[1].value, core_shape[2].value, 1]
			else:
				core_shape = [core_shape[0].value, core_shape[1].value, core_shape[2].value, core_shape[3].value]
			l_full_shape.append(core_shape)

	# Bias vector
	elif l_var_subnames[-1].split(':')[0] == 'var_biases':
		# Corresponding TT-core variable names and shapes; biases live under a
		# 'bias_<suffix>' scope derived from the layer's name suffix.
		l_var_subnames[1] = 'bias_' + l_var_subnames[1].split('_')[-1]
		for i in range(n_dim):
			var_tt_name = l_var_subnames[0] + '_tt' + '/' + l_var_subnames[1] + '/' + 'var_biases_%d' % (i + 1) + ':' + l_var_subnames[-1].split(':')[-1]
			l_var_tt_names.append(var_tt_name)
			core_shape = tf.get_default_graph().get_tensor_by_name(var_tt_name).shape
			# Bias cores carry a unit row mode (second entry fixed at 1);
			# pad boundary cores with unit ranks as for the weights above.
			if len(core_shape) < 3:
				if i == 0:
					core_shape = [1, 1, core_shape[0].value, core_shape[1].value]
				else:
					core_shape = [core_shape[0].value, 1, core_shape[1].value, 1]
			else:
				core_shape = [core_shape[0].value, 1, core_shape[1].value, core_shape[2].value]
			l_full_shape.append(core_shape)


	# Derive tt_shape, max_rank and ttm_rank from the collected core shapes.
	# NOTE(review): if var_name matched none of the branches above (and the
	# 'linear_out' variable was not found), l_full_shape is empty and the loop
	# below raises IndexError — presumably every variable is one of the three
	# kinds; confirm against the network module.
	l_left = []
	l_right = []
	l_ranks = []
	for i in range(n_dim):
		l_left.append(l_full_shape[i][1])
		l_right.append(l_full_shape[i][2])
		l_ranks.append(l_full_shape[i][-1])
	tt_shape.append(l_left)
	tt_shape.append(l_right)
	# For biases (all left modes are 1) take sqrt of the flattened size;
	# otherwise take the larger of the two flattened mode products.
	if np.multiply.reduce(l_left) == 1:
		max_rank = int(math.sqrt(np.multiply.reduce(l_right)))
	else:
		max_rank = np.max(np.array([np.multiply.reduce(l_left), np.multiply.reduce(l_right)]))
	ttm_rank = np.max(np.array(l_ranks))

	# Placeholder fed with the original variable data to be TT-factorized.
	tfph_var = tf.placeholder(dtype = tf.float32, shape = var_value.shape, name = 'ph_var')

	# Build and run the TT factorization (Riemannian optimization) of the original variable.
	l_tts, l_ops = Operators.riemannian(tfph_var, tt_shape, max_rank, ttm_rank, name = l_var_subnames[1])
	sess.run(tf.variables_initializer(l_tts))
	dict_feeder = {tfph_var : var_value}
	l_tt_data = run_vars_and_ops(sess, l_tts, l_ops, dict_feeder, l_var_subnames[1] + '/' + l_var_subnames[2])

	# Assign the optimized core data into the matching TT variables
	# (squeeze drops the unit ranks padded onto boundary cores).
	for i in range(n_dim):
		for var in tf.global_variables():
			if var.name == l_var_tt_names[i]:
				sess.run(tf.assign(var, np.squeeze(l_tt_data[i])))
				break

	return None


def run_compressing(str_restore_ckpt = None):
	"""Compress the trained network at str_restore_ckpt into TT format.

	Phase 1 restores the original checkpoint on CPU and snapshots every
	network variable (plus the global step) into numpy arrays; phase 2
	builds the TT version of the network, compresses each snapshot into
	its TT variables, and saves the result under FLAGS.flag_log_dir + '/tt'.

	Args:
		str_restore_ckpt: checkpoint path prefix of the trained original model.
	"""
	network = imp.load_source('network', FLAGS.flag_net_module)
	l_ori_vars = []
	step = 0

	# Graph 1: restore the original model and read its variable values into numpy arrays.
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))

		# Restore the original model's variables from the checkpoint meta graph.
		tfob_reader = tf.train.import_meta_graph(str_restore_ckpt +'.meta')
		tfob_reader.restore(tfob_sess, str_restore_ckpt)
		l_var_names = network.get_network_names()

		for i in range(len(l_var_names)):
			var = tfob_sess.run(l_var_names[i])
			l_ori_vars.append(var)
		step = tfob_sess.run('var_global_step:0')

		tfob_sess.close()

	tf.reset_default_graph()

	# Graph 2: define the TT network, optimize the original variable values
	# into TT format and update the TT variables.
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_dataset, dict_mean_std = network.interface_get_dataset(True)
		dict_phs, loss_v, eval_v = network.get_network_tt(dict_mean_std, 1)

		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))

		# Initialize the TT model's variables.
		l_vars = tf.global_variables()
		tfob_saver = tf.train.Saver(l_vars)
		tfob_sess.run(tf.global_variables_initializer())

		# Compress each original variable into its TT counterpart.
		for i in range(len(l_var_names)):
			compress_var(tfob_sess, l_ori_vars[i], l_var_names[i], network.get_tensor_order())
			print('Original variable: %s has been compressed.' % l_var_names[i])

		# Save the compressed (TT) model, preserving the original global step.
		new_log_dir = FLAGS.flag_log_dir + '/tt'
		if os.path.exists(new_log_dir) is False:
			os.mkdir(new_log_dir)
		str_checkpoint_path = os.path.join(new_log_dir, 'model.ckpt')
		tfob_saver.save(tfob_sess, str_checkpoint_path, global_step = step)


def main(_):
	"""Entry point: compress the newest checkpoint in the log directory, if one exists."""
	ckpt_prefix = tf.train.latest_checkpoint(FLAGS.flag_log_dir)
	if ckpt_prefix is None:
		print('There is no corresponding trained network.')
	else:
		run_compressing(ckpt_prefix)
	print('Program is finished.')


# Script entry: tf.app.run() parses the flags and dispatches to main().
# Fixed: the guard body used 4-space indentation while the rest of the
# file uses tabs; normalized to tabs for consistency.
if __name__ == '__main__':
	tf.app.run()
