import os
import imp
import sys
import h5py
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import SVD


# global params
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('flag_net_module', './CIFARModuleVGG14.py', 'Module selection with specific dataset.')
flags.DEFINE_string('flag_log_dir', './log', 'Directory to put log files.')
flags.DEFINE_string('flag_log_dir_tt', './log/svd', 'Directory to put log files of svd approximation.')
# fix: the default of an integer flag is an int, not the string '12'
flags.DEFINE_integer('flag_layer', 12, 'The layer to be approximated, begin at 1 since the first layer (0) is not compressed.')
flags.DEFINE_integer('flag_batch_size', 100, 'Batch size for testing.')
flags.DEFINE_integer('flag_max_epochs', 50, 'Maximum number of epochs to fine tune.')
flags.DEFINE_float('flag_learning_rate', 0.0003, 'Learning rate to define the momentum optimizer for fine tuning.')


# calculate average gradients from multi-GPUs
def average_gradients(tower_grads):
	"""Average per-tower (gradient, variable) lists into a single list.

	Each element of tower_grads is one tower's list of (grad, var) pairs;
	the pairs are position-aligned across towers.  Returns one list of
	(mean_grad, var) pairs, taking the variable from the first tower.
	"""
	averaged = []
	for pairs in zip(*tower_grads):
		# stack the towers' gradients along a new leading axis, then reduce it
		stacked = tf.concat([tf.expand_dims(g, 0) for g, _ in pairs], 0)
		mean_grad = tf.reduce_mean(stacked, 0)
		averaged.append((mean_grad, pairs[0][1]))
	return averaged


# read convolutional kernels from the trained model, then approx them and store in TT array
def run_svd(str_restore_ckpt = None):
	"""Approximate one conv kernel of a trained model with a TT (tensor-train) SVD.

	Restores the checkpoint at ``str_restore_ckpt`` on the CPU, picks the
	conv kernel selected by FLAGS.flag_layer, decomposes it via SVD.tt_svd,
	and writes each resulting TT core to an .h5 file under
	FLAGS.flag_log_dir_tt.  The default graph is reset afterwards.
	"""
	# load the dataset/network definition module selected by the flag
	network = imp.load_source('network', FLAGS.flag_net_module)

	# read the trained model
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_dataset, dict_mean_std = network.get_dataset()
		dict_inputs_batches = network.construct_batch_part(dict_mean_std, FLAGS.flag_batch_size)
		dict_phs = dict_inputs_batches['input_placeholders']
		t_labels = dict_inputs_batches['batches']['batch_train_labels']
		v_labels = dict_inputs_batches['batches']['batch_validation_labels']
		t_data = dict_inputs_batches['batches']['batch_train_data']
		v_data = dict_inputs_batches['batches']['batch_validation_data']

		# build the network so its variables exist, then restore the checkpoint.
		# collections = [] keeps the phase variable out of GLOBAL_VARIABLES, so
		# the Saver does not expect it in the checkpoint.
		tfv_train_phase = tf.Variable(False, trainable = False, name = 'var_train_phase', dtype = tf.bool, collections = [])
		loss_t, eval_t, loss_v, eval_v = network.get_network_output(0, t_data, t_labels, v_data, v_labels, tfv_train_phase)
		tfob_saver = tf.train.Saver(tf.global_variables())
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		tfob_saver.restore(tfob_sess, str_restore_ckpt)
		
		# read conv kernels
		lst_conv_name = []
		for var in tf.trainable_variables():
			if 'var_filter' in var.name:
				lst_conv_name.append(var.name)
		
		# svd approx except the 1st conv
		var_name = lst_conv_name[FLAGS.flag_layer]
		print('Begin to approx %s.' % var_name)
		# key into network.dict_shaperank: the 3 characters centred on the first
		# '.' of the variable name -- assumes the name contains a '.' at a known
		# position; TODO(review) confirm against the network module's naming scheme
		dot = var_name.find('.')
		dic_name = var_name[dot - 1 : dot + 2]
		input_modes = network.dict_shaperank[dic_name][0]
		output_modes = network.dict_shaperank[dic_name][1]
		tt_ranks = network.dict_shaperank[dic_name][2]
		# fetch the kernel values; run() accepts the variable's name string and
		# resolves it to the corresponding tensor
		conv_data = tfob_sess.run(var_name)
		# shape presumes 3x3 spatial kernels -- TODO(review) confirm for this net
		tfph_conv_data = tf.placeholder(dtype = tf.float32, shape = [3, 3, np.prod(input_modes), np.prod(output_modes)], name = 'ph_conv_data')
		l_tts = SVD.tt_svd(tfph_conv_data, [[3] + input_modes, [3] + output_modes], tt_ranks, dic_name)
		# tt_svd presumably creates new 'riemannian_<key>' variables; initialize
		# only those so the restored weights stay untouched
		l_new_vars = [v for v in tf.global_variables() if 'riemannian' + '_' + dic_name in v.name]
		for var in l_new_vars:
			tfob_sess.run(var.initializer)
		dict_feeder = {tfph_conv_data : conv_data}
		l_tt_data = tfob_sess.run(l_tts, dict_feeder)
		# one .h5 file per TT core, numbered from 1
		for i in range(len(l_tt_data)):
			with h5py.File(FLAGS.flag_log_dir_tt + '/core_%s_%d.h5' % (dic_name, i + 1), 'w') as file:
				file.create_dataset('core', data = l_tt_data[i])
		print('Approx %s is done.' % var_name)

		tfob_sess.close()
	tf.reset_default_graph()


# fine tune the TT model
def fine_tune(str_restore_ckpt = None):
	"""Fine tune the TT-decomposed model restored from ``str_restore_ckpt``.

	NOTE(review): currently a stub -- it only loads the network module and
	performs no training; the fine-tuning logic is not implemented yet.
	"""
	network = imp.load_source('network', FLAGS.flag_net_module)


def main(_):
	"""Entry point: resume from the latest checkpoint under flag_log_dir.

	If a checkpoint exists, ensures the TT output directory is present and
	asks the user whether to run the SVD approximation, fine tune, or cancel;
	otherwise reports that no trained network is available.
	"""
	str_last_ckpt = tf.train.latest_checkpoint(FLAGS.flag_log_dir)
	if str_last_ckpt is not None:
		# create the output directory for the TT cores on first use
		if not os.path.exists(FLAGS.flag_log_dir_tt):
			os.mkdir(FLAGS.flag_log_dir_tt)
		while True:
			sys.stdout.write('Checkpoint "%s" found. Continue last training session?\n' % str_last_ckpt)
			sys.stdout.write('Approx - [a/A]. Fine tune - [f/F]. Cancel - [c/C].\n')
			# strip whitespace so inputs like ' a' are recognized instead of looping
			ans = input().strip().lower()
			if not ans:
				continue
			if ans[0] == 'a':
				run_svd(str_last_ckpt)
				break
			elif ans[0] == 'f':
				fine_tune(str_last_ckpt)
				break
			elif ans[0] == 'c':
				return
	else:
		print('There is no corresponding trained network.')

	print('Program is finished.')


if __name__ == '__main__':
	# tab indentation for consistency with the rest of the file (original used spaces)
	tf.app.run()
