import os
import imp
import sys
import h5py
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import Manifold as mf


# global params (command-line flags shared by both stages of the pipeline)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('flag_net_module', './CIFARModuleVGG14.py', 'Module selection with specific dataset.')
flags.DEFINE_string('flag_log_dir', './log', 'Directory to put log files.')
flags.DEFINE_string('flag_log_dir_tt', './log/manifold', 'Directory to put log files of manifold approximation.')
# NOTE: default was previously the string '12'; use an int to match the flag
# type and the other integer flags below.
flags.DEFINE_integer('flag_layer', 12, 'The layer to be approximated, begin at 1 since the first layer (0) is not compressed.')
flags.DEFINE_integer('flag_batch_size', 100, 'Batch size for testing.')
flags.DEFINE_integer('flag_max_epochs', 50, 'Maximum number of epochs to fine tune.')
flags.DEFINE_float('flag_learning_rate', 0.0003, 'Learning rate to define the momentum optimizer for fune tuning.')


# calculate average gradients from multi-GPUs
def average_gradients(tower_grads):
	"""Average per-tower gradients into one (gradient, variable) list.

	Args:
		tower_grads: list over towers, each element a list of
			(gradient, variable) pairs as returned by
			Optimizer.compute_gradients.

	Returns:
		A single list of (averaged_gradient, variable) pairs; the variable
		handle is taken from the first tower (all towers share variables).
	"""
	averaged = []
	for pairs in zip(*tower_grads):
		# stack the towers' gradients along a new leading axis, then mean it out
		stacked = tf.concat([tf.expand_dims(g, 0) for g, _ in pairs], 0)
		mean_grad = tf.reduce_mean(stacked, 0)
		# every tower refers to the same variable; use the first tower's handle
		averaged.append((mean_grad, pairs[0][1]))
	return averaged


# read convolutional kernels from the trained model, then approx them and store in TT array
def run_manifold(str_restore_ckpt = None):
	"""Approximate one convolutional layer of a trained model with a TT
	(tensor-train) decomposition via Riemannian SGD and dump the cores to HDF5.

	Args:
		str_restore_ckpt: checkpoint path of the trained (uncompressed) model;
			handed directly to Saver.restore, so a valid path is required in
			practice despite the None default.

	Side effects:
		Writes one 'core_<name>_<i>.h5' file per TT core into
		FLAGS.flag_log_dir_tt and resets the default TensorFlow graph on exit.
	"""
	# dynamically load the dataset/network definition module named by the flag
	network = imp.load_source('network', FLAGS.flag_net_module)

	# read the trained model
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_dataset, dict_mean_std = network.get_dataset()
		dict_inputs_batches = network.construct_batch_part(dict_mean_std, FLAGS.flag_batch_size)
		dict_phs = dict_inputs_batches['input_placeholders']
		t_labels = dict_inputs_batches['batches']['batch_train_labels']
		v_labels = dict_inputs_batches['batches']['batch_validation_labels']
		t_data = dict_inputs_batches['batches']['batch_train_data']
		v_data = dict_inputs_batches['batches']['batch_validation_data']

		# collections = [] keeps the phase variable out of GLOBAL_VARIABLES, so
		# the Saver built from tf.global_variables() below will not try to
		# restore it from the checkpoint
		tfv_train_phase = tf.Variable(False, trainable = False, name = 'var_train_phase', dtype = tf.bool, collections = [])
		loss_t, eval_t, loss_v, eval_v = network.get_network_output(0, t_data, t_labels, v_data, v_labels, tfv_train_phase)
		tfob_saver = tf.train.Saver(tf.global_variables())
		# CPU-only session: reading kernels and running the approximation does
		# not need a GPU
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		tfob_saver.restore(tfob_sess, str_restore_ckpt)
		
		# read conv kernels
		lst_conv_name = []
		for var in tf.trainable_variables():
			if 'var_filter' in var.name:
				lst_conv_name.append(var.name)
		
		# manifold approx except the 1st conv
		var_name = lst_conv_name[FLAGS.flag_layer]
		print('Begin to approx %s.' % var_name)
		# extract the short 'x.y' layer key around the first dot of the variable
		# name; this indexes network.dict_shaperank (assumes single-character
		# block/layer indices — TODO confirm for networks with more than 9 blocks)
		dot = var_name.find('.')
		dic_name = var_name[dot - 1 : dot + 2]
		input_modes = network.dict_shaperank[dic_name][0]
		output_modes = network.dict_shaperank[dic_name][1]
		tt_ranks = network.dict_shaperank[dic_name][2]
		conv_data = tfob_sess.run(var_name)
		# placeholder shape hard-codes a 3x3 spatial kernel whose channel counts
		# factorize into the configured modes — presumably true for VGG-style
		# convs; verify against the network module
		tfph_conv_data = tf.placeholder(dtype = tf.float32, shape = [3, 3, np.prod(input_modes), np.prod(output_modes)], name = 'ph_conv_data')
		l_tts, l_ops = mf.riemannian_sgd(tfph_conv_data, [[3] + input_modes, [3] + output_modes], tt_ranks, 0.1, dic_name)
		# initialize only the fresh variables created by the manifold helper for
		# this layer; the restored model variables are left untouched
		l_new_vars = [v for v in tf.global_variables() if 'riemannian' + '_' + dic_name in v.name]
		for var in l_new_vars:
			tfob_sess.run(var.initializer)
		dict_feeder = {tfph_conv_data : conv_data}
		l_tt_data = mf.run_manifold_ops(tfob_sess, l_tts, l_ops, dict_feeder)
		# persist each TT core to its own HDF5 file under dataset name 'core'
		for i in range(len(l_tt_data)):
			with h5py.File(FLAGS.flag_log_dir_tt + '/core_%s_%d.h5' % (dic_name, i + 1), 'w') as file:
				file.create_dataset('core', data = l_tt_data[i])
		print('Approx %s is done.' % var_name)

		tfob_sess.close()
	tf.reset_default_graph()


# fine tune the TT model
def fine_tune(str_restore_ckpt = None):
	"""Build the TT-compressed network, seed it from the trained model and the
	saved TT cores, evaluate it, and set up the fine-tuning graph.

	Args:
		str_restore_ckpt: checkpoint path of the original (uncompressed) model,
			used both to validate the baseline accuracy and to harvest the
			parameters copied into the TT model.

	NOTE(review): the function ends right after loading the previous learning
	curve — no actual training loop follows in this file; confirm whether the
	loop lives elsewhere or is still unfinished.
	"""
	network = imp.load_source('network', FLAGS.flag_net_module)

	# numpy arrays for copying parameters
	arr_first_conv = None
	arr_last_fc = None
	lst_biases = []
	lst_bn_params = []
	lst_tt_cores = []

	# read the trained model
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_dataset, dict_mean_std = network.get_dataset()
		dict_inputs_batches = network.construct_batch_part(dict_mean_std, FLAGS.flag_batch_size)
		dict_phs = dict_inputs_batches['input_placeholders']
		t_labels = dict_inputs_batches['batches']['batch_train_labels']
		v_labels = dict_inputs_batches['batches']['batch_validation_labels']
		t_data = dict_inputs_batches['batches']['batch_train_data']
		v_data = dict_inputs_batches['batches']['batch_validation_data']

		# collections = [] keeps the phase flag out of GLOBAL_VARIABLES so the
		# Saver below does not attempt to restore it
		tfv_train_phase = tf.Variable(False, trainable = False, name = 'var_train_phase', dtype = tf.bool, collections = [])
		loss_t, eval_t, loss_v, eval_v = network.get_network_output(0, t_data, t_labels, v_data, v_labels, tfv_train_phase)
		tfob_saver = tf.train.Saver(tf.global_variables())
		# CPU-only session is enough for validation + parameter harvesting
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		tfob_saver.restore(tfob_sess, str_restore_ckpt)

		### ---------- validate the acc of the original model ---------- ###
		# size of validation set
		n_val_count = dict_dataset['validation']['validation_labels'].shape[0]
		n_val_steps = (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size

		# index of picking batch_size data
		n_index = 0

		# correct samples and losses
		n_val_corrects = 0
		n_val_losses = 0.0

		tfob_sess.run(tfv_train_phase.assign(False))
		while n_val_count > 0:
			# feed batch_size data
			dict_input_feed = network.get_batch_part_validation(dict_dataset, dict_mean_std, dict_phs, n_index, FLAGS.flag_batch_size)

			# execute validation
			eval_validation, loss_validation = tfob_sess.run([eval_v, loss_v], dict_input_feed)
			# only the first n_cnt results belong to real samples (the last
			# batch may be padded by the feeder)
			n_cnt = min(eval_validation.shape[0], n_val_count)
			n_val_count -= n_cnt
			n_cur_step = n_val_steps - (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size
			n_index += FLAGS.flag_batch_size

			# accumulate correct samples
			n_val_corrects += np.sum(eval_validation[:n_cnt])

			# accumulate average losses
			# NOTE(review): weights every batch by the full batch size — the
			# final (partial) batch is over-weighted unless the validation set
			# size is a multiple of flag_batch_size; confirm
			n_val_losses += loss_validation * FLAGS.flag_batch_size

			sys.stdout.write('Step %d/%d. Batch loss = %.2f. Batch precision = %.2f.' % 
					 (n_cur_step, n_val_steps, loss_validation, np.mean(eval_validation) * 100.0))
			sys.stdout.write('\n')
			sys.stdout.flush()

		# Evaluate end!
		validation_precision_value = n_val_corrects / dict_dataset['validation']['validation_labels'].shape[0]
		validation_loss_value = n_val_losses / dict_dataset['validation']['validation_labels'].shape[0]
		sys.stdout.write('Validation loss = %.2f. Validation precision = %.2f.\n' % 
				(validation_loss_value, validation_precision_value * 100.0))
		### ---------- validate the acc of the original model ---------- ###

		# if the TT model does not exist
		str_tt_ckpt = tf.train.latest_checkpoint(FLAGS.flag_log_dir_tt)
		if str_tt_ckpt is None:
			# read conv params
			for var in tf.global_variables():
				# '1.1' presumably tags the first conv layer's filter — verify
				# the naming scheme in the network module
				if 'var_filter' in var.name and '1.1' in var.name:
					arr_first_conv = tfob_sess.run(var.name)
				elif 'var_biases' in var.name:
					lst_biases.append(tfob_sess.run(var.name))
				elif 'batch_norm' in var.name:
					lst_bn_params.append(tfob_sess.run(var.name))
				elif 'var_weights' in var.name:
					arr_last_fc = tfob_sess.run(var.name)

			# read tt cores
			# NOTE(review): os.listdir order is not guaranteed; the cores are
			# later assigned to variables in this order, so core/variable
			# pairing relies on the listing order matching graph order — verify
			h5_list = [v for v in os.listdir(FLAGS.flag_log_dir_tt) if 'h5' in v]
			for h5 in h5_list:
				with h5py.File(FLAGS.flag_log_dir_tt + '/' +h5, 'r') as file:
					# Dataset.value is deprecated in modern h5py (use ds[()])
					lst_tt_cores.append(file.get('core').value)

		tfob_sess.close()
	tf.reset_default_graph()

	# enable GPU training only if some local GPU has more than 2 GiB of memory
	b_gpu_enabled = False
	l_devices = device_lib.list_local_devices()
	for i in range(len(l_devices)):
		if l_devices[i].device_type == 'GPU':
			if l_devices[i].memory_limit > 2 * 1024 * 1024 * 1024 :
				b_gpu_enabled = True
				break

	# load the TT model
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_dataset, dict_mean_std = network.get_dataset()

		# GPU amount
		# starts at 1 so that the CPU-only fallback still builds one tower;
		# with GPUs enabled this nets out to the exact GPU count
		n_num_gpus = 1
		if b_gpu_enabled == True:
			for i in range(len(l_devices)):
				if l_devices[i].device_type == 'GPU':
					n_num_gpus += 1
			n_num_gpus -= 1

		tfv_global_step = tf.get_variable('var_global_step', [], tf.int32, tf.constant_initializer(0, tf.int32), trainable = False)
		tfv_train_phase = tf.Variable(True, trainable = False, name = 'var_train_phase', dtype = tf.bool, collections = [])
		tfob_variable_averages = tf.train.ExponentialMovingAverage(0.9, name = 'avg_variable')
		optim = tf.train.MomentumOptimizer(FLAGS.flag_learning_rate, 0.9)
		# one global batch is split evenly across the towers below
		dict_inputs_batches = network.construct_batch_part(dict_mean_std, FLAGS.flag_batch_size * n_num_gpus)
		dict_phs = dict_inputs_batches['input_placeholders']
		t_labels = dict_inputs_batches['batches']['batch_train_labels']
		v_labels = dict_inputs_batches['batches']['batch_validation_labels']
		t_data = dict_inputs_batches['batches']['batch_train_data']
		v_data = dict_inputs_batches['batches']['batch_validation_data']
		t_data_split = tf.split(t_data, n_num_gpus)
		t_labels_split = tf.split(t_labels, n_num_gpus)
		v_data_split = tf.split(v_data, n_num_gpus)
		v_labels_split = tf.split(v_labels, n_num_gpus)

		# build one tower per GPU; losses/evals/grads are collected per tower
		tower_losses_t = []
		tower_evals_t = []
		tower_losses_v = []
		tower_evals_v = []
		tower_grads = []		
		for i in range(n_num_gpus):
			with tf.device('/gpu:%d' % i):
				loss_t, eval_t, loss_v, eval_v = network.get_tt_network_output(i, t_data_split[i], t_labels_split[i], v_data_split[i], v_labels_split[i], tfv_train_phase)
				tower_losses_t.append(loss_t)
				tower_evals_t.append(eval_t)
				tower_losses_v.append(loss_v)
				tower_evals_v.append(eval_v)
				grads = optim.compute_gradients(loss_t)
				# clip gradients element-wise to [-1, 1] and drop None grads
				grads = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in grads if grad is not None]
				tower_grads.append(grads)

		# record true parameters before designing training graph
		lst_true_global_vars = tf.global_variables()

		grads = average_gradients(tower_grads)
		tfop_apply_gradients = optim.apply_gradients(grads, tfv_global_step)
		# apply_gradients bumps global_step by 1; add n_num_gpus - 1 more so
		# each training run advances the step once per tower
		with tf.control_dependencies([tfop_apply_gradients]):
			tfop_normalize_gs = tfv_global_step.assign_add(n_num_gpus - 1)
		tfop_variable_averages_apply = tfob_variable_averages.apply(tf.trainable_variables())
		# running (exponentially smoothed) training loss/precision trackers
		tfv_train_loss = tf.Variable(5.0, trainable = False, name = 'var_train_loss', dtype = tf.float32)
		tfv_train_precision = tf.Variable(0.0, trainable = False, name = 'var_train_precision', dtype = tf.float32)
		l_ops_train_lp_update = []
		for i in range(n_num_gpus):
			l_ops_train_lp_update.append(tfv_train_loss.assign_sub(0.1 * (tfv_train_loss - tower_losses_t[i])))
			new_precision = tf.reduce_mean(tf.cast(tower_evals_t[i], tf.float32))
			l_ops_train_lp_update.append(tfv_train_precision.assign_sub(0.1 * (tfv_train_precision - new_precision)))
		tfop_train_lp_update = tf.group(*l_ops_train_lp_update)
		tfop_train = tf.group(tfop_apply_gradients, tfop_normalize_gs, tfop_variable_averages_apply, tfop_train_lp_update)

		tfob_saver = tf.train.Saver(tf.global_variables())
		tfob_saver_ema = tf.train.Saver(tfob_variable_averages.variables_to_restore())
		if b_gpu_enabled == True:
			tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, gpu_options = tf.GPUOptions(allow_growth = True, per_process_gpu_memory_fraction = 0.95)))
		else:
			tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		tfob_sess.run(tf.global_variables_initializer())

		# restore if the TT model already exists, or assign the tt value
		n_epoch_steps = int(dict_dataset['train']['train_labels'].shape[0] / FLAGS.flag_batch_size + 0.5)
		n_start_epoch = 0
		str_tt_ckpt = tf.train.latest_checkpoint(FLAGS.flag_log_dir_tt)
		if str_tt_ckpt is not None:
			tfob_saver.restore(tfob_sess, str_tt_ckpt)
			sys.stdout.write('Previously started training session restored from "%s".\n' % str_tt_ckpt)
			n_start_epoch = int(tfob_sess.run(tfv_global_step)) // n_epoch_steps
		else:
			# cover the initialized params
			# (copy the harvested original-model parameters plus the TT cores
			# into the freshly initialized TT graph, matching by name pattern)
			i_tt = 0
			i_biase = 0
			i_bn = 0
			for var in lst_true_global_vars:
				if 'var_filter' in var.name and '1.1' in var.name:
					tfob_sess.run(tf.assign(var, arr_first_conv))
				elif 'var_conv_core' in var.name:
					# each TT core is flattened to a 2-D matrix before assignment
					shape = [lst_tt_cores[i_tt].shape[0] * lst_tt_cores[i_tt].shape[1], lst_tt_cores[i_tt].shape[2] * lst_tt_cores[i_tt].shape[3]]
					tfob_sess.run(tf.assign(var, np.reshape(lst_tt_cores[i_tt], shape)))
					i_tt = i_tt + 1
				elif 'var_biases' in var.name:
					tfob_sess.run(tf.assign(var, lst_biases[i_biase]))
					i_biase = i_biase + 1
				elif 'batch_norm' in var.name:
					tfob_sess.run(tf.assign(var, lst_bn_params[i_bn]))
					i_bn = i_bn + 1
				elif 'var_weights' in var.name:
					tfob_sess.run(tf.assign(var, arr_last_fc))
				print('Parameter: %s is assigned.' % var.name)

			### ---------- test the current performance first ---------- ###
			# size of validation set
			n_val_count = dict_dataset['validation']['validation_labels'].shape[0]
			n_val_steps = (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size

			# index of picking batch_size data
			n_index = 0

			# correct samples and losses
			n_val_corrects = 0
			n_val_losses = 0.0

			tfob_sess.run(tfv_train_phase.assign(False))
			while n_val_count > 0:
				# run validation
				dict_input_feed = network.get_batch_part_validation(dict_dataset, dict_mean_std, dict_phs, n_index, FLAGS.flag_batch_size * n_num_gpus)
				# first n_num_gpus results are the per-tower evals, the rest the
				# per-tower losses (same ordering as the fetch list)
				eval_validation_and_loss_validation = tfob_sess.run(tower_evals_v + tower_losses_v, dict_input_feed)
				eval_validation = np.concatenate(eval_validation_and_loss_validation[:n_num_gpus], axis = 0)
				loss_validation = eval_validation_and_loss_validation[-n_num_gpus:]
				n_cnt = min(eval_validation.shape[0], n_val_count)
				n_val_count -= n_cnt
				n_cur_step = n_val_steps - (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size
				n_index += (FLAGS.flag_batch_size * n_num_gpus)

				# accumulate positive examples and losses
				n_val_corrects += np.sum(eval_validation[:n_cnt])
				n_val_losses += np.sum(loss_validation) * FLAGS.flag_batch_size
				print('Step %d/%d. Batch loss = %.2f. Batch precision = %.2f.' % (n_cur_step, n_val_steps, np.mean(loss_validation), np.mean(eval_validation) * 100.0))
				
			# Evaluate end! evaluate current result and restore checkpoint without EMA for the next training
			validation_precision_value = n_val_corrects / dict_dataset['validation']['validation_labels'].shape[0]
			validation_loss_value = n_val_losses / dict_dataset['validation']['validation_labels'].shape[0]
			print('Validation loss = %.2f. Validation precision = %.2f.' % (validation_loss_value, validation_precision_value * 100.0))
			### ---------- test the current performance first ---------- ###

			# save model to flag_log_dir_ft
			str_checkpoint_path = os.path.join(FLAGS.flag_log_dir_tt, 'model.ckpt')
			str_ckpt = tfob_saver.save(tfob_sess, str_checkpoint_path, tfv_global_step)

		sys.stdout.write('Starting with epoch #%d.\n' % (n_start_epoch + 1))

		# loss and val recorded
		# reload a previously saved learning curve, if any, to append to it
		l_rc_loss_pre = []
		if os.path.exists(FLAGS.flag_log_dir_tt + '/learning_curve.h5'):
			with h5py.File(FLAGS.flag_log_dir_tt + '/learning_curve.h5', 'r') as file:
				# Dataset.value is deprecated in modern h5py (use ds[()])
				arr_rc_loss_pre = file.get('curve').value
			l_rc_loss_pre = arr_rc_loss_pre.tolist()



def main(_):
	"""Entry point: locate the last trained checkpoint and let the user pick
	between TT approximation, fine tuning, or cancelling."""
	str_last_ckpt = tf.train.latest_checkpoint(FLAGS.flag_log_dir)
	if str_last_ckpt is None:
		print('There is no corresponding trained network.')
	else:
		# make sure the TT output directory exists before either stage runs
		if not os.path.exists(FLAGS.flag_log_dir_tt):
			os.mkdir(FLAGS.flag_log_dir_tt)
		while True:
			sys.stdout.write('Checkpoint "%s" found. Continue last training session?\n' % str_last_ckpt)
			sys.stdout.write('Approx - [a/A]. Fine tune - [f/F]. Cancel - [c/C].\n')
			ans = input().lower()
			if not ans:
				# empty input: ask again
				continue
			choice = ans[0]
			if choice == 'a':
				run_manifold(str_last_ckpt)
				break
			if choice == 'f':
				fine_tune(str_last_ckpt)
				break
			if choice == 'c':
				# cancelling exits without the final status line (as before)
				return

	print('Program is finished.')


if __name__ == '__main__':
	# use tab indentation to match the rest of the file (was 4 spaces);
	# tf.app.run() parses the flags and dispatches to main()
	tf.app.run()
