import os
import shutil
import imp
import time
import h5py
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np


# global params
# Command-line flags for the evaluation run (TF1 `tf.app.flags` API).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('flag_net_module', './ModuleFC_MNIST.py', 'Module selection with specific dataset.')
flags.DEFINE_string('flag_log_dir', './log', 'Directory to put log files.')
# Help text fixed: "extractly" -> "exactly".
flags.DEFINE_integer('flag_batch_size', 1, 'Batch size which must be divided exactly by the size of dataset.')
flags.DEFINE_integer('flag_model', 1, 'Selected mode. 0: baseline; 1: CP; 2: TT.')
flags.DEFINE_integer('flag_rankA', 2, 'Rank of A.')
flags.DEFINE_integer('flag_rankB', 2, 'Rank of B.')
flags.DEFINE_integer('flag_KT_rank', 6, 'Value of KT rank.')


def run_testing(b_gpu_enabled = False, str_restore_ckpt = None):
	"""Run one full pass over the validation set and print loss/precision.

	Args:
		b_gpu_enabled: when True, open the session with soft placement and
			growable GPU memory; when False, hide all GPUs from the session
			(device_count={'GPU': 0}).
		str_restore_ckpt: optional checkpoint path. When given, variables are
			restored through the EMA shadow-variable mapping before evaluating;
			when None, evaluation runs on freshly initialized variables.
	"""
	# Dynamically load the dataset/network module named by the flag.
	# NOTE(review): `imp` is deprecated (removed in Python 3.12); migrate to
	# importlib if this script ever targets a newer interpreter.
	network = imp.load_source('network', FLAGS.flag_net_module)

	# Build a fresh graph; ops default to CPU unless explicitly placed below.
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		print('Begin to get dataset.')
		dict_dataset = network.get_dataset()
		print('Get dataset has done.')

		# iteration step, initialize as 0 (unused here, but created so the
		# graph matches the training-time variable set)
		tfv_global_step = tf.get_variable('var_global_step', [], tf.int32, tf.constant_initializer(0, tf.int32), trainable = False)

		# flag to indicate validating (False); collections=[] keeps it out of
		# GLOBAL_VARIABLES so the saver does not try to restore it
		tfv_train_phase = tf.Variable(False, trainable = False, name = 'var_train_phase', dtype = tf.bool, collections = [])

		# EMA for all trainable variables — only used to build the
		# shadow-variable name mapping for the restore saver below
		tfob_variable_averages = tf.train.ExponentialMovingAverage(0.9, name = 'avg_variable')

		# getting data: input placeholders plus train/validation batch tensors
		dict_inputs_batches = network.construct_batch_part(FLAGS.flag_batch_size)
		dict_phs = dict_inputs_batches['input_placeholders']
		t_labels = dict_inputs_batches['batches']['batch_train_labels']
		v_labels = dict_inputs_batches['batches']['batch_validation_labels']
		t_data = dict_inputs_batches['batches']['batch_train_data']
		v_data = dict_inputs_batches['batches']['batch_validation_data']

		# network inference — a single tower on gpu:0 (soft placement lets it
		# fall back to CPU when no GPU is visible)
		tower_losses_v = []
		tower_evals_v = []
		with tf.device('/gpu:0'):
			# The train-side outputs (loss_t, eval_t) are built but never run
			# in this evaluation-only script.
			loss_t, eval_t, loss_v, eval_v = network.get_network_output(FLAGS.flag_model, FLAGS.flag_KT_rank, FLAGS.flag_rankA, FLAGS.flag_rankB, t_data, t_labels, v_data, v_labels, tfv_train_phase)

			tower_losses_v.append(loss_v)
			tower_evals_v.append(eval_v)

		# model saver: restores checkpoint values into the EMA-averaged names,
		# so evaluation uses the moving-average weights
		tfob_saver_ema = tf.train.Saver(tfob_variable_averages.variables_to_restore())

		# Session
		if b_gpu_enabled == True:
			tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, gpu_options = tf.GPUOptions(allow_growth = True, per_process_gpu_memory_fraction = 0.99)))
		else:
			tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))

		tfob_sess.run(tf.global_variables_initializer())

		if str_restore_ckpt is not None:
			tfob_saver_ema.restore(tfob_sess, str_restore_ckpt)
			print('Previously started training session restored from "%s".\n' % str_restore_ckpt)
		print('Starting.')

		# Force inference mode (e.g. for batch-norm/dropout style switches,
		# if the network module uses this phase flag that way)
		tfob_sess.run(tfv_train_phase.assign(False))

		n_val_count = dict_dataset['validation']['validation_labels'].shape[0]
		# ceil-divide: last batch may be partial
		n_val_steps = (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size
		n_index = 0

		n_val_corrects = 0
		n_val_losses = 0.0

		# current epoch: walk the validation set one batch at a time
		while n_val_count > 0:
			dict_input_feed = network.get_batch_part_validation(dict_dataset, dict_phs, n_index, FLAGS.flag_batch_size)

			# Fetch all eval tensors followed by all loss tensors in one run.
			# NOTE(review): the [:1] / [-1:] slicing assumes exactly one tower;
			# it would drop towers if more were ever appended above.
			eval_validation_and_loss_validation = tfob_sess.run(tower_evals_v + tower_losses_v, dict_input_feed)
			eval_validation = np.concatenate(eval_validation_and_loss_validation[:1], axis = 0)
			loss_validation = eval_validation_and_loss_validation[-1:]
			# Clamp to the remaining example count so a padded final batch does
			# not inflate the correct-prediction tally.
			n_cnt = min(eval_validation.shape[0], n_val_count)
			n_val_count -= n_cnt
			n_cur_step = n_val_steps - (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size
			n_index += (FLAGS.flag_batch_size)

			n_val_corrects += np.sum(eval_validation[:n_cnt])
			# NOTE(review): the loss is weighted by the full batch size even on
			# a partial final batch, which slightly biases the mean loss when
			# the dataset size is not divisible by flag_batch_size — confirm
			# whether this is intended.
			n_val_losses += np.sum(loss_validation) * FLAGS.flag_batch_size

			print('Step %d/%d. Batch loss = %.2f. Batch precision = %.2f.' % 
					(n_cur_step, n_val_steps, np.mean(loss_validation), np.mean(eval_validation) * 100.0))

		# Evaluate end! Normalize by the total validation-set size.
		validation_precision_value = n_val_corrects / dict_dataset['validation']['validation_labels'].shape[0]
		validation_loss_value = n_val_losses / dict_dataset['validation']['validation_labels'].shape[0]
		print('Validation loss = %.2f. Validation precision = %.2f.\n' % 
				   (validation_loss_value, validation_precision_value * 100.0))


def main(_):
	# Enable GPU execution only when some local GPU exposes more than 2 GiB
	# of memory; otherwise fall back to a CPU-only session.
	gpu_memory_threshold = 2 * 1024 * 1024 * 1024
	use_gpu = any(
		dev.device_type == 'GPU' and dev.memory_limit > gpu_memory_threshold
		for dev in device_lib.list_local_devices())

	# Evaluate only when a trained checkpoint exists in the log directory.
	latest_ckpt = tf.train.latest_checkpoint(FLAGS.flag_log_dir)
	if latest_ckpt is not None:
		run_testing(use_gpu, latest_ckpt)

	print('Program is finished.')


if __name__ == '__main__':
	# Parse the flags defined above, then dispatch to main().
	# (Indentation normalized to a tab, matching the rest of the file.)
	tf.app.run()
