import sys
import imp
import time

import tensorflow as tf
import numpy as np

# Global parameters for the model/program (TF1-style flag definitions).
flags = tf.app.flags
FLAGS = flags.FLAGS

# Model selection: path to the Python module that defines the network and dataset.
flags.DEFINE_string('flag_net_module', './ModuleMnist.py', 'Module selection with specific dataset.')

# Global parameter: directory holding the TensorFlow checkpoints/logs.
flags.DEFINE_string('flag_log_dir', './log', 'Directory to put log files.')


def run_testing(str_ori_ckpt = None, str_tt_ckpt = None):
	"""Evaluate a TT-decomposed network on the validation set.

	Restores the variable values from the TT training checkpoint, injects
	them into a freshly built accuracy graph, then streams the validation
	set through it one sample at a time and prints the aggregate loss and
	precision to stdout.

	Args:
		str_ori_ckpt: Path to the original-model checkpoint. Currently
			unused; kept for interface compatibility with callers.
		str_tt_ckpt: Path to the TT-model checkpoint whose variables are
			restored and evaluated.
	"""
	# NOTE(review): `imp` is deprecated (removed in Python 3.12); migrate to
	# importlib.util when the runtime is upgraded.
	network = imp.load_source('network', FLAGS.flag_net_module)
	dict_dataset, dict_mean_std = network.get_dataset()

	# Phase 1: restore the TT training model and pull its variable values.
	l_vars = []
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_phs, loss_t, eval_t, loss_v, eval_v = network.get_network_output(dict_mean_std, 1, True)
		tfob_saver = tf.train.Saver(tf.global_variables())
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		try:
			tfob_saver.restore(tfob_sess, str_tt_ckpt)
			# Fetch the concrete numpy values of the TT variables.
			l_vars = tfob_sess.run(network.get_tt_variables(4))
		finally:
			# Exception-safe close (the original leaked the session on error).
			tfob_sess.close()
	tf.reset_default_graph()

	# Phase 2: build the accuracy graph and write the saved values back in.
	with tf.Graph().as_default(), tf.device('/cpu:0'):
		dict_phs, loss_v, eval_v = network.get_network_acc(dict_mean_std)
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		try:
			tfob_sess.run(tf.global_variables_initializer())

			# Overwrite each graph variable with the restored TT value.
			# Assumes get_network_acc creates variables in the same order as
			# get_tt_variables returned them -- TODO confirm.
			for i in range(len(l_vars)):
				tfob_sess.run(tf.assign(tf.global_variables()[i], l_vars[i]))

			# Number of validation samples; count down as they are consumed.
			n_total = dict_dataset['validation']['validation_labels'].shape[0]
			n_val_count = n_total

			n_index = 0          # head index of the next sample in the dataset
			n_val_corrects = 0   # running count of correct predictions
			n_val_losses = 0.0   # running sum of per-step losses

			# Thread coordinator for the input-queue runners.
			tfob_coord = tf.train.Coordinator()
			th_threads = tf.train.start_queue_runners(tfob_sess, tfob_coord)
			try:
				start_time = time.time()
				while n_val_count > 0:
					# Feed a single validation sample per step.
					dict_input_feed = network.get_batch_part_validation(dict_dataset, dict_mean_std, dict_phs, n_index, 1)

					# Run the evaluation ops for this sample.
					eval_test, loss_test = tfob_sess.run([eval_v, loss_v], dict_input_feed)
					n_cnt = min(eval_test.shape[0], n_val_count)
					n_val_count -= n_cnt
					n_index += 1

					# Accumulate correct-prediction count and loss.
					n_val_corrects += np.sum(eval_test[:n_cnt])
					n_val_losses += loss_test
				end_time = time.time()
			finally:
				# BUGFIX: the queue-runner threads were never stopped, leaving
				# them alive after testing finished.
				tfob_coord.request_stop()
				tfob_coord.join(th_threads)
		finally:
			# BUGFIX: the second session was never closed.
			tfob_sess.close()

		# Summarize. float() guards against integer floor-division of the
		# numpy integer count under Python 2.
		test_precision_value = float(n_val_corrects) / n_total
		test_loss_value = n_val_losses / n_total
		sys.stdout.write('Summary. Test loss = %.2f. Test precision = %.2f. Time expanding = %.d ms.\n' % 
				   (test_loss_value, test_precision_value * 100.0, int(round((end_time - start_time) * 1000))))


def main(_):
	"""Entry point: locate the latest checkpoints and run the test pass."""
	ckpt_ori = tf.train.latest_checkpoint(FLAGS.flag_log_dir + '/ori')
	ckpt_tt = tf.train.latest_checkpoint(FLAGS.flag_log_dir + '/tt')

	# Both checkpoints must exist before evaluation can proceed.
	if ckpt_ori is None or ckpt_tt is None:
		print('There is no corresponding trained network.')
	else:
		run_testing(ckpt_ori, ckpt_tt)

	print('Program is finished.')


# Parse command-line flags and dispatch to main() via the TF1 app runner.
if __name__ == '__main__':
    tf.app.run()
