# This script runs a forward (inference) pass to test a previously trained model.
import sys
import imp
import time

import tensorflow as tf
import numpy as np


# Global command-line flags for the model / program.
flags = tf.app.flags
FLAGS = flags.FLAGS

# Model selection: path to the Python module that defines the network for a
# specific dataset (loaded dynamically in run_testing).
flags.DEFINE_string('flag_net_module', './ModuleMnist.py', 'Module selection with specific dataset.')

# Directory holding the TensorFlow log files and checkpoints.
flags.DEFINE_string('flag_log_dir', './log', 'Directory to put log files.')

# Test selection: tensor-train (TT) model vs. the standard model.
flags.DEFINE_boolean('flag_is_tt', True, 'True means to select the TT module while False means to select the standard module to test.')

# Batch size. (Bug fix: corrected the typo "extractly" -> "exactly" in the
# user-visible help text.)
flags.DEFINE_integer('flag_batch_size', 100, 'Batch size which must be divided exactly by the size of dataset.')


def run_testing(str_restore_ckpt = None):
	"""Restore a trained network from a checkpoint and evaluate it on the validation set.

	Builds the network graph (TT or standard, per FLAGS.flag_is_tt) on the CPU,
	restores the variables from the given checkpoint, then iterates over the
	validation set batch by batch, printing per-batch loss/precision and a
	final summary.

	Args:
		str_restore_ckpt: Path to the checkpoint to restore. Passing None makes
			Saver.restore fail, so callers should always supply a valid path.
	"""
	# Dynamically load the dataset-specific network definition module.
	# NOTE(review): the 'imp' module is deprecated (removed in Python 3.12);
	# importlib is the modern replacement.
	network = imp.load_source('network', FLAGS.flag_net_module)

	with tf.Graph().as_default(), tf.device('/cpu:0'):
		print('Begin to get dataset.')
		dict_dataset, dict_mean_std = network.interface_get_dataset(FLAGS.flag_is_tt)
		print('Get dataset has done.')

		# Build either the tensor-train (TT) network or the standard one.
		if FLAGS.flag_is_tt:
			dict_phs, loss_v, eval_v = network.get_network_tt(dict_mean_std, FLAGS.flag_batch_size)
		else:
			dict_phs, loss_v, eval_v = network.get_network_normal(dict_mean_std, FLAGS.flag_batch_size)
		tfob_saver = tf.train.Saver(tf.global_variables())
		# Force CPU-only execution for the test run (device_count GPU=0).
		tfob_sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True, device_count = {'GPU': 0}))
		tfob_saver.restore(tfob_sess, str_restore_ckpt)

		# Validation-set size and the number of batches needed to cover it
		# (the last batch may be partial).
		n_val_total = dict_dataset['validation']['validation_labels'].shape[0]
		n_val_count = n_val_total
		n_val_steps = (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size

		# Head index of the next batch within the dataset.
		n_index = 0

		# Accumulators: number of correct predictions and summed loss.
		n_val_corrects = 0
		n_val_losses = 0.0

		# Thread coordinator for the input queue runners.
		tfob_coord = tf.train.Coordinator()
		th_threads = tf.train.start_queue_runners(tfob_sess, tfob_coord)

		# Evaluation loop: one batch per iteration until the set is exhausted.
		start_time = time.time()
		while n_val_count > 0:
			# Build the feed dict for the current batch.
			dict_input_feed = network.interface_get_batch_part_validation(dict_dataset, dict_mean_std, dict_phs, n_index, FLAGS.flag_batch_size, FLAGS.flag_is_tt)

			# Run the evaluation ops and time the step.
			ep_start_time = time.time()
			eval_test, loss_test = tfob_sess.run([eval_v, loss_v], dict_input_feed)
			ep_end_time = time.time()
			# Only the first n_cnt entries of the batch are real samples; the
			# remainder (if any) is padding of the final partial batch.
			n_cnt = min(eval_test.shape[0], n_val_count)
			n_val_count -= n_cnt
			n_cur_step = n_val_steps - (n_val_count + FLAGS.flag_batch_size - 1) // FLAGS.flag_batch_size
			n_index += FLAGS.flag_batch_size

			# Accumulate correct predictions over the real samples only.
			n_val_corrects += np.sum(eval_test[:n_cnt])

			# Accumulate the loss weighted by the number of real samples in
			# this batch. (Bug fix: the original weighted the final partial
			# batch by the full batch size, biasing the mean loss computed in
			# the summary below.)
			n_val_losses += loss_test * n_cnt

			sys.stdout.write('Step %d/%d. Batch loss = %.2f. Batch precision = %.2f. Batch time expanding = %.2f ms.' % 
					(n_cur_step, n_val_steps, loss_test, np.mean(eval_test) * 100.0, int(round((ep_end_time - ep_start_time) * 1000))))
			sys.stdout.write('\n')
			sys.stdout.flush()

		# Final summary over the whole validation set. float() guards against
		# integer floor division when run under legacy Python 2.
		end_time = time.time()
		test_precision_value = float(n_val_corrects) / n_val_total
		test_loss_value = n_val_losses / n_val_total
		sys.stdout.write('Summary. Test loss = %.2f. Test precision = %.2f. Time expanding = %.2f ms.\n' % 
				   (test_loss_value, test_precision_value * 100.0, int(round((end_time - start_time) * 1000))))

		tfob_coord.request_stop()
		tfob_coord.join(th_threads)


def main(_):
	"""Locate the newest checkpoint for the selected model and run the test."""
	# TT checkpoints live in a 'tt' subdirectory of the log folder; the
	# standard model's checkpoints sit directly in the log folder.
	str_ckpt_dir = FLAGS.flag_log_dir if FLAGS.flag_is_tt is False else FLAGS.flag_log_dir + '/tt'
	str_last_ckpt = tf.train.latest_checkpoint(str_ckpt_dir)

	if str_last_ckpt is None:
		print('There is no corresponding trained network.')
	else:
		run_testing(str_last_ckpt)

	print('Program is finished.')


if __name__ == '__main__':
	# Parse the command-line flags and dispatch to main() via the TF app
	# runner. (Indentation normalized from spaces to the file's tab style.)
	tf.app.run()
