# -- coding: utf-8 --
'''
Evaluation script: runs the trained model over the entire test set.
'''
from __future__ import absolute_import
from __future__ import division
# from __future__ import print_function

import math
import tensorflow as tf
import time
from datetime import datetime
from sklearn.metrics import roc_curve

from tensorflow.python.platform import gfile
from prostate_input import inputPipeLine
import prostate_network
from tensorflow.python.platform import tf_logging as logging
import pdb
import time
import numpy as np
import tensorflow.contrib.metrics as metrics
import os
from compiler.ast import flatten
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import matplotlib
import pylab
slim = tf.contrib.slim

# Command-line flags for the evaluation run.
tf.app.flags.DEFINE_integer(
	'batch_size', 100, 'The number of samples in each batch.')

tf.app.flags.DEFINE_integer(
	'max_num_batches', None,
	'Max number of batches to evaluate by default use all.')

tf.app.flags.DEFINE_string(
	'checkpoint_dir', None,
	'The directory where the model was written to or an absolute path to a '
	'checkpoint file.')

tf.app.flags.DEFINE_string(
	'eval_log_dir', None, 'Directory where the results are saved to.')

# BUG FIX: the help texts of the two flags below were copy-pasted from
# unrelated flags; they now describe what the flags actually control.
tf.app.flags.DEFINE_bool(
	'write_eval_log', None, 'Whether to write the evaluation log to eval_log_dir.')

tf.app.flags.DEFINE_bool(
	'count_num_examples', None,
	'Whether to count the examples in dataset_dir before evaluating '
	'(overrides --num_examples).')

# tf.app.flags.DEFINE_integer(
# 	'num_preprocessing_threads', 4,
# 	'The number of threads used to create the batches.')

tf.app.flags.DEFINE_string(
	'dataset_dir', None, 'The directory where the dataset files are stored.')

tf.app.flags.DEFINE_string(
	'train_log_path', None, 'train_log_path.')

tf.app.flags.DEFINE_string(
	'model_name', None, 'The name of the architecture to evaluate.')

tf.app.flags.DEFINE_float(
	'moving_average_decay', None,
	'The decay to use for the moving average.'
	'If left as None, then moving averages are not used.')

# BUG FIX: help strings were empty ('''''') — now documented.
tf.app.flags.DEFINE_string('is_train_data', 'validation',
	'Dataset split to evaluate (passed to inputPipeLine).')
tf.app.flags.DEFINE_integer('num_examples', 10000,
	'Total number of examples in the test set.')
FLAGS = tf.app.flags.FLAGS

def model_select(model, image_batch, is_training=False, reuse = None):
	'''
	Build the inference graph for the requested architecture.

	Args:
	model: architecture name, e.g. 'vgg16', 'resnet_v2_50', 'InceptionV3'.
	image_batch: input image tensor fed to the network builder.
	is_training: forwarded to the builder (batch-norm / dropout mode).
	reuse: variable-reuse flag forwarded to all builders except vgg16.

	Returns:
	Whatever the matching prostate_network builder returns, or None when
	the model name is not recognized (same as the original if-chain).
	'''
	# vgg16 is special-cased: its builder does not accept `reuse`.
	if model == 'vgg16':
		return prostate_network.vgg16_inference(image_batch, is_training=is_training)
	# Map the model name to the builder's attribute name; the attribute is
	# looked up lazily so only the selected builder is ever touched.
	builder_names = {
		'resnet_v2_50': 'resnet50_inference',
		'resnet_v2_101': 'resnet101_inference',
		'resnet_v2_152': 'resnet152_inference',
		'InceptionV3': 'inception_v3_inference',
		'InceptionResnetV2': 'inception_resnet_v2_inference',
	}
	if model in builder_names:
		builder = getattr(prostate_network, builder_names[model])
		return builder(image_batch, is_training=is_training, reuse=reuse)
	# Unknown model: fall through and return None, as the original did.

def main(_):
	'''
	Evaluation entry point (invoked by tf.app.run()).

	Builds the input pipeline and inference graph for the selected model,
	sets up streaming metrics, runs the evaluation loop via test(), prints
	the aggregate results and writes the evaluation log.
	'''
	if FLAGS.count_num_examples:
		# Walk the TFRecords once to get the real example count instead of
		# trusting the --num_examples default.
		total_pos, total_neg, total_examples = get_num_examples(FLAGS.dataset_dir)
		FLAGS.num_examples = total_examples
		print('--------training patches info by counting---------')
		print('total postive patches:%d\n'%total_pos)
		print('total negtive patches:%d\n'%total_neg)
		print('total patches:%d\n'%total_examples)
	else:
		pass

	if not FLAGS.dataset_dir:
		raise ValueError('You must supply the dataset directory with --dataset_dir')

	tf.logging.set_verbosity(tf.logging.INFO)
	with tf.Graph().as_default():
		global_step = tf.train.create_global_step()
		images, labels, _,_,_, rawimages = inputPipeLine(FLAGS.is_train_data, batchSize = FLAGS.batch_size, fast_mode = True, Data_Dir = FLAGS.dataset_dir, numEpochs = 1, net_use=FLAGS.model_name)
		logits, end_points = model_select(FLAGS.model_name, images)
		# pdb.set_trace()

		if FLAGS.moving_average_decay:
			# Evaluate with the shadow (moving-average) weights when training
			# maintained them.
			variable_averages = tf.train.ExponentialMovingAverage(
				FLAGS.moving_average_decay, global_step)
			variables_to_restore = variable_averages.variables_to_restore(
				slim.get_model_variables())
			variables_to_restore[global_step.op.name] = global_step
		else:
			variables_to_restore = slim.get_variables_to_restore()

		predictions = tf.argmax(logits, 1)# hard class prediction: 0 or 1
		soft_predictions =tf.nn.softmax(logits,dim=-1)# softmax class probabilities
		# pdb.set_trace()
		labels = tf.squeeze(labels)

		# Streaming metrics: each entry yields a (value, update_op) pair that
		# accumulates over every batch processed in test().
		names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
			'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
			'Auc': slim.metrics.streaming_auc(soft_predictions[:,1], labels),
			'TP': slim.metrics.streaming_true_positives(predictions, labels),
			'FP': slim.metrics.streaming_false_positives(predictions, labels),
			'TN': slim.metrics.streaming_true_negatives(predictions, labels),
			'FN': slim.metrics.streaming_false_negatives(predictions, labels),
			'Precision':slim.metrics.streaming_precision(predictions, labels),
			'Recall':slim.metrics.streaming_recall(predictions, labels),
		})

		# Print the summaries to screen.
		for name, value in names_to_values.items():
			summary_name = 'eval/%s' % name
			op = tf.summary.scalar(summary_name, value, collections=[])
			op = tf.Print(op, [value], summary_name)
			tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

		if FLAGS.max_num_batches:
			num_batches = FLAGS.max_num_batches
		else:
			# This ensures that we make a single pass over all of the data.
			num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))

		if tf.gfile.IsDirectory(FLAGS.checkpoint_dir):
			checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
		else:
			checkpoint_path = FLAGS.checkpoint_dir

		tf.logging.info('Checkpoint Path: %s' % checkpoint_path)
		tf.logging.info('DataSet Path: %s' % FLAGS.dataset_dir)

		gpu_options = tf.GPUOptions(allow_growth=True)
		session_config=tf.ConfigProto(
			allow_soft_placement=True,
			log_device_placement=False,
			gpu_options=gpu_options)

		init_op = tf.group(tf.global_variables_initializer(),
							tf.local_variables_initializer())

		# metrics_values = slim.evaluation.evaluate_once(
		# 									master=FLAGS.master,
		# 									checkpoint_path=checkpoint_path,
		# 									logdir=FLAGS.eval_log_dir,
		# 									num_evals=num_batches,
		# 									initial_op=init_op,
		# 									eval_op=list(names_to_updates.values()),
		# 									final_op=list(names_to_updates.values()),
		# 									variables_to_restore=variables_to_restore,
		# 									session_config=session_config)
		
		start_time = time.time()
		tf.logging.info('Checkpoint Path: %s' % checkpoint_path)
		tf.logging.info('DataSet Path: %s' % FLAGS.dataset_dir)

		# Evaluate batch by batch, looping num_batches times.
		# NOTE(review): test() deliberately receives FLAGS.checkpoint_dir (a
		# directory) rather than the resolved checkpoint_path above, because
		# it calls tf.train.get_checkpoint_state internally.
		metrics_values,y_pred_list,y_label_list = test(checkpoint_path=FLAGS.checkpoint_dir,
								num_evals=num_batches,
								initial_op=init_op,
								eval_op=list(names_to_updates.values()),
								eval_values=list(names_to_values.values()),
								variables_to_restore=variables_to_restore,
								session_config=session_config,
								soft_predictions=soft_predictions,
								labels=labels,
								end_points=end_points,
								images=images,
								rawimages=rawimages)

		timecost(time.time()-start_time)
		result_print(metrics_values)

		# Write the evaluation log.
		eval_log(FLAGS.eval_log_dir,
				FLAGS.train_log_path,
				checkpoint_path,
				FLAGS.num_examples,
				metrics_values,
				y_pred_list,
				y_label_list)
		print('eval_log has been write')	

def result_print(metrics_values):
	'''
	Print the aggregate metrics for the whole test run.

	Args:
	metrics_values: metric list laid out by test():
		[FP, Auc, Recall, Precision, TP, TN, FN, Accuracy]
	'''
	# Unpack the positional layout produced by test() into named locals
	# (the original built an intermediate dict for the same purpose).
	fp = metrics_values[0]
	auc = metrics_values[1]
	recall = metrics_values[2]
	precision = metrics_values[3]
	tp = metrics_values[4]
	tn = metrics_values[5]
	fn = metrics_values[6]
	accuracy = metrics_values[7]
	print('\n\n--------------Total Test Result------------')
	report_fmt = ('num_examples: %d\n \
		Accuracy: %f\n \
		AUC: %f\n \
		Precision: %f\n \
		Recall: %f\n \
		TP: %d\n \
		FN: %d\n \
		TN: %d\n \
		FP: %d\n ')

	print(report_fmt % (FLAGS.num_examples,
						accuracy,
						auc,
						precision,
						recall,
						tp,
						fn,
						tn,
						fp))

def test(checkpoint_path,
				num_evals,
				initial_op,
				eval_op,
				eval_values,
				variables_to_restore,
				session_config,
				soft_predictions,
				labels,
				end_points,
				images,
				rawimages):
	'''
	Core evaluation routine.

	The original implementation was the slim.evaluation.evaluate_once call
	that is commented out in main(), but that function is sealed too
	tightly, so this is a decoupled rewrite of the same processing flow
	that allows more customized data inspection and output.

	Args:
	checkpoint_path: checkpoint directory; resolved below via
		tf.train.get_checkpoint_state
	num_evals: number of loop iterations, one batch each; set in main() as
		math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
	initial_op: initialization op
	eval_op: metric update ops to run each batch, see
		names_to_updates.values() in main()
	eval_values: metric value tensors, see names_to_values.values() in main()
	variables_to_restore: network variables to restore from the checkpoint
	session_config: tf.ConfigProto for the session
	soft_predictions: logits after softmax
	labels: ground-truth label tensor
	end_points, images, rawimages: only used by the commented-out
		feature-map extraction code below; can be ignored

	Returns:
	(metrics_values, y_pred_list, y_label_list), where metrics_values is
	laid out as [FP, Auc, Recall, Precision, TP, TN, FN, Accuracy] and the
	two lists hold per-example positive-class probability / true label.
	'''
	gpu_options = tf.GPUOptions(allow_growth=True)
	sess = tf.Session(config=session_config)
	sess.run(initial_op)
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess=sess,coord=coord)
	tf.logging.info('Evaluating model from %s' %checkpoint_path)

	ckpt = tf.train.get_checkpoint_state(checkpoint_path)
	saver = tf.train.Saver(variables_to_restore)
	try:
		saver.restore(sess, ckpt.model_checkpoint_path)
		# global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
		# print 'Succesfully loaded model from %s at step=%s.' %(ckpt.model_checkpoint_path, global_step)
	except:
		# NOTE(review): the bare except also masks unrelated failures (e.g.
		# ckpt being None); consider logging the original exception.
		raise ValueError('No checkpoint found')

	print '%s: start evaluation on TestSet.' % (datetime.now())
	evals_completed = 1

	y_pred_list=[]
	y_label_list=[]
	try:
		while evals_completed <= num_evals and not coord.should_stop():   
			# One batch: run the metric update ops and fetch this batch's
			# softmax probabilities and true labels.
			_, y_pred, y_label = sess.run([eval_op, soft_predictions, labels])

			# The commented-out block below extracts feature-layer images.
			# plt.figure(1)
			# plt.imshow(sess.run(rawimages[0,:,:,:]))
			# plt.savefig('/home/ramsley/conv_figs/origin_img.jpg')
			# cmap=matplotlib.cm.jet
			# pylab.show()
			# # plt.figure(2)
			# for i in range(64):
			# 	# plt.subplot(8,8,i+1)
			# 	plt.imshow(sess.run([end_points['resnet_v2_50/block1/unit_3/bottleneck_v2/conv1'][0,:,:,i]][0]), cmap=cmap)
			# 	plt.savefig('/home/ramsley/conv_figs/resnet_v2_50_block1_unit_3_conv1_%s.jpg'%(i+1))
			# 	# pdb.set_trace()
			# # pylab.show()
			# pdb.set_trace()
			
			# Accumulate the positive-class probability column and labels;
			# flatten keeps both accumulators as flat lists across batches.
			y_pred_list.append(list(y_pred[:,1]))
			y_label_list.append(list(y_label))
			y_pred_list = flatten(y_pred_list)
			y_label_list = flatten(y_label_list)
			logging.info('Evaluation [%d/%d]', evals_completed, num_evals)       
			if evals_completed >= num_evals:
				coord.request_stop()
			evals_completed += 1
	except tf.errors.OutOfRangeError:
		# Input queue exhausted (numEpochs=1 in the pipeline).
		print ('Done Eval -- epochlimit reached')
		coord.request_stop()
	finally:
		# coord.request_stop()
		# Fetch the final streaming-metric values accumulated over all batches.
		# NOTE(review): this unpacking assumes a fixed iteration order of the
		# metric dict built in main() — fragile; verify the order matches.
		FP,Auc,Recall,Precision,TP,TN,FN,Accuracy = sess.run(eval_values)
		# True division is in effect via `from __future__ import division`.
		Sensitivity = TP/(TP+FN)
		Specifity = TN/(TN+FP)
		format_str_1 = ('Accuracy: %3f, Auc: %.3f, Precision: %.3f, Recall: %.3f, Sensitivity: %.3f, Specifity: %.3f')
		format_str_2 = ('TP: %d, FN: %d, TN: %d, FP: %d')
		print('--------------Test Result------------')
		print(format_str_1 % (Accuracy,Auc,Precision,Recall,Sensitivity,Specifity))
		print(format_str_2 % (TP,FN,TN,FP))
		# Positional layout consumed by result_print() and eval_log().
		metrics_values = [0]*8
		metrics_values[7] = Accuracy
		metrics_values[1] = Auc
		metrics_values[3] = Precision
		metrics_values[2] = Recall
		metrics_values[4] = TP
		metrics_values[6] = FN
		metrics_values[5] = TN
		metrics_values[0] = FP
	coord.join(threads, stop_grace_period_secs=10)
	sess.close()
	return metrics_values,y_pred_list,y_label_list


def eval_log(eval_dir, train_log_path, checkpoint_path, num_examples, metrics_values, y_pred_list, y_label_list):
	'''
	Write the test-result log files.

	Args:
	eval_dir: directory where eval_log.txt and roc.txt are written (created
		if it does not exist)
	train_log_path: path to the training log; its last 14 lines are copied
		into the eval log so the network's training state is visible
	checkpoint_path: evaluated checkpoint (accepted for reference; not
		currently written to the log)
	num_examples: total number of samples that were evaluated
	metrics_values: metric list laid out by test():
		[FP, Auc, Recall, Precision, TP, TN, FN, Accuracy]
	y_pred_list, y_label_list: predicted probabilities / ground-truth
		labels, dumped to roc.txt for plotting the ROC curve later
	'''
	if not os.path.exists(eval_dir):
		os.mkdir(eval_dir)
	log_txt_path = eval_dir + '/eval_log.txt'
	roc_txt_path = eval_dir + '/roc.txt'
	# The continuation lines below are part of the string literal, so their
	# leading whitespace is intentional and preserved from the original.
	format_str2 = ('num_examples: %d\n \
	Accuracy: %f\n \
	AUC: %f\n \
	Precision: %f\n \
	Recall: %f\n \
	TP: %d\n \
	FN: %d\n \
	TN: %d\n \
	FP: %d\n ')
	# 'with' guarantees the files are closed even if a write fails (the
	# original opened them manually and leaked them on error).
	with open(log_txt_path, 'w') as eval_log_file:
		eval_log_file.write('\n--------------Traning Statistics------------\n')
		# BUG FIX: the minute field must be %M — %m is the month, so the
		# original format '%Y-%m-%d %H:%m:%S' logged the month in the
		# minute slot.
		eval_log_file.write('%s' % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + '\n')
		if os.path.exists(train_log_path):
			# Copy the tail of the training log next to the test result.
			with open(train_log_path, 'r') as train_log_file:
				for line in train_log_file.readlines()[-14:]:
					eval_log_file.write(line)
		else:
			print('train_log doesn\'t exist!')

		eval_log_file.write('--------------Test Result------------\n')
		eval_log_file.write(format_str2 % (num_examples,
										metrics_values[7],
										metrics_values[1],
										metrics_values[3],
										metrics_values[2],
										metrics_values[4],
										metrics_values[6],
										metrics_values[5],
										metrics_values[0]))
	with open(roc_txt_path, 'w') as roc_log_file:
		# One line of predictions, one line of labels, for later ROC plotting.
		roc_log_file.write(str(y_pred_list))
		roc_log_file.write('\n')
		roc_log_file.write(str(y_label_list))
		roc_log_file.write('\n')

def get_num_examples(dataset_dir):
	'''
	Count the records in the DataSet; the DataSet here consists of TFRecords.

	Args:
	dataset_dir: directory whose files are all treated as TFRecord shards.

	Returns:
	(total_pos, total_neg, total_examples): counts of records with a
	non-zero label, a zero label, and all records respectively.

	Raises:
	ValueError: if dataset_dir does not exist.
	'''
	total_pos = 0
	total_neg = 0
	total_examples = 0

	if not gfile.Exists(dataset_dir):
		raise ValueError('Failed to find label directory: ' + dataset_dir)

	filename_list =  []
	for filename in os.listdir(dataset_dir):
		filepath = os.path.join(dataset_dir,filename)
		filename_list.append(filepath)

	dataset_queue = tf.train.string_input_producer(filename_list, shuffle=False, num_epochs=1, capacity=150) # build the filename queue (one epoch, in order)
	reader = tf.TFRecordReader()
	_, serialized_example = reader.read(dataset_queue)
	num_records = reader.num_records_produced()
	features = tf.parse_single_example(serialized_example,
					 features={
						'image_raw': tf.FixedLenFeature([], tf.string),
						'label' : tf.FixedLenFeature([], tf.int64),
						'Xcoor' : tf.FixedLenFeature([], tf.int64),
						'Ycoor' : tf.FixedLenFeature([], tf.int64),
						'patchName' : tf.FixedLenFeature([], tf.string),
					 })
	label_fetch = tf.cast(features['label'], tf.int32)
	filename_fetch = tf.cast(features['patchName'], tf.string)
	gpu_options = tf.GPUOptions(allow_growth=True)
	# init_op = tf.global_variables_initializer()
	# Local variables must be initialized for the num_epochs counter of
	# string_input_producer.
	loc_init_op = tf.local_variables_initializer()
	sess = tf.Session(config=tf.ConfigProto(
		allow_soft_placement=True,
		log_device_placement=False,
		gpu_options=gpu_options))
	sess.run(loc_init_op)
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess=sess,coord=coord)
	try:
		while not coord.should_stop():
			# One record per eval; the single epoch ends with OutOfRangeError.
			label = label_fetch.eval(session=sess)
			if label==0:
				total_neg += 1
			else:
				total_pos += 1
			total_examples += 1
			if total_examples%1000==0:
				print(total_examples)
	except tf.errors.OutOfRangeError:
		print ('Done getting -- epochlimit reached')
	finally:
		coord.request_stop()
	coord.join(threads)
	sess.close()
	print('--------training patches info---------')
	print('total postive patches:%d\n'%total_pos)
	print('total negtive patches:%d\n'%total_neg)
	print('total patches:%d\n'%total_examples)
	return total_pos, total_neg, total_examples

def timecost(duration):
	'''
	Print an elapsed time, given in seconds, as 'Xh Ymin Zsecs'.

	Args:
	duration: elapsed time in seconds (int or float).
	'''
	total_minutes, secs = divmod(duration, 60)
	hours, minutes = divmod(total_minutes, 60)
	print ('Time cost: %dh %dmin %dsecs' % (hours,minutes,secs))

if __name__ == '__main__':
	start_time = time.time()
	try:
		# BUG FIX: tf.app.run() calls sys.exit() after main() returns, so
		# any statement placed after it is unreachable; wrap it in
		# try/finally so the summary below is still printed.
		tf.app.run()
	finally:
		# BUG FIX: the original format '%Y-%M-%d %H:%M:%S' used %M (minute)
		# in the month position; %m is the month.
		print('Processing finish at %s'%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()))
		timecost(time.time()-start_time)
