# -- coding: utf-8 --
'''
测试程序，针对单个WSI
'''
from __future__ import absolute_import
from __future__ import division
# from __future__ import print_function

import math
import tensorflow as tf
import time
from datetime import datetime

from tensorflow.python.platform import gfile
from prostate_input import inputPipeLine
import prostate_network
from tensorflow.python.platform import tf_logging as logging
import pdb
import time
import numpy as np
import tensorflow.contrib.metrics as metrics
import os
import xlwt

# Shorthand for the slim API used throughout metric construction below.
slim = tf.contrib.slim
os.environ['CUDA_VISIBLE_DEVICES']='1'# restrict TensorFlow to the GPU with index 1

# Command-line flags controlling the evaluation run.
tf.app.flags.DEFINE_integer(
	'batch_size', 100, 'The number of samples in each batch.')

tf.app.flags.DEFINE_string(
	'checkpoint_dir', None,
	'The directory where the model was written to or an absolute path to a '
	'checkpoint file.')

tf.app.flags.DEFINE_string(
	'eval_log_dir', None, 'Directory where the results are saved to.')

# BUG FIX: help text was copy-pasted from eval_log_dir; this flag is a
# boolean switch, not a directory.
tf.app.flags.DEFINE_bool(
	'write_eval_log', None,
	'Whether to write evaluation results (text log and Excel sheet) to '
	'--eval_log_dir.')

tf.app.flags.DEFINE_string(
	'single_test_list', '', '单例测试对象列表.')

tf.app.flags.DEFINE_string(
	'dataset_dir', None, 'The directory where the dataset files are stored.')

tf.app.flags.DEFINE_string(
	'train_log_path', None, 'train_log_path.')

tf.app.flags.DEFINE_string(
	'model_name', None, 'The name of the architecture to evaluate.')

tf.app.flags.DEFINE_float(
	'moving_average_decay', None,
	'The decay to use for the moving average.'
	'If left as None, then moving averages are not used.')

# BUG FIX: these two flags previously had empty ('''''') help strings.
tf.app.flags.DEFINE_string(
	'is_train_data', 'validation',
	'Dataset split name passed through to the input pipeline.')
tf.app.flags.DEFINE_integer(
	'num_examples', 10000,
	'Number of examples to evaluate; overwritten per WSI in main().')

FLAGS = tf.app.flags.FLAGS

def model_select(model, image_batch, is_training=False, reuse = None):
	'''
	Build the inference graph for the architecture named by *model*.

	Args:
	model: architecture name, e.g. 'vgg16' or 'resnet_v2_50'.
	image_batch: batched input image tensor fed to the network builder.
	is_training: forwarded to the builder (evaluation uses False).
	reuse: variable-reuse flag forwarded to every builder except vgg16.

	Returns:
	Whatever the selected prostate_network builder returns, or None when
	the name matches no known architecture.
	'''
	# vgg16 is special-cased: its builder takes no reuse argument.
	if model == 'vgg16':
		return prostate_network.vgg16_inference(image_batch, is_training=is_training)

	# Map architecture name -> builder attribute; resolved lazily so an
	# unknown name never touches prostate_network at all.
	builder_names = {
		'resnet_v2_12': 'resnet12_inference',
		'resnet_v2_24': 'resnet24_inference',
		'resnet_v2_35': 'resnet35_inference',
		'resnet_v2_50': 'resnet50_inference',
		'resnet_v2_152': 'resnet152_inference',
		'inception_v3': 'inception_v3_inference',
	}
	if model in builder_names:
		builder = getattr(prostate_network, builder_names[model])
		return builder(image_batch, is_training=is_training, reuse=reuse)
	# Unrecognized names fall through and return None, as the original chain did.

# Module-level accumulators, summed across every evaluated WSI so that
# all_result_print() can report whole-test-set statistics at the end.
Accuracy_map = {}  # WSI name -> dict of per-WSI metric values
num_examples=0  # total number of patches evaluated so far
total_Accuracy=0  # NOTE: currently not accumulated (see single_result_print)
total_AUC=0  # NOTE: currently not accumulated
total_Precision=0  # NOTE: currently not accumulated
total_Recall=0  # NOTE: currently not accumulated
total_TP=0  # confusion-matrix counts accumulated across all WSIs
total_FN=0
total_TN=0
total_FP=0
def main(_):
	'''
	Evaluate the checkpointed model on each requested WSI TFRecord file.

	For every WSI a fresh graph is built, streaming metrics are evaluated via
	test_once(), per-WSI results are printed/logged, and confusion-matrix
	counts are accumulated for the final summary printed by all_result_print().
	'''
	if not FLAGS.dataset_dir:
		raise ValueError('You must supply the dataset directory with --dataset_dir')
	if not os.path.exists(FLAGS.eval_log_dir):
		os.mkdir(FLAGS.eval_log_dir)

	# Explicit single-test list; the literal string 'None' means "evaluate all".
	test_list = []
	if FLAGS.single_test_list == 'None':
		pass  # fall through to evaluating every .tfrecords file in dataset_dir
	else:
		for test_name in FLAGS.single_test_list.split(','):
			test_list.append(test_name)

	filename_list = []
	if test_list:
		for filename in test_list:
			filepath = os.path.join(FLAGS.dataset_dir, filename + '.tfrecords')
			filename_list.append(filepath)
	else:
		for filename in os.listdir(FLAGS.dataset_dir):
			if filename.endswith('.tfrecords'):
				filepath = os.path.join(FLAGS.dataset_dir, filename)
				filename_list.append(filepath)

	filename_list.sort()
	uneval_list = []  # WSIs that contained no patches and were skipped

	if FLAGS.write_eval_log:
		log_txt_path = FLAGS.eval_log_dir + '/single_eval_log.txt'
		eval_log_file = open(log_txt_path, 'w')
		try:
			eval_log_file.write('\n--------------Traning Statistics------------\n')
			# BUG FIX: format used '%H:%m:%S' -- %m is the month; %M is minutes.
			eval_log_file.write('%s' % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + '\n')
			if os.path.exists(FLAGS.train_log_path):
				train_log_file = open(FLAGS.train_log_path, 'r')
				try:
					# Copy the tail of the training log (final training stats)
					# into the eval log for reference.
					from_trainlog = train_log_file.readlines()[-14:]
					for line in from_trainlog:
						eval_log_file.write(line)
				finally:
					train_log_file.close()
			else:
				print('train_log doesn\'t exits!')
		finally:
			# BUG FIX: the log file used to be closed only when the train log
			# existed, leaking the handle otherwise.
			eval_log_file.close()

	tf.logging.set_verbosity(tf.logging.INFO)

	# BUG FIX: ensure Accuracy_map is bound even if every WSI is skipped
	# (previously wrt_xls() would hit an UnboundLocalError in that case).
	Accuracy_map = {}
	for i in range(len(filename_list)):
		filename = filename_list[i].split('/')[-1].split('.')[0]
		total_pos, total_neg, total_examples = get_num_examples(filename_list[i])
		FLAGS.num_examples = total_examples
		print('--------  %s  ---------' % filename)
		print('total postive patches:%d\n' % total_pos)
		print('total negtive patches:%d\n' % total_neg)
		print('total patches:%d\n' % total_examples)
		if total_examples == 0:
			# Nothing to evaluate in this WSI; remember it and move on.
			uneval_list.append(filename)
			continue
		with tf.Graph().as_default():
			global_step = tf.train.create_global_step()

			images, labels, _, _, _, rawimages = inputPipeLine(FLAGS.is_train_data, batchSize=32, Data_path=filename_list[i], net_use=FLAGS.model_name)
			logits, end_points = model_select(FLAGS.model_name, images)

			if FLAGS.moving_average_decay:
				# Restore the shadow (moving-average) copies of the model variables.
				variable_averages = tf.train.ExponentialMovingAverage(
					FLAGS.moving_average_decay, global_step)
				variables_to_restore = variable_averages.variables_to_restore(
					slim.get_model_variables())
				variables_to_restore[global_step.op.name] = global_step
			else:
				variables_to_restore = slim.get_variables_to_restore()

			predictions = tf.argmax(logits, 1)
			labels = tf.squeeze(labels)

			names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
				'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
				'Auc': slim.metrics.streaming_auc(predictions, labels),
				'TP': slim.metrics.streaming_true_positives(predictions, labels),
				'FP': slim.metrics.streaming_false_positives(predictions, labels),
				'TN': slim.metrics.streaming_true_negatives(predictions, labels),
				'FN': slim.metrics.streaming_false_negatives(predictions, labels),
				'Precision': slim.metrics.streaming_precision(predictions, labels),
				'Recall': slim.metrics.streaming_recall(predictions, labels)
			})

			# Print the summaries to screen.
			for name, value in names_to_values.items():
				summary_name = 'eval/%s' % name
				op = tf.summary.scalar(summary_name, value, collections=[])
				op = tf.Print(op, [value], summary_name)
				tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

			num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))

			if tf.gfile.IsDirectory(FLAGS.checkpoint_dir):
				checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
			else:
				checkpoint_path = FLAGS.checkpoint_dir

			tf.logging.info('Checkpoint Path: %s' % checkpoint_path)
			tf.logging.info('Data Path: %s' % filename_list[i])

			gpu_options = tf.GPUOptions(allow_growth=True)
			session_config = tf.ConfigProto(
				allow_soft_placement=True,
				log_device_placement=False,
				gpu_options=gpu_options)

			init_op = tf.group(tf.global_variables_initializer(),
								tf.local_variables_initializer())

			start_time = time.time()
			metrics_values = test_once(checkpoint_path=FLAGS.checkpoint_dir,
											num_evals=num_batches,
											initial_op=init_op,
											eval_op=list(names_to_updates.values()),
											eval_values=list(names_to_values.values()),
											variables_to_restore=variables_to_restore,
											session_config=session_config,
											name=filename)
			timecost(time.time() - start_time)
			Accuracy_map = single_result_print(i, len(filename_list), filename, metrics_values, checkpoint_path)

	if FLAGS.write_eval_log:
		wrt_xls(Accuracy_map, uneval_list)
	all_result_print()

def single_result_print(i, len_of_filename_list, filename, metrics_values, checkpoint_path):
	'''
	Report the evaluation result for one WSI and fold its counts into the
	module-level totals consumed later by all_result_print().

	Args:
	i: zero-based index of this WSI in the evaluation order (progress display).
	len_of_filename_list: total number of WSIs being evaluated.
	filename: name of the WSI just evaluated.
	metrics_values: metric list returned by test_once(), laid out as
		[FP, Auc, Recall, Precision, TP, TN, FN, Accuracy].
	checkpoint_path: checkpoint path recorded in the eval log.

	Returns:
	The (global) Accuracy_map, updated with this WSI's entry.
	'''
	global num_examples, total_TP, total_FN, total_TN, total_FP
	global Accuracy_map

	Accuracy_map[filename] = {}
	progress = (i + 1, len_of_filename_list, filename)
	if FLAGS.write_eval_log:
		# Position of each named metric inside metrics_values (see test_once()).
		metric_slots = (('Accuracy', 7), ('AUC', 1), ('Precision', 3),
						('Recall', 2), ('TP', 4), ('FN', 6), ('TN', 5), ('FP', 0))
		for metric_name, slot in metric_slots:
			Accuracy_map[filename][metric_name] = metrics_values[slot]

		eval_log(FLAGS.eval_log_dir,
				checkpoint_path,
				FLAGS.num_examples,
				metrics_values,
				filename)
		print('[%d/%d], finish eval for %s, eval_log has been update' % progress)
	else:
		print('[%d/%d], finish eval for %s' % progress)

	# Accumulate whole-test-set statistics.
	num_examples += FLAGS.num_examples
	total_TP += metrics_values[4]
	total_FN += metrics_values[6]
	total_TN += metrics_values[5]
	total_FP += metrics_values[0]
	return Accuracy_map

def all_result_print():
	'''
	Print aggregate statistics accumulated over every evaluated WSI.

	Reads the module-level confusion-matrix totals. Safe to call even when
	nothing was evaluated: zero denominators report 0.0 instead of raising
	ZeroDivisionError (the original behavior when, e.g., a class was absent).
	'''
	global num_examples, total_TP, total_FN, total_TN, total_FP

	def _ratio(numerator, denominator):
		# BUG FIX: guard against empty-class / empty-test-set denominators.
		return numerator / denominator if denominator else 0.0

	print('\n\n--------------Total Test Result------------')
	format_strr = ('num_examples: %d\n \
		Accuracy: %f\n \
		Precision: %f\n \
		Recall: %f\n \
		Sensitivity: %f\n \
		Specifity: %f\n \
		f1-score: %f\n \
		TP: %d\n \
		FN: %d\n \
		TN: %d\n \
		FP: %d\n ')

	print(format_strr % (num_examples,
						_ratio(total_TP + total_TN, total_TP + total_FP + total_TN + total_FN),
						_ratio(total_TP, total_TP + total_FP),
						_ratio(total_TP, total_TP + total_FN),
						_ratio(total_TP, total_TP + total_FN),  # sensitivity == recall
						_ratio(total_TN, total_TN + total_FP),
						_ratio(2 * total_TP, 2 * total_TP + total_FP + total_FN),
						total_TP,
						total_FN,
						total_TN,
						total_FP))

def wrt_xls(Accuracy_map, uneval_list):
	'''
	Write per-WSI test results to an Excel workbook and append the list of
	skipped (empty) WSIs to a text log.

	Args:
	Accuracy_map: dict mapping WSI name -> dict of metric name -> value.
	uneval_list: names of WSIs that had no patches and were not evaluated.
	'''
	if not os.path.exists(FLAGS.eval_log_dir):
		os.mkdir(FLAGS.eval_log_dir)
	# NOTE(review): this writes "<eval_log_dir>.txt" *beside* the directory,
	# not inside it -- preserved from the original; confirm it is intended.
	eval_log_file = open(FLAGS.eval_log_dir + '.txt', 'a')
	try:
		eval_log_file.write('\n-----uneval_list-----\n')
		eval_log_file.write('-%s-\n\n' % str(uneval_list))
	finally:
		eval_log_file.close()

	databook = xlwt.Workbook(encoding = 'ascii')
	datasheet = databook.add_sheet('data_sheet')
	# Column order of the sheet; also used as the metric-dict keys per row.
	header = ('Accuracy', 'AUC', 'Precision', 'Recall', 'TP', 'FN', 'TN', 'FP')
	for col, title in enumerate(header):
		datasheet.write(0, col + 1, label=title)
	# BUG FIX: dict.items() is a non-indexable view on Python 3; materialize
	# a list before positional access.
	sorted_Accuracy_map = list(Accuracy_map.items())
	for i, (wsi_name, metrics) in enumerate(sorted_Accuracy_map):
		datasheet.write(i + 1, 0, label=str(wsi_name))
		for col, title in enumerate(header):
			datasheet.write(i + 1, col + 1, label=str(metrics[title]))
	databook.save(FLAGS.eval_log_dir + '/data_statics.xls')

def test_once(checkpoint_path,
				num_evals,
				initial_op,
				eval_op,
				eval_values,
				variables_to_restore,
				session_config,
				name):
	'''
	Evaluate the model once over a single WSI.

	Decoupled rewrite of slim.evaluation.evaluate_once (see the removed
	commented-out call in main): restores the checkpoint, drives the metric
	update ops for num_evals batches, then reads out the final metric values.

	Args:
	checkpoint_path: directory containing the checkpoint(s) to restore.
	num_evals: number of batches to run, ceil(num_examples / batch_size).
	initial_op: variable-initialization op run before evaluation.
	eval_op: metric update ops (names_to_updates.values() from main).
	eval_values: metric value tensors (names_to_values.values() from main).
	variables_to_restore: variables restored from the checkpoint.
	session_config: tf.ConfigProto used to create the session.
	name: WSI name, used only in printed output.

	Returns:
	A list laid out as [FP, Auc, Recall, Precision, TP, TN, FN, Accuracy];
	this ordering is relied upon by single_result_print() and eval_log().

	Raises:
	ValueError: if the checkpoint cannot be found or restored.
	'''
	# (removed unused local gpu_options -- the caller already configured GPU
	# options inside session_config)
	sess = tf.Session(config=session_config)
	sess.run(initial_op)
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess=sess, coord=coord)
	tf.logging.info('Evaluating model from %s' % checkpoint_path)

	ckpt = tf.train.get_checkpoint_state(checkpoint_path)
	saver = tf.train.Saver(variables_to_restore)
	try:
		saver.restore(sess, ckpt.model_checkpoint_path)
	except Exception:  # BUG FIX: narrowed from a bare except clause
		raise ValueError('No checkpoint found')

	# BUG FIX: converted Python 2 print statement to the call form used by
	# the rest of this file.
	print('%s: start evaluation on TestSet.' % (datetime.now()))
	evals_completed = 1

	try:
		while evals_completed <= num_evals and not coord.should_stop():
			sess.run(eval_op)
			logging.info('Evaluation [%d/%d]', evals_completed, num_evals)
			if evals_completed >= num_evals:
				coord.request_stop()
			evals_completed += 1
	except tf.errors.OutOfRangeError:
		print('Done Eval -- epochlimit reached')
		coord.request_stop()
	finally:
		FP, Auc, Recall, Precision, TP, TN, FN, Accuracy = sess.run(eval_values)
		# BUG FIX: guard against ZeroDivisionError when a class is absent
		# from this WSI.
		Sensitivity = TP / (TP + FN) if (TP + FN) else 0.0
		Specifity = TN / (TN + FP) if (TN + FP) else 0.0
		# BUG FIX: '%3f' (field width 3) corrected to '%.3f' (3 decimals) so
		# Accuracy prints with the same precision as the other metrics.
		format_str_1 = ('Accuracy: %.3f, Auc: %.3f, Precision: %.3f, Recall: %.3f,Sensitivity: %.3f ,Specifity: %.3f')
		format_str_2 = ('TP: %d, FN: %d, TN: %d, FP: %d')
		print('--------------%s Test Result------------' % name)
		print(format_str_1 % (Accuracy, Auc, Precision, Recall, Sensitivity, Specifity))
		print(format_str_2 % (TP, FN, TN, FP))
		# Ordering matches the consumers' indices (see docstring).
		metrics_values = [FP, Auc, Recall, Precision, TP, TN, FN, Accuracy]
	coord.join(threads, stop_grace_period_secs=10)
	sess.close()
	return metrics_values

def eval_log(eval_dir, checkpoint_path, num_examples, metrics_values, filename):
	'''
	Append one WSI's test result to the text evaluation log.

	Args:
	eval_dir: directory that receives single_eval_log.txt (created if absent).
	checkpoint_path: checkpoint used for this evaluation; currently unused,
		kept for interface compatibility with callers.
	num_examples: number of patches evaluated for this WSI.
	metrics_values: metric list laid out as
		[FP, AUC, Recall, Precision, TP, TN, FN, Accuracy] (see test_once).
	filename: WSI name written as the section header.
	'''
	if not os.path.exists(eval_dir):
		os.mkdir(eval_dir)
	log_txt_path = eval_dir + '/single_eval_log.txt'
	# 'with' guarantees the handle is closed even if a write fails
	# (the original only closed it on the success path).
	with open(log_txt_path, 'a') as eval_log_file:
		eval_log_file.write('--------------%s Test Result------------\n' % filename)
		format_str2 = ('num_examples: %d\n \
	Accuracy: %f\n \
	AUC: %f\n \
	Precision: %f\n \
	Recall: %f\n \
	TP: %d\n \
	FN: %d\n \
	TN: %d\n \
	FP: %d\n ')
		eval_log_file.write(format_str2 % (num_examples,
										metrics_values[7],
										metrics_values[1],
										metrics_values[3],
										metrics_values[2],
										metrics_values[4],
										metrics_values[6],
										metrics_values[5],
										metrics_values[0]))

def get_num_examples(Data_path):
	'''
	Count the records in one WSI's TFRecords file, split by label.

	Args:
	Data_path: path to a single .tfrecords file.

	Returns:
	(total_pos, total_neg, total_examples): positive-patch count
	(label != 0), negative-patch count (label == 0), and total records.
	'''
	total_pos = 0
	total_neg = 0
	total_examples = 0

	# One-epoch, unshuffled queue so every record is read exactly once.
	dataset_queue = tf.train.string_input_producer([Data_path], shuffle=False, num_epochs=1, capacity=15)
	reader = tf.TFRecordReader()
	_, serialized_example = reader.read(dataset_queue)
	# (removed unused locals num_records and filename_fetch)
	features = tf.parse_single_example(serialized_example,
					 features={
						'image_raw': tf.FixedLenFeature([], tf.string),
						'label' : tf.FixedLenFeature([], tf.int64),
						'Xcoor' : tf.FixedLenFeature([], tf.int64),
						'Ycoor' : tf.FixedLenFeature([], tf.int64),
						'patchName' : tf.FixedLenFeature([], tf.string),
					 })
	label_fetch = tf.cast(features['label'], tf.int32)
	gpu_options = tf.GPUOptions(allow_growth=True)
	# string_input_producer's epoch counter is a *local* variable, hence the
	# local-variables initializer.
	loc_init_op = tf.local_variables_initializer()
	sess = tf.Session(config=tf.ConfigProto(
		allow_soft_placement=True,
		log_device_placement=False,
		gpu_options=gpu_options))
	sess.run(loc_init_op)
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess=sess, coord=coord)
	try:
		# Iterate until the one-epoch queue raises OutOfRangeError.
		while not coord.should_stop():
			label = label_fetch.eval(session=sess)
			if label == 0:
				total_neg += 1
			else:
				total_pos += 1
			total_examples += 1
	except tf.errors.OutOfRangeError:
		print('Done getting -- epochlimit reached')
	finally:
		coord.request_stop()
	coord.join(threads)
	sess.close()
	return total_pos, total_neg, total_examples

def timecost(duration):
	'''Print *duration* (seconds) broken down into hours, minutes and seconds.'''
	total_minutes, secs = divmod(duration, 60)
	hours, minutes = divmod(total_minutes, 60)
	print('Time cost: %dh %dmin %dsecs' % (hours, minutes, secs))

if __name__ == '__main__':
	# tf.app.run() parses the FLAGS defined above, then invokes main().
	tf.app.run()
