# -- coding: utf-8 --
'''
训练程序
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from deployment import model_deploy
from prostate_input import inputPipeLine
import prostate_network
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import gfile
from tensorflow.python.ops import math_ops
import time
import numpy as np
import os
import pdb
import sys

slim = tf.contrib.slim
# os.environ['CUDA_VISIBLE_DEVICES']='2,3'

tf.app.flags.DEFINE_string(
		'train_dir', '/tmp/tfmodel/',
		'Directory where checkpoints and event logs are written to.')

# NOTE(review): the help text below looks copy-pasted from the 'task' flag;
# this flag actually toggles scanning the dataset to count examples — confirm.
tf.app.flags.DEFINE_bool(
		'count_num_examples', None, 'Task id of the replica running the training.')

######################
# model_deploy.DeploymentConfig flags (plus commented-out, currently unused ones) #
######################
tf.app.flags.DEFINE_integer('num_clones', 4,'Number of model clones to deploy.')

tf.app.flags.DEFINE_boolean('clone_on_cpu', False,'Use CPUs to deploy clones.')

tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')

tf.app.flags.DEFINE_integer(
		'num_ps_tasks', 0,
		'The number of parameter servers. If the value is 0, then the parameters '
		'are handled locally by the worker.')

# tf.app.flags.DEFINE_integer(
# 		'log_every_n_steps', 10,
# 		'The frequency with which logs are print.')

# tf.app.flags.DEFINE_integer(
# 		'save_summaries_secs', 600,
# 		'The frequency with which summaries are saved, in seconds.')

# tf.app.flags.DEFINE_integer(
# 		'save_interval_secs', 600,
# 		'The frequency with which the model is saved, in seconds.')

# tf.app.flags.DEFINE_string(
# 		'dataset_split_name', 'train', 'The name of the train/test split.')
# 		
# tf.app.flags.DEFINE_integer(
# 		'labels_offset', 0,
# 		'An offset for the labels in the dataset. This flag is primarily used to '
# 		'evaluate the VGG and ResNet architectures which do not use a background '
# 		'class for the ImageNet dataset.')
# 		
# tf.app.flags.DEFINE_string(
# 		'preprocessing_name', None, 'The name of the preprocessing to use. If left '
# 		'as `None`, then the model_name flag is used.')
# 		
# tf.app.flags.DEFINE_integer(
# 		'train_image_size', None, 'Train image size')
# 		
# tf.app.flags.DEFINE_integer('max_number_of_steps', None,
# 							'The maximum number of training steps.')
# 							
# tf.app.flags.DEFINE_string(
# 		'dataset_name', 'prostate', 'The name of the dataset to load.')


tf.app.flags.DEFINE_integer(
		'task', 0, 'Task id of the replica running the training.')

######################
# Optimizer flags; usually left at their defaults #
######################
tf.app.flags.DEFINE_float(
		'weight_decay', None, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
		'optimizer', 'adam',
		'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
		'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
		'adadelta_rho', 0.95,
		'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
		'adagrad_initial_accumulator_value', 0.1,
		'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
		'adam_beta1', 0.9,
		'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
		'adam_beta2', 0.999,
		'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
													'The learning rate power.')
tf.app.flags.DEFINE_float(
		'ftrl_initial_accumulator_value', 0.1,
		'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
		'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
		'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
		'momentum', 0.9,
		'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')


#######################
# Learning-rate flags; usually left at their defaults #
#######################

# Custom flag that re-weights positive vs. negative samples in the loss
# (help text is in Chinese: "seems to have little effect").
tf.app.flags.DEFINE_float(
		'pos_weight', 1.0,
		'自定义参数，用于改变正负样本权重，但似乎作用不大.')

tf.app.flags.DEFINE_string(
		'learning_rate_decay_type',
		'exponential',
		'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
		' or "polynomial"')

tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')

tf.app.flags.DEFINE_float(
		'end_learning_rate', 0.0001,
		'The minimal end learning rate used by a polynomial decay learning rate.')

tf.app.flags.DEFINE_float(
		'label_smoothing', 0.0, 'The amount of label smoothing.')

tf.app.flags.DEFINE_float(
		'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')

tf.app.flags.DEFINE_float(
		'num_epochs_per_decay', 2.0,
		'Number of epochs after which learning rate decays.')

tf.app.flags.DEFINE_bool(
		'sync_replicas', False,
		'Whether or not to synchronize the replicas during training.')

tf.app.flags.DEFINE_integer(
		'replicas_to_aggregate', 1,
		'The Number of gradients to collect before updating params.')

tf.app.flags.DEFINE_float(
		'moving_average_decay', None,
		'The decay to use for the moving average.'
		'If left as None, then moving averages are not used.')

#######################
# Dataset-related flags #
#######################

tf.app.flags.DEFINE_string(
		'dataset_dir', None, 'The directory where the dataset files are stored.')

tf.app.flags.DEFINE_string(
		'model_name', None, 'The name of the architecture to train.')

tf.app.flags.DEFINE_integer(
		'batch_size', 32, 'The number of samples in each batch.')

tf.app.flags.DEFINE_integer('max_number_of_epochs', None,
							'The maximum number of training epochs.')

#####################
# Fine-Tuning Flags #
#####################

tf.app.flags.DEFINE_string(
		'checkpoint_path', None,
		'The path to a checkpoint from which to fine-tune.')

tf.app.flags.DEFINE_string(
		'checkpoint_exclude_scopes', None,
		'Comma-separated list of scopes of variables to exclude when restoring '
		'from a checkpoint.')

tf.app.flags.DEFINE_string(
		'trainable_scopes', None,
		'Comma-separated list of scopes to filter the set of variables to train.'
		'By default, None would train all the variables.')

tf.app.flags.DEFINE_boolean(
		'ignore_missing_vars', False,
		'When restoring a checkpoint would ignore missing variables.')

# NOTE(review): help text below is copy-pasted from 'ignore_missing_vars';
# this flag is the total number of training examples — confirm.
tf.app.flags.DEFINE_integer(
		'num_examples', None,
		'When restoring a checkpoint would ignore missing variables.')

FLAGS = tf.app.flags.FLAGS

def _configure_learning_rate(num_samples_per_epoch, global_step):
	"""Build the learning-rate tensor described by FLAGS.

	Args:
		num_samples_per_epoch: number of training examples in one epoch. (The
			original ignored this parameter and read FLAGS.num_examples directly;
			the caller passes exactly that value, so behavior is unchanged.)
		global_step: global-step tensor driving the decay schedule.

	Returns:
		A scalar `Tensor` holding the (possibly decaying) learning rate.

	Raises:
		ValueError: if FLAGS.learning_rate_decay_type is not recognized.
	"""
	# One decay period spans `num_epochs_per_decay` epochs worth of batches.
	decay_steps = int((num_samples_per_epoch / FLAGS.batch_size) *
										FLAGS.num_epochs_per_decay)
	if FLAGS.sync_replicas:
		# Each aggregated update consumes several worker batches, so the decay
		# period shrinks accordingly. Keep it an int: plain `/=` would silently
		# turn decay_steps into a float under Python 3.
		decay_steps = int(decay_steps / FLAGS.replicas_to_aggregate)

	if FLAGS.learning_rate_decay_type == 'exponential':
		return tf.train.exponential_decay(FLAGS.learning_rate,
										global_step,
										decay_steps,
										FLAGS.learning_rate_decay_factor,
										staircase=True,
										name='exponential_decay_learning_rate')
	elif FLAGS.learning_rate_decay_type == 'fixed':
		return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
	elif FLAGS.learning_rate_decay_type == 'polynomial':
		return tf.train.polynomial_decay(FLAGS.learning_rate,
										global_step,
										decay_steps,
										FLAGS.end_learning_rate,
										power=1.0,
										cycle=False,
										name='polynomial_decay_learning_rate')
	# Interpolate the bad value into the message: the original passed it as a
	# second exception argument, so it never appeared in the formatted text.
	raise ValueError('learning_rate_decay_type [%s] was not recognized' %
									 FLAGS.learning_rate_decay_type)

def _configure_optimizer(learning_rate):
	"""Create the optimizer selected by FLAGS.optimizer.

	Args:
		learning_rate: scalar `Tensor` or float learning rate.

	Returns:
		A `tf.train.Optimizer` configured from the corresponding FLAGS.

	Raises:
		ValueError: if FLAGS.optimizer names an unsupported optimizer.
	"""
	if FLAGS.optimizer == 'adadelta':
		optimizer = tf.train.AdadeltaOptimizer(
				learning_rate,
				rho=FLAGS.adadelta_rho,
				epsilon=FLAGS.opt_epsilon)
	elif FLAGS.optimizer == 'adagrad':
		optimizer = tf.train.AdagradOptimizer(
				learning_rate,
				initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
	elif FLAGS.optimizer == 'adam':
		optimizer = tf.train.AdamOptimizer(
				learning_rate,
				beta1=FLAGS.adam_beta1,
				beta2=FLAGS.adam_beta2,
				epsilon=FLAGS.opt_epsilon)
	elif FLAGS.optimizer == 'ftrl':
		optimizer = tf.train.FtrlOptimizer(
				learning_rate,
				learning_rate_power=FLAGS.ftrl_learning_rate_power,
				initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
				l1_regularization_strength=FLAGS.ftrl_l1,
				l2_regularization_strength=FLAGS.ftrl_l2)
	elif FLAGS.optimizer == 'momentum':
		optimizer = tf.train.MomentumOptimizer(
				learning_rate,
				momentum=FLAGS.momentum,
				name='Momentum')
	elif FLAGS.optimizer == 'rmsprop':
		optimizer = tf.train.RMSPropOptimizer(
				learning_rate,
				decay=FLAGS.rmsprop_decay,
				momentum=FLAGS.momentum,
				epsilon=FLAGS.opt_epsilon)
	elif FLAGS.optimizer == 'sgd':
		optimizer = tf.train.GradientDescentOptimizer(learning_rate)
	else:
		# Interpolate the bad name into the message: the original passed it as
		# a second exception argument, so it never appeared in the message.
		raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
	return optimizer

def model_select(model, image_batch, weight_decay=None, is_training=True, reuse = None):
	"""Dispatch to the prostate_network inference builder for `model`.

	Returns the builder's result (logits, end_points), or None implicitly if
	the model name is unknown — matching the original fall-through behavior.
	"""
	if model == 'vgg16':
		# The vgg16 builder does not accept a `reuse` argument.
		return prostate_network.vgg16_inference(
				image_batch, weight_decay=weight_decay, is_training=is_training)

	builders = {
			'resnet_v2_50': prostate_network.resnet50_inference,
			'resnet_v2_101': prostate_network.resnet101_inference,
			'resnet_v2_152': prostate_network.resnet152_inference,
			'InceptionV3': prostate_network.inception_v3_inference,
			'InceptionResnetV2': prostate_network.inception_resnet_v2_inference,
	}
	builder = builders.get(model)
	if builder is not None:
		return builder(image_batch, weight_decay=weight_decay,
				is_training=is_training, reuse=reuse)

def count_model_size():
	"""Print the total number of trainable parameters, in thousands.

	The original used the bare name `reduce`, which is not a builtin under
	Python 3 (it lives in functools) and raised NameError; a plain loop needs
	no import at all.
	"""
	total = 0
	for v in tf.trainable_variables():
		n = 1
		for dim in v.get_shape().as_list():
			n *= dim
		total += n
	print("Model size: %dK" % (total / 1000,))

def _get_init_fn():
	"""Returns a function run by the chief worker to warm-start the training.

	Note that the init_fn is only run when initializing the model during the
	very first global step.

	Returns:
		An init function run by the supervisor, or None if no checkpoint path
		was supplied.
	"""
	if FLAGS.checkpoint_path is None:
		return None

	# If train_dir already holds a checkpoint, resume from it and ignore
	# --checkpoint_path entirely, restoring every model variable.
	resume_path = tf.train.latest_checkpoint(FLAGS.train_dir)
	if resume_path:
		tf.logging.info(
				'Ignoring --checkpoint_path because a checkpoint already exists in %s'
				% FLAGS.train_dir)
		return slim.assign_from_checkpoint_fn(
				resume_path,
				list(slim.get_model_variables()),
				ignore_missing_vars=FLAGS.ignore_missing_vars)

	# Otherwise fine-tune from --checkpoint_path, skipping excluded scopes.
	excluded_scopes = []
	if FLAGS.checkpoint_exclude_scopes:
		excluded_scopes = [
				scope.strip() for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

	variables_to_restore = [
			var for var in slim.get_model_variables()
			if not any(var.op.name.startswith(scope) for scope in excluded_scopes)]

	if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
		checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
	else:
		checkpoint_path = FLAGS.checkpoint_path

	tf.logging.info('Fine-tuning from %s' % checkpoint_path)

	return slim.assign_from_checkpoint_fn(
			checkpoint_path,
			variables_to_restore,
			ignore_missing_vars=FLAGS.ignore_missing_vars)

def _get_variables_to_train():
	"""Returns a list of variables to train.

	Returns:
		All trainable variables when FLAGS.trainable_scopes is unset, otherwise
		only those belonging to the listed comma-separated scopes.
	"""
	if FLAGS.trainable_scopes is None:
		return tf.trainable_variables()

	variables_to_train = []
	for scope in (s.strip() for s in FLAGS.trainable_scopes.split(',')):
		variables_to_train.extend(
				tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
	return variables_to_train

def restoreFromSaver(saver, sess):
	"""Restore `sess` from the latest checkpoint under FLAGS.checkpoint_path.

	Prints a message either way; returns None in all cases.
	"""
	ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
	if not (ckpt and ckpt.model_checkpoint_path):
		print('No checkpoint file found')
		return
	saver.restore(sess, ckpt.model_checkpoint_path)
	print('checkpoint file has been restored!')

def main(_):
	"""Build the multi-clone (multi-GPU) training graph and run the train loop."""
	# Fail early: the original validated --dataset_dir only AFTER optionally
	# scanning it with get_num_examples, which could crash first.
	if not FLAGS.dataset_dir:
		raise ValueError('You must supply the dataset directory with --dataset_dir')

	if FLAGS.count_num_examples:
		# Scan the TFRecord files once to get exact patch counts.
		total_pos, total_neg, total_examples = get_num_examples(FLAGS.dataset_dir)
		FLAGS.num_examples = total_examples
		print('--------training patches info by counting---------')
		print('total postive patches:%d\n' % total_pos)
		print('total negtive patches:%d\n' % total_neg)
		print('total patches:%d\n' % total_examples)

	# The original passed the path as a second print() argument, leaving the
	# %s placeholder unformatted.
	print('dataset directory:%s' % FLAGS.dataset_dir)
	tf.logging.set_verbosity(tf.logging.INFO)
	with tf.Graph().as_default():
		#######################
		# Config model_deploy #
		#######################
		deploy_config = model_deploy.DeploymentConfig(
				num_clones=FLAGS.num_clones,
				clone_on_cpu=FLAGS.clone_on_cpu,
				replica_id=FLAGS.task,
				num_replicas=FLAGS.worker_replicas,
				num_ps_tasks=FLAGS.num_ps_tasks)

		# Create global_step and the shared input pipeline on the variables device.
		with tf.device(deploy_config.variables_device()):
			global_step = tf.train.create_global_step()

			images, labels, _, _, _ = inputPipeLine(
					'train', batchSize=FLAGS.batch_size, fast_mode=True,
					Data_Dir=FLAGS.dataset_dir, numEpochs=None, net_use=FLAGS.model_name)
			# Binary classification: one-hot over {negative, positive}.
			labels = slim.one_hot_encoding(labels, 2)
			batch_queue = slim.prefetch_queue.prefetch_queue(
					[images, labels], capacity=2 * deploy_config.num_clones)

		####################
		# Define the model #
		####################
		# Gather initial summaries. clone_fn closes over this name, so the metric
		# summaries it creates are added when the clones are built below. (The
		# original bound this set twice; the first binding was discarded unused.)
		summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

		def clone_fn(batch_queue):
			"""Allows data parallelism by creating multiple clones of network_fn."""
			with tf.device(deploy_config.inputs_device()):
				images, labels = batch_queue.dequeue()
			logits, end_points = model_select(
					FLAGS.model_name, images, weight_decay=FLAGS.weight_decay)

			predictions = tf.argmax(logits, 1)
			soft_predictions = tf.nn.softmax(logits, dim=-1)  # softmax probabilities
			t_labels = tf.argmax(labels, 1)

			# Streaming metrics, accumulated across batches and reset once per
			# epoch by re-running the local-variables initializer.
			names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
				'Accuracy': slim.metrics.streaming_accuracy(predictions, t_labels),
				'Auc': slim.metrics.streaming_auc(predictions, t_labels),
				'TP': slim.metrics.streaming_true_positives(predictions, t_labels),
				'FP': slim.metrics.streaming_false_positives(predictions, t_labels),
				'TN': slim.metrics.streaming_true_negatives(predictions, t_labels),
				'FN': slim.metrics.streaming_false_negatives(predictions, t_labels),
				'Precision': slim.metrics.streaming_precision(predictions, t_labels),
				'Recall': slim.metrics.streaming_recall(predictions, t_labels)
			})
			# Non-streaming accuracy of just the current batch.
			batch_accuracy = slim.metrics.accuracy(predictions, t_labels)

			for name, value in names_to_values.items():
				summaries.add(tf.summary.scalar('eval/%s' % name, value))

			if 'AuxLogits' in end_points:
				# Auxiliary-head loss for Inception-style networks.
				tf.losses.softmax_cross_entropy(
						logits=end_points['AuxLogits'], onehot_labels=labels,
						label_smoothing=FLAGS.label_smoothing, weights=0.4, scope='aux_loss')

			# NOTE: relies on a locally modified tf.nn.weighted_cross_entropy_with_logits
			# that takes pos_weight this way; with pos_weight=1.0 it matches the
			# unmodified function.
			weight_loss = tf.nn.weighted_cross_entropy_with_logits(
					logits=logits, targets=labels, pos_weight=FLAGS.pos_weight)
			weight_loss = math_ops.reduce_mean(weight_loss)
			tf.losses.add_loss(weight_loss)
			return end_points, names_to_values, names_to_updates, batch_accuracy

		clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
		first_clone_scope = deploy_config.clone_scope(0)
		# Gather update_ops from the first clone. These contain, for example,
		# the updates for the batch_norm variables created by network_fn.
		update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)

		# NOTE(review): stock model_deploy.Clone is a namedtuple without these
		# attributes — this assumes the local `deployment` package exposes them.
		metrics_updateops = []
		metrics_values = []
		batch_accuracy_total = []
		for i in range(FLAGS.num_clones):
			metrics_updateops.append(clones[i].names_to_updates.values())
			metrics_values.append(clones[i].names_to_values)
			batch_accuracy_total.append(clones[i].batch_accuracy)
		batch_accuracy = tf.reduce_mean(batch_accuracy_total)

		# Aggregate each streaming metric across clones: rates are averaged,
		# raw counts (TP/FP/TN/FN) are summed. (The original recomputed the
		# reduction inside the inner loop; only the final result was used.)
		metrics_values_stat = {}
		for key in metrics_values[0].keys():
			per_clone = [item[key] for item in metrics_values]
			if key in ('Accuracy', 'Auc', 'Precision', 'Recall'):
				metrics_values_stat[key] = tf.reduce_mean(per_clone)
			else:
				metrics_values_stat[key] = tf.reduce_sum(per_clone)
		for name, value in metrics_values_stat.items():
			summaries.add(tf.summary.scalar('eval/%s' % name, value))

		# Activation summaries from the first clone's end points.
		end_points = clones[0].outputs
		for end_point in end_points:
			x = end_points[end_point]
			summaries.add(tf.summary.histogram('activations/' + end_point, x))
			summaries.add(tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x)))

		# Add summaries for losses.
		for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
			summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

		# Add summaries for variables.
		for variable in slim.get_model_variables():
			summaries.add(tf.summary.histogram(variable.op.name, variable))

		#################################
		# Configure the moving averages #
		#################################
		if FLAGS.moving_average_decay:
			moving_average_variables = slim.get_model_variables()
			variable_averages = tf.train.ExponentialMovingAverage(
					FLAGS.moving_average_decay, global_step)
		else:
			moving_average_variables, variable_averages = None, None

		#########################################
		# Configure the optimization procedure. #
		#########################################
		num_samples_per_epoch = FLAGS.num_examples
		with tf.device(deploy_config.optimizer_device()):
			learning_rate = _configure_learning_rate(num_samples_per_epoch, global_step)
			optimizer = _configure_optimizer(learning_rate)
			summaries.add(tf.summary.scalar('learning_rate', learning_rate))

		if FLAGS.sync_replicas:  # defaults to False
			# If sync_replicas is enabled, the averaging will be done in the chief
			# queue runner.
			optimizer = tf.train.SyncReplicasOptimizer(
					opt=optimizer,
					replicas_to_aggregate=FLAGS.replicas_to_aggregate,
					variable_averages=variable_averages,
					variables_to_average=moving_average_variables,
					replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
					total_num_replicas=FLAGS.worker_replicas)
		elif FLAGS.moving_average_decay:
			update_ops.append(variable_averages.apply(moving_average_variables))

		variables_to_train = _get_variables_to_train()
		count_model_size()

		total_loss, clones_gradients = model_deploy.optimize_clones(
				clones,
				optimizer,
				var_list=variables_to_train)

		summaries.add(tf.summary.scalar('total_loss', total_loss))
		grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step)
		update_ops.append(grad_updates)

		# Group batch-norm updates and the gradient step into one train op.
		update_op = tf.group(*update_ops)

		# Add the summaries from the first clone. These contain the summaries
		# created by model_fn and either optimize_clones() or _gather_clone_loss().
		summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

		summary_op = tf.summary.merge(list(summaries), name='summary_op')

		# slim.learning.train() is too opaque for our needs; below is an
		# explicit, decoupled training loop.
		train_op = update_op
		init = tf.global_variables_initializer()
		gpu_options = tf.GPUOptions(allow_growth=True)
		sess = tf.Session(config=tf.ConfigProto(
				allow_soft_placement=True,
				log_device_placement=False,
				gpu_options=gpu_options))

		sess.run(init)
		init_fn = _get_init_fn()
		saver = tf.train.Saver(max_to_keep=2)
		if init_fn:
			# Warm-start / resume from a checkpoint.
			init_fn(sess)
		# (Variables were already initialized above; the original re-ran `init`
		# here redundantly when no init_fn was available.)

		# Streaming-metric accumulators live in local variables; reset them now
		# and again at the end of every epoch.
		acc_init = tf.local_variables_initializer()
		sess.run(acc_init)

		tf.train.start_queue_runners(sess=sess)  # start the input file queues

		summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=sess.graph)

		num_examples_per_step = FLAGS.batch_size * FLAGS.num_clones
		# Guard against zero (dataset smaller than one global step), which would
		# crash the `step % steps_per_epoch` checks below.
		steps_per_epoch = max(
				1, int(FLAGS.num_examples / (FLAGS.batch_size * FLAGS.num_clones)))
		print(steps_per_epoch)

		# Most recent completed-epoch statistics, reported in the training log.
		# (The original initialized last_TN twice and this list once omitted
		# nothing else; kept one assignment per metric.)
		last_Accuracy = 0
		last_Auc = 0
		last_Precision = 0
		last_Recall = 0
		last_TP = 0
		last_FN = 0
		last_TN = 0
		last_FP = 0

		if FLAGS.max_number_of_epochs is None:
			raise ValueError('You must supply --max_number_of_epochs')
		# Kept as a local: max_number_of_steps is not a declared flag, and
		# assigning unknown attributes on FLAGS fails with absl-backed versions.
		max_number_of_steps = FLAGS.max_number_of_epochs * steps_per_epoch
		# range(): the original used xrange, which is Python-2 only.
		for step in range(max_number_of_steps + 10):
			try:
				start_time = time.time()
				_, loss_value, batch_acc, _ = sess.run(
						[train_op, total_loss, batch_accuracy, metrics_updateops])

				duration = time.time() - start_time
				assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

				examples_per_sec = num_examples_per_step / duration
				sec_per_batch = duration / FLAGS.num_clones

				if step % 50 == 0:
					format_str = ('%s: step %d, loss = %.3f (%.1f examples/sec; %.3f '
							'sec/batch)')
					print (format_str % (time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), step, loss_value,
								 examples_per_sec, sec_per_batch))
					print ('batch accuracy= %.3f'%batch_acc)

				if step % 300 == 0:  # refresh summaries
					summary_str = sess.run(summary_op)
					summary_writer.add_summary(summary_str, step)

				if step % 500 == 0 or (step + 1) == max_number_of_steps:  # save a checkpoint
					checkpoint_pathh = os.path.join(FLAGS.train_dir, 'model.ckpt')
					saver.save(sess, checkpoint_pathh, global_step=step)

				if step % steps_per_epoch == 0:  # end of an epoch: report metrics
					Accuracy, Auc, TP, FP, TN, FN, Precision, Recall = sess.run(
							[metrics_values_stat['Accuracy'],
							 metrics_values_stat['Auc'],
							 metrics_values_stat['TP'],
							 metrics_values_stat['FP'],
							 metrics_values_stat['TN'],
							 metrics_values_stat['FN'],
							 metrics_values_stat['Precision'],
							 metrics_values_stat['Recall']])
					tf.logging.info('--------------No.%d Epoch Statistics------------'%int(step/steps_per_epoch))
					format_str = ('Accuracy: %3f, Auc: %.3f, TP: %d, FN: %d, TN: %d, FP: %d, Precision: %.3f, Recall: %.3f')
					tf.logging.info(format_str % (Accuracy,Auc,TP,FN,TN,FP,Precision,Recall))
					print('\n\n\n')
					# Record the nearest epoch statistics for the training log.
					last_Accuracy = Accuracy
					last_Auc = Auc
					last_Precision = Precision
					last_Recall = Recall
					last_TP = TP
					last_FN = FN
					last_TN = TN
					last_FP = FP
					# Reset the streaming-metric accumulators for the next epoch.
					sess.run(acc_init)

				if step == max_number_of_steps - 1:  # training done: write the log
					train_log(train_dir=FLAGS.train_dir,
										model_name=FLAGS.model_name,
										checkpoint_path=FLAGS.checkpoint_path,
										train_steps=step,
										train_epochs=step/steps_per_epoch,
										batch_size=FLAGS.batch_size,
										learning_rate=FLAGS.learning_rate,
										learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
										num_epochs_per_decay=FLAGS.num_epochs_per_decay,
										pos_weight=FLAGS.pos_weight,
										optimizer=FLAGS.optimizer,
										weight_decay=FLAGS.weight_decay,
										num_examples=FLAGS.num_examples,
										Accuracy=last_Accuracy,
										AUC=last_Auc,
										Precision=last_Precision,
										Recall=last_Recall,
										TP=last_TP,
										FN=last_FN,
										TN=last_TN,
										FP=last_FP)
					print('train_log has been write')

			except KeyboardInterrupt:  # forced exit: still write the training log
				train_log(train_dir=FLAGS.train_dir,
									model_name=FLAGS.model_name,
									checkpoint_path=FLAGS.checkpoint_path,
									train_steps=step,
									train_epochs=step/steps_per_epoch,
									batch_size=FLAGS.batch_size,
									learning_rate=FLAGS.learning_rate,
									learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
									num_epochs_per_decay=FLAGS.num_epochs_per_decay,
									pos_weight=FLAGS.pos_weight,
									optimizer=FLAGS.optimizer,
									weight_decay=FLAGS.weight_decay,
									num_examples=FLAGS.num_examples,
									Accuracy=last_Accuracy,
									AUC=last_Auc,
									Precision=last_Precision,
									Recall=last_Recall,
									TP=last_TP,
									FN=last_FN,
									TN=last_TN,
									FP=last_FP)
				sys.exit('train_log has been write')

def get_num_examples(dataset_dir):
	"""Count the records stored in the TFRecord files under `dataset_dir`.

	Args:
		dataset_dir: directory containing TFRecord files of patches.

	Returns:
		(total_pos, total_neg, total_examples): counts of positive-labelled,
		negative-labelled, and all patches.

	Raises:
		ValueError: if `dataset_dir` does not exist.
	"""
	total_pos = 0
	total_neg = 0
	total_examples = 0

	if not gfile.Exists(dataset_dir):
		raise ValueError('Failed to find label directory: ' + dataset_dir)

	filename_list = [os.path.join(dataset_dir, filename)
					for filename in os.listdir(dataset_dir)]

	# Single-epoch, unshuffled file queue so every record is read exactly once.
	dataset_queue = tf.train.string_input_producer(filename_list, shuffle=False, num_epochs=1, capacity=150)
	reader = tf.TFRecordReader()
	_, serialized_example = reader.read(dataset_queue)
	# Parse the full record schema; only 'label' is actually consumed here.
	# (The original also built unused `num_records` and `patchName` fetch ops.)
	features = tf.parse_single_example(serialized_example,
										 features={
											'image_raw': tf.FixedLenFeature([], tf.string),
											'label' : tf.FixedLenFeature([], tf.int64),
											'Xcoor' : tf.FixedLenFeature([], tf.int64),
											'Ycoor' : tf.FixedLenFeature([], tf.int64),
											'patchName' : tf.FixedLenFeature([], tf.string),
										 })
	label_fetch = tf.cast(features['label'], tf.int32)
	gpu_options = tf.GPUOptions(allow_growth=True)
	# num_epochs=1 creates a local epoch counter that must be initialized.
	loc_init_op = tf.local_variables_initializer()
	sess = tf.Session(config=tf.ConfigProto(
			allow_soft_placement=True,
			log_device_placement=False,
			gpu_options=gpu_options))
	sess.run(loc_init_op)
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess=sess, coord=coord)
	try:
		while not coord.should_stop():
			label = label_fetch.eval(session=sess)
			if label == 0:
				total_neg += 1
			else:
				total_pos += 1
			total_examples += 1
			if total_examples % 1000 == 0:
				print(total_examples)  # progress indicator
	except tf.errors.OutOfRangeError:
		# Raised by the queue when the single epoch is exhausted.
		print ('Done getting -- epochlimit reached')
	finally:
		coord.request_stop()
	coord.join(threads)
	sess.close()
	return total_pos, total_neg, total_examples

def train_log(train_dir,
				model_name,
				checkpoint_path,
				train_steps,
				train_epochs,
				batch_size,
				learning_rate,
				learning_rate_decay_factor,
				num_epochs_per_decay,
				pos_weight,
				optimizer,
				weight_decay,
				num_examples,
				Accuracy,
				AUC,
				Precision,
				Recall,
				TP,
				FN,
				TN,
				FP):
	"""Append one training-run summary entry to <train_dir>/train_log.txt.

	A dated header line is written for the first entry of each day; entries on
	the same day are separated by a blank line. Fixes over the original:
	uses the `train_dir` parameter (it read FLAGS.train_dir instead), uses
	%H:%M:%S (it wrote %m, the month, as the minutes), compares days
	numerically (string compare made '9' > '10'), and no longer leaves the
	file open read-only when it exists but is empty (which crashed the write).
	"""
	log_txt_path = os.path.join(train_dir, 'train_log.txt')
	header = '---------------%s---------------' % time.strftime(
			'%Y-%m-%d', time.localtime())

	prefix = header + '\n'  # default: brand-new (or empty) log file
	if os.path.exists(log_txt_path):
		with open(log_txt_path, 'r') as f:
			lines = f.readlines()
		if lines:
			day_changed = False
			try:
				# Assumes the previous entry's dated header sits 25 lines above
				# the end of the file — TODO confirm this offset stays valid if
				# the entry format changes.
				last_day = int(lines[-25].strip().strip('-').split('-')[-1])
				today = int(time.asctime().split()[2])
				day_changed = today != last_day
			except (IndexError, ValueError):
				day_changed = False
			prefix = '\n' + header + '\n' if day_changed else '\n'

	format_str = ('%s:\n\
		train_dir: %s\n \
		model_name: %s\n \
		checkpoint_path: %s\n \
		train_steps: %d\n \
		train_epochs: %d\n \
		batch_size: %d\n \
		learning_rate: %f\n \
		learning_rate_decay_factor: %f\n \
		num_epochs_per_decay: %d\n \
		pos_weight: %f\n \
		optimizer: %s\n \
		weight_decay: %f\n \
		num_examples: %d\n')

	format_str2 = ('latest %d epoch status:\n\
		Accuracy: %f\n \
		AUC: %f\n \
		Precision: %f\n \
		Recall: %f\n \
		TP: %d\n \
		FN: %d\n \
		TN: %d\n \
		FP: %d\n ')

	# NOTE(review): %f raises TypeError if weight_decay is None (the flag's
	# default) — callers should pass a number; behavior kept as-is.
	with open(log_txt_path, 'a') as train_log_file:
		train_log_file.write(prefix)
		train_log_file.write(format_str % (
				time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
				train_dir,
				model_name,
				checkpoint_path,
				train_steps,
				train_epochs,
				batch_size,
				learning_rate,
				learning_rate_decay_factor,
				num_epochs_per_decay,
				pos_weight,
				optimizer,
				weight_decay,
				num_examples))
		if train_epochs:
			train_log_file.write(format_str2 % (
					train_epochs,
					Accuracy,
					AUC,
					Precision,
					Recall,
					TP,
					FN,
					TN,
					FP))

def timecost(duration):
	"""Print a duration given in seconds as 'Xh Ymin Zsecs'."""
	hours, remainder = divmod(duration, 3600)
	minutes, secs = divmod(remainder, 60)
	print ('Time cost: %dh %dmin %dsecs' % (hours, minutes, secs))
	
if __name__ == '__main__':
	start_time = time.time()
	# NOTE: tf.app.run() calls sys.exit() after main() returns, so the two
	# lines below normally never execute; kept for completeness.
	tf.app.run()
	# %m is the month — the original wrote %M (minutes) in the date part.
	print('Processing finish at %s' % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
	timecost(time.time() - start_time)