# -*- coding: utf-8 -*-
"""
Multi-GPU training script.
"""

from __future__ import absolute_import #在 3.0 以前的旧版本中启用相对导入等特性所必须的 future 语句
from __future__ import division
from datetime import datetime
import time
import os.path
import re
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
from six.moves import xrange  
import tensorflow as tf
from prostate_input import inputPipeLine
import define
import pdb
import prostate_network
import tensorflow.contrib.framework as framework
import tensorflow.contrib.metrics as metrics
from tensorflow.python import debug as tfdbg
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS

#######################
# Learning Rate Flags #
#######################

tf.app.flags.DEFINE_string(
	'learning_rate_decay_type',
	'exponential',
	'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
	' or "polynomial"')

tf.app.flags.DEFINE_float('init_learning_rate', 0.01, 'Initial learning rate.')

tf.app.flags.DEFINE_float(
	'end_learning_rate', 0.0001,
	'The minimal end learning rate used by a polynomial decay learning rate.')

tf.app.flags.DEFINE_float(
	'label_smoothing', 0.0, 'The amount of label smoothing.')

tf.app.flags.DEFINE_float(
	'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')

tf.app.flags.DEFINE_float(
	'num_epochs_per_decay', 2.0,
	'Number of epochs after which learning rate decays.')

tf.app.flags.DEFINE_float(
	'MOVING_AVERAGE_DECAY', 0.9999,
	'Decay used for the exponential moving average of trainable variables.')

#################
# Dataset Flags #
#################

tf.app.flags.DEFINE_string(
	'dataset_train_or_test', 'train', 'The name of the train/test split.')

tf.app.flags.DEFINE_string(
	'dataset_dir', None, 'The directory where the dataset files are stored.')

#####################
# Fine-Tuning Flags #
#####################

tf.app.flags.DEFINE_string(
	'pretrained_model_ckpt_path', None,
	'The path to a checkpoint from which to fine-tune.')

tf.app.flags.DEFINE_string(
	'checkpoint_exclude_scopes', None,
	'Comma-separated list of scopes of variables to exclude when restoring '
	'from a checkpoint.')

tf.app.flags.DEFINE_string(
	'trainable_scopes', None,
	'Comma-separated list of scopes to filter the set of variables to train.'
	'By default, None would train all the variables.')

tf.app.flags.DEFINE_boolean(
	'ignore_missing_vars', False,
	'When restoring a checkpoint would ignore missing variables.')


##################
# Training Flags #
##################

tf.app.flags.DEFINE_float(
	'moving_average_mode', None,
	'If left as None, then moving averages are not used.')

tf.app.flags.DEFINE_string(
	'optimizer', 'sgd',
	'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
	'"ftrl", "momentum", "sgd" or "rmsprop".')

tf.app.flags.DEFINE_boolean('log_device_placement', False,
							"""Whether to log device placement.""")

# One model tower is built per GPU (see train()).
tf.app.flags.DEFINE_integer('num_gpus', 4, 'How many GPUs to use.')


tf.app.flags.DEFINE_integer(
	'batch_size', 32, 'The number of samples in each batch.')

tf.app.flags.DEFINE_string(
	'model', 'inception_v3', 'The name of the architecture to train.')


tf.app.flags.DEFINE_integer('max_steps', 100000,
							'The maximum number of training steps.')

tf.app.flags.DEFINE_string('train_save_ckpt', '/home/ramsley/workspace/prostate/prostate_ckpt',
							'Directory in which to save model checkpoints.')


tf.app.flags.DEFINE_string('train_log_dir', '/home/ramsley/workspace/prostate/prostate_train_log',
							 """Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('is_train_data', 'train',
							'Which split the input pipeline reads.')
# Was DEFINE_string with a bool default; a boolean flag parses --fast_mode correctly.
tf.app.flags.DEFINE_boolean('fast_mode', True,
							'Whether the input pipeline uses fast preprocessing.')
# Was DEFINE_string with an int default; an integer flag parses correctly.
tf.app.flags.DEFINE_integer('NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN', 160000,
							'Number of training examples per epoch.')
tf.app.flags.DEFINE_string('train_TFRecords_dir', define.TRAIN_TFRECORDS_DIR,
							'Directory containing the training TFRecords files.')

def restoreFromSaver(saver, sess):
	"""Restore model weights from the latest checkpoint in FLAGS.train_log_dir.

	Args:
		saver: tf.train.Saver used to restore the variables.
		sess: the active tf.Session.

	Prints a message in either case; returns None.
	"""
	# Fixed: removed a leftover pdb.set_trace() debugger breakpoint that
	# halted every restore.
	ckpt = tf.train.get_checkpoint_state(FLAGS.train_log_dir)
	if ckpt and ckpt.model_checkpoint_path:
		saver.restore(sess, ckpt.model_checkpoint_path)
		# NOTE(review): this deletes the directory the checkpoint was just
		# read from -- confirm that discarding the old logs here is intended.
		gfile.DeleteRecursively(FLAGS.train_log_dir)
		print('checkpoint file has been restored!')
	else:
		print('No checkpoint file found')
		return

def model_select(model, image_batch, is_training=True, reuse = None):
	"""Build the inference graph for the requested architecture.

	Args:
		model: architecture name, one of 'vgg16', 'resnet34', 'resnet50',
			'resnet152', 'inception_resnet_v2', 'inception_v3', 'inception_v4'.
		image_batch: input image tensor.
		is_training: whether to build the graph in training mode.
		reuse: variable-reuse flag forwarded to every builder except vgg16.

	Returns:
		Whatever the selected prostate_network builder returns (logits and,
		for most models, end_points).

	Raises:
		ValueError: if `model` is not a recognized architecture name
			(previously the function silently returned None; raising matches
			optimizer_select's handling of unknown names).
	"""
	known_models = ('vgg16', 'resnet34', 'resnet50', 'resnet152',
					'inception_resnet_v2', 'inception_v3', 'inception_v4')
	if model not in known_models:
		raise ValueError('Model [%s] was not recognized' % model)
	# Every builder follows the '<name>_inference' naming convention.
	inference_fn = getattr(prostate_network, model + '_inference')
	if model == 'vgg16':
		# vgg16_inference does not accept a reuse argument.
		return inference_fn(image_batch, is_training=is_training)
	return inference_fn(image_batch, is_training=is_training, reuse=reuse)

def optimizer_select(learning_rate):
	"""Return the tf.train optimizer named by FLAGS.optimizer.

	Args:
		learning_rate: scalar tensor or float passed to the optimizer.

	Returns:
		A configured tf.train.Optimizer instance.

	Raises:
		ValueError: if FLAGS.optimizer is not a supported optimizer name.
	"""
	if FLAGS.optimizer == 'adadelta':
		optimizer = tf.train.AdadeltaOptimizer(
			learning_rate,
			rho=0.95,
			epsilon=1e-8)
	elif FLAGS.optimizer == 'adagrad':
		optimizer = tf.train.AdagradOptimizer(
			learning_rate,
			initial_accumulator_value=0.1)
	elif FLAGS.optimizer == 'adam':
		optimizer = tf.train.AdamOptimizer(
			learning_rate,
			beta1=0.9,
			beta2=0.999,
			epsilon=1e-8)
	elif FLAGS.optimizer == 'momentum':
		optimizer = tf.train.MomentumOptimizer(
			learning_rate,
			momentum=0.9,
			name='Momentum')
	elif FLAGS.optimizer == 'rmsprop':
		optimizer = tf.train.RMSPropOptimizer(
			learning_rate,
			decay=0.9,
			momentum=0.9,
			epsilon=1e-10)
	elif FLAGS.optimizer == 'sgd':
		optimizer = tf.train.GradientDescentOptimizer(learning_rate)
	else:
		# Fixed: the argument was passed positionally ('...', FLAGS.optimizer),
		# so the %s placeholder was never interpolated into the message.
		raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
	return optimizer

def _get_init_fn():
	"""Return a callable that warm-starts training from a pretrained checkpoint.

	The returned function is only run when the model is initialized at the
	very first global step. Returns None when no pretrained checkpoint path
	was supplied.
	"""
	if FLAGS.pretrained_model_ckpt_path is None:
		return None

	exclusions = []
	if FLAGS.checkpoint_exclude_scopes:
		exclusions = [s.strip()
						for s in FLAGS.checkpoint_exclude_scopes.split(',')]

	# Keep every model variable whose op name does not start with an
	# excluded scope prefix.
	variables_to_restore = [
		v for v in slim.get_model_variables()
		if not any(v.op.name.startswith(ex) for ex in exclusions)
	]

	ckpt_path = FLAGS.pretrained_model_ckpt_path
	if tf.gfile.IsDirectory(ckpt_path):
		# A directory was given: resolve it to its newest checkpoint file.
		ckpt_path = tf.train.latest_checkpoint(ckpt_path)

	tf.logging.info('Fine-tuning from %s' % ckpt_path)

	return slim.assign_from_checkpoint_fn(
		ckpt_path,
		variables_to_restore,
		ignore_missing_vars=FLAGS.ignore_missing_vars)


def _get_variables_to_train(restore_flag):
	"""Return the list of variables the optimizer should update.

	When no trainable scopes are configured, or when not fine-tuning from a
	pretrained checkpoint (restore_flag != 'cp'), every trainable variable
	is returned; otherwise only variables under FLAGS.trainable_scopes.
	"""
	if FLAGS.trainable_scopes is None or restore_flag != 'cp':
		return tf.trainable_variables()

	variables_to_train = []
	for scope_name in (s.strip() for s in FLAGS.trainable_scopes.split(',')):
		variables_to_train.extend(
			tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope_name))
	return variables_to_train

def tower_loss(scope, image_batch, label_batch):
	"""Build the forward pass and total loss for one GPU tower.

	Args:
		scope: this tower's name scope (e.g. 'tower_0/'); used both to pick
			up only this tower's collected losses and to strip the prefix
			from summary names so all towers share one summary name.
		image_batch: input images for this tower.
		label_batch: ground-truth labels for this tower.

	Returns:
		(logits, total_loss) where total_loss = model losses +
		regularization losses.
	"""
	logits, end_points = model_select(FLAGS.model, image_batch)

	# Register the model loss, then gather everything collected under this
	# tower's 'losses' collection.
	prostate_network.loss(logits, label_batch, end_points)
	tower_losses = tf.get_collection('losses', scope)
	model_loss = tf.add_n(tower_losses)

	reg_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
	total_loss = tf.add_n([model_loss, reg_loss], name='total_loss')

	# Remove the 'tower_N/' prefix from the op names used as summary tags.
	tower_prefix = '%s_[0-9]*/' % prostate_network.TOWER_NAME
	tf.summary.scalar(re.sub(tower_prefix, '', model_loss.op.name), model_loss)
	tf.summary.scalar(re.sub(tower_prefix, '', reg_loss.op.name), reg_loss)

	return logits, total_loss


def average_gradients(tower_grads):
	"""Average each variable's gradient across all towers.

	Args:
		tower_grads: list with one entry per tower, each a list of
			(gradient, variable) pairs as produced by compute_gradients.

	Returns:
		A single list of (averaged_gradient, variable) pairs.
	"""
	averaged = []
	# zip(*...) regroups the per-tower lists so each iteration sees every
	# tower's (grad, var) pair for one particular variable.
	for per_var_pairs in zip(*tower_grads):
		grad_stack = [g for g, _ in per_var_pairs]
		mean_grad = tf.reduce_mean(grad_stack, 0)
		# Variables are shared across towers, so any tower's reference works.
		shared_var = per_var_pairs[0][1]
		averaged.append((mean_grad, shared_var))
	return averaged

def valid_accuracy(logits, label_batch):
	"""Compute batch accuracy of `logits` against `label_batch`.

	Also writes the result to an 'accuracy' scalar summary and returns the
	accuracy tensor.
	"""
	predicted_labels = tf.cast(tf.argmax(logits, 1), tf.int32)
	acc = metrics.accuracy(predicted_labels, label_batch)
	tf.summary.scalar('accuracy', acc)
	return acc

def train(restore_flag):
	"""Build the multi-tower (one per GPU) training graph and run the loop.

	Args:
		restore_flag: 'cp' to warm-start from a pretrained checkpoint,
			're' to resume from a checkpoint in FLAGS.train_log_dir,
			'ov' (or None) to train from scratch.
	"""
	with tf.Graph().as_default(), tf.device('/cpu:0'):  # variables and gradient averaging live on the CPU
		global_step = slim.create_global_step()
		# Batches needed to see every training example once.
		num_batches_per_epoch = (FLAGS.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
								 FLAGS.batch_size)
		decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)  # steps between learning-rate decays
		lr = tf.train.exponential_decay(FLAGS.init_learning_rate,
										global_step,
										decay_steps,    # decay period in steps
										FLAGS.learning_rate_decay_factor,    # decay rate
										staircase=True)
		opt = optimizer_select(lr)  # optimizer chosen by FLAGS.optimizer
		tower_grads = []    # per-GPU lists of (gradient, variable) pairs
		with tf.variable_scope(tf.get_variable_scope()):    # re-entered so reuse_variables() below shares weights across towers
			for i in xrange(FLAGS.num_gpus):
				with tf.device('/gpu:%d' % i):
					with tf.name_scope('%s_%d' % (prostate_network.TOWER_NAME, i)) as scope: # name_scope prefixes op names; variable_scope prefixes get_variable() names
						image_batch, label_batch,_ ,_ ,_  = inputPipeLine(FLAGS.is_train_data, batchSize = FLAGS.batch_size, \
												fast_mode = FLAGS.fast_mode, Data_Dir = FLAGS.train_TFRecords_dir, numEpochs = None)
						logits,loss = tower_loss(scope, image_batch, label_batch)
						# Select variables to train (useful when fine-tuning a pretrained model).
						variables_to_train = _get_variables_to_train(restore_flag)
						# Reuse variables for the next tower.
						tf.get_variable_scope().reuse_variables()
						# Overwritten each iteration, so after the loop this holds only the LAST tower's summaries.
						summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
						# Calculate the gradients for the batch of data on this tower.
						grads = opt.compute_gradients(loss,variables_to_train)
						# Keep track of the gradients across all towers.
						tower_grads.append(grads)
		# Synchronous training: average the per-tower gradients.
		grads = average_gradients(tower_grads)

		summaries.append(tf.summary.scalar('learning_rate', lr))
		for grad, var in grads:
			if grad is not None:
				summaries.append(       # histogram of each variable's gradient, to monitor training progress
					tf.summary.histogram(var.op.name + '/gradients', grad))
		# Histogram summaries for every trainable variable.
		for var in tf.trainable_variables():
			summaries.append(tf.summary.histogram(var.op.name, var))

		apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)  # apply averaged gradients; increments global_step
		update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

		if FLAGS.moving_average_mode:
			variable_averages = tf.train.ExponentialMovingAverage(
				FLAGS.MOVING_AVERAGE_DECAY, global_step)
			variables_averages_op = variable_averages.apply(tf.trainable_variables())
			update_ops.append(variables_averages_op)
		
		# Run the collected update ops (and optional EMA update) together with the weight update.
		with tf.control_dependencies(update_ops):
			train_op = tf.group(apply_gradient_op)

		# Merges every summary in the graph, not just the `summaries` list built above.
		summary_op =tf.summary.merge_all()
		init = tf.global_variables_initializer()

		# allow_growth avoids grabbing all GPU memory up front.
		gpu_options = tf.GPUOptions(allow_growth=True)
		sess = tf.Session(config=tf.ConfigProto(
			allow_soft_placement=True,
			log_device_placement=FLAGS.log_device_placement,
			gpu_options=gpu_options))

		if restore_flag == 'cp':
			sess.run(init)
			init_fn = _get_init_fn()
			init_fn(sess)
		elif restore_flag == 're':
			# NOTE(review): no 'moving_average' flag is defined above (only
			# 'moving_average_mode'); this access raises AttributeError when
			# restore_flag == 're' -- confirm the intended flag name.
			if FLAGS.moving_average:
				saver = tf.train.Saver(variable_averages.variables_to_restore())
			else:
				saver = tf.train.Saver()
			restoreFromSaver(saver,sess)
			# NOTE(review): running the initializer AFTER restoring overwrites
			# the restored weights -- confirm this ordering is intended.
			sess.run(init)
		elif restore_flag == 'ov':
			sess.run(init)

		saver = tf.train.Saver()
		tf.train.start_queue_runners(sess=sess)
		summary_writer = tf.summary.FileWriter(FLAGS.train_log_dir,
												graph=sess.graph)
		for step in xrange(FLAGS.max_steps):
			start_time = time.time()
			_, loss_value = sess.run([train_op, loss])
			duration = time.time() - start_time
			assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
			if step % 10 == 0:
				num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
				examples_per_sec = num_examples_per_step / duration
				sec_per_batch = duration / FLAGS.num_gpus

				format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
						'sec/batch)')
				# NOTE(review): '%Y-%M-%d' formats minutes, not the month --
				# '%Y-%m-%d' is presumably intended.
				print format_str % (time.strftime('%Y-%M-%d %H:%M:%S',time.localtime(time.time())), step, loss_value,
							 examples_per_sec, sec_per_batch)
			if step % 100 == 0:
				summary_str = sess.run(summary_op)
				summary_writer.add_summary(summary_str, step)  # write merged summaries every 100 steps
			if step % 500 == 0 or (step + 1) == FLAGS.max_steps:
				checkpoint_path = os.path.join(FLAGS.train_log_dir, 'model.ckpt')
				saver.save(sess, checkpoint_path, global_step=step)

def main(argv=None):  
	restore_flag = None
	if gfile.Exists(FLAGS.train_log_dir):
		# if os.listdir(FLAGS.train_log_dir):
		while True:
			answer = raw_input('please select a mode,(cp/re/ov)\n\
				cp: restore from a pretrained model\n\
				re: restore from a self-trained model\n\
				ov: train a new model\n'
				)
			if answer == 'cp' or answer =='ov':
				restore_flag = answer
				gfile.DeleteRecursively(FLAGS.train_log_dir)
				break
			elif answer == 're':
				restore_flag = answer
				break
			else:
				print 'please chose the right one.'
				continue
	else:
		gfile.MakeDirs(FLAGS.train_log_dir)
	train(restore_flag)
	
if __name__ == '__main__':
	tf.app.run()