# -*- coding: utf-8 -*-

'''
Main training entry point.
Uses the input_data and models modules to train a speech-command model.
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os.path
import sys

import numpy as np
from six.moves import xrange
import tensorflow as tf

import input_data
import models
from tensorflow.python.platform import gfile
from tensorflow.contrib import slim as slim 

FLAGS = None


def main(_):
	"""Build the training graph, run the training loop, and evaluate on the test set.

	All configuration comes from the module-level FLAGS (parsed in the
	__main__ block). Side effects: writes TensorBoard summaries under
	FLAGS.summaries_dir, and the graph definition, label list and
	best-validation checkpoints under FLAGS.train_dir.
	"""
	# Log everything at INFO level and above.
	tf.logging.set_verbosity(tf.logging.INFO)

	sess = tf.InteractiveSession()

	# Derive the model settings (fingerprint size, label count, ...) from the flags.
	model_settings = models.prepare_model_settings(
			len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
			FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
			FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
	# Loads, partitions and preprocesses the audio data.
	audio_processor = input_data.AudioProcessor(
			FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
			FLAGS.unknown_percentage,
			FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
			FLAGS.testing_percentage, model_settings)

	fingerprint_size = model_settings['fingerprint_size']   # number of input features per clip
	label_count = model_settings['label_count']             # number of output classes
	time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)  # random time shift, in samples

	# Piecewise-constant learning-rate schedule: train the first
	# training_steps_list[0] steps at learning_rates_list[0], the next
	# training_steps_list[1] steps at learning_rates_list[1], and so on.
	# Defaults: 12000 steps at 0.001, then 8000 steps at 0.0001, so early
	# training moves fast and late training refines with a smaller rate.
	training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
	learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
	if len(training_steps_list) != len(learning_rates_list):
		raise Exception(
				'--how_many_training_steps and --learning_rate must be equal length '
				'lists, but are %d and %d long instead' % (len(training_steps_list),
										len(learning_rates_list)))

	fingerprint_input = tf.placeholder(
			tf.float32, [None, fingerprint_size], name='fingerprint_input')

	# logits is the raw (pre-softmax) output of the network.
	logits, dropout_prob = models.create_model(
			fingerprint_input,
			model_settings,
			FLAGS.model_architecture,
			FLAGS.model_size_info,
			is_training=True)

	# One-hot ground-truth labels.
	ground_truth_input = tf.placeholder(
			tf.float32, [None, label_count], name='groundtruth_input')

	# Optional numeric checks to catch NaN/Inf during training.
	# Disabled by default; enabled with --check_nans.
	control_dependencies = []
	if FLAGS.check_nans:
		checks = tf.add_check_numerics_ops()
		control_dependencies = [checks]

	# softmax_cross_entropy_with_logits applies softmax to the raw logits
	# internally and computes the cross-entropy in a numerically stable way,
	# so the model itself does not need (and must not have) a softmax layer.
	with tf.name_scope('cross_entropy'):
		cross_entropy_mean = tf.reduce_mean(
				tf.nn.softmax_cross_entropy_with_logits(
					labels=ground_truth_input, logits=logits))
	tf.summary.scalar('cross_entropy', cross_entropy_mean)  # record the loss for TensorBoard

	update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)	# ops that must run with each train step

	# Order the computation: the UPDATE_OPS collection and (when --check_nans
	# is set) the numeric checks run before the training op.
	with tf.name_scope('train'), tf.control_dependencies(update_ops), tf.control_dependencies(control_dependencies):
		learning_rate_input = tf.placeholder(
				tf.float32, [], name='learning_rate_input')
		train_op = tf.train.AdamOptimizer(
				learning_rate_input)
		train_step = slim.learning.create_train_op(cross_entropy_mean, train_op)
	predicted_indices = tf.argmax(logits, 1)			# predicted class per example
	expected_indices = tf.argmax(ground_truth_input, 1)		# true class per example
	correct_prediction = tf.equal(predicted_indices, expected_indices)
	# Confusion matrix over the label set.
	confusion_matrix = tf.confusion_matrix(
			expected_indices, predicted_indices, num_classes=label_count)
	evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
	tf.summary.scalar('accuracy', evaluation_step)

	# Manually incremented global step, so training can resume from a checkpoint.
	global_step = tf.train.get_or_create_global_step()
	increment_global_step = tf.assign(global_step, global_step + 1)

	saver = tf.train.Saver(tf.global_variables())

	# Merge all summaries and write them under FLAGS.summaries_dir.
	merged_summaries = tf.summary.merge_all()
	train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
						sess.graph)
	validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')

	tf.global_variables_initializer().run()

	# Report the total number of trainable parameters.
	params = tf.trainable_variables()
	num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))
	print('Total number of Parameters: ', num_params)

	start_step = 1

	# Optionally restore a pretrained model and resume from its global step.
	if FLAGS.start_checkpoint:
		models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
		start_step = global_step.eval(session=sess)

	tf.logging.info('Training from step: %d ', start_step)

	# Save the graph definition so it can later be combined with checkpoints.
	tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
				FLAGS.model_architecture + '.pbtxt')

	# Save the label list next to the graph.
	with gfile.GFile(
			os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
			'w') as f:
		f.write('\n'.join(audio_processor.words_list))

	# Training loop.
	best_accuracy = 0
	training_steps_max = np.sum(training_steps_list)
	for training_step in xrange(start_step, training_steps_max + 1):

		# Pick the learning rate for this step from the piecewise schedule.
		training_steps_sum = 0
		for i in range(len(training_steps_list)):
			training_steps_sum += training_steps_list[i]
			if training_step <= training_steps_sum:
				learning_rate_value = learning_rates_list[i]
				break

		# Pull a batch of (augmented) training features and labels.
		train_fingerprints, train_ground_truth = audio_processor.get_data(
				FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
				FLAGS.background_volume, time_shift_samples, 'training', sess)

		# Run one optimization step on the batch.
		# NOTE(review): dropout_prob is fed 1.0 here, which presumably keeps
		# all units (i.e. disables dropout) during training — confirm this is
		# intended rather than a keep-probability such as 0.5.
		train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
				[
					merged_summaries, evaluation_step, cross_entropy_mean, train_step,
					increment_global_step
				],
				feed_dict={
					fingerprint_input: train_fingerprints,
					ground_truth_input: train_ground_truth,
					learning_rate_input: learning_rate_value,
					dropout_prob: 1.0
				})

		train_writer.add_summary(train_summary, training_step)	# record training metrics for TensorBoard
		tf.logging.info('Step #%d: rate %f, accuracy %.2f%%, cross entropy %f' %
					(training_step, learning_rate_value, train_accuracy * 100,
					cross_entropy_value))

		is_last_step = (training_step == training_steps_max)

		# Every eval_step_interval steps (and on the final step), evaluate on
		# the whole validation set, batch by batch.
		if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
			set_size = audio_processor.set_size('validation')
			total_accuracy = 0
			total_conf_matrix = None
			for i in xrange(0, set_size, FLAGS.batch_size):
				validation_fingerprints, validation_ground_truth = (
						audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
								0.0, 0, 'validation', sess))
				validation_summary, validation_accuracy, conf_matrix = sess.run(
					[merged_summaries, evaluation_step, confusion_matrix],
					feed_dict={
						fingerprint_input: validation_fingerprints,
						ground_truth_input: validation_ground_truth,
						dropout_prob: 1.0
					})
				validation_writer.add_summary(validation_summary, training_step)
				# The last batch may be smaller than batch_size; weight
				# each batch's accuracy by its actual size.
				batch_size = min(FLAGS.batch_size, set_size - i)
				total_accuracy += (validation_accuracy * batch_size) / set_size
				if total_conf_matrix is None:
					total_conf_matrix = conf_matrix
				else:
					total_conf_matrix += conf_matrix
			tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
			tf.logging.info('Step %d: Validation accuracy = %.2f%% (N=%d)' %
					(training_step, total_accuracy * 100, set_size))

			# Save a checkpoint whenever validation accuracy improves.
			if total_accuracy > best_accuracy:
				best_accuracy = total_accuracy
				checkpoint_path = os.path.join(FLAGS.train_dir, 'best',
								FLAGS.model_architecture + '_'+ str(int(best_accuracy*10000)) + '.ckpt')
				tf.logging.info('Saving best model to "%s-%d"', checkpoint_path, training_step)
				saver.save(sess, checkpoint_path, global_step=training_step)
			tf.logging.info('So far the best validation accuracy is %.2f%%' % (best_accuracy*100))

	# After training completes, evaluate once on the held-out test set.
	set_size = audio_processor.set_size('testing')
	tf.logging.info('set_size=%d', set_size)
	total_accuracy = 0
	total_conf_matrix = None
	for i in xrange(0, set_size, FLAGS.batch_size):
		test_fingerprints, test_ground_truth = audio_processor.get_data(
			FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
		test_accuracy, conf_matrix = sess.run(
			[evaluation_step, confusion_matrix],
			feed_dict={
				fingerprint_input: test_fingerprints,
				ground_truth_input: test_ground_truth,
				dropout_prob: 1.0
			})
		batch_size = min(FLAGS.batch_size, set_size - i)
		total_accuracy += (test_accuracy * batch_size) / set_size
		if total_conf_matrix is None:
			total_conf_matrix = conf_matrix
		else:
			total_conf_matrix += conf_matrix
	tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
	tf.logging.info('Final test accuracy = %.2f%% (N=%d)' % (total_accuracy * 100,
								set_size))


if __name__ == '__main__':
	parser = argparse.ArgumentParser()
	parser.add_argument(
		 '--data_url',
		 type=str,
		 #default='http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
		 default = '',  # 默认不从网络下载
		 help='Location of speech training data archive on the web.')
	parser.add_argument(
		'--data_dir',
		type=str,
		default='G:\\wav_data',
		help="""\
		Where is the the speech training data.
		""")
	parser.add_argument(
		'--background_volume',
		type=float,
		default=0.1,
		help="""\
		How loud the background noise should be, between 0 and 1.
		""")
	parser.add_argument(
		'--background_frequency',
		type=float,
		default=0.8,
		help="""\
		How many of the training samples have background noise mixed in.
		""")
	parser.add_argument(
		'--silence_percentage',
		type=float,
		default=10.0,
		help="""\
		How much of the training data should be silence.
		""")
	parser.add_argument(
		'--unknown_percentage',	# 位置语音数据的比例
		type=float,
		default=10.0,
		help="""\
		How much of the training data should be unknown words.
		""")
	parser.add_argument(
		'--time_shift_ms',
		type=float,
		default=100.0,
		help="""\
		Range to randomly shift the training audio by in time.
		""")
	parser.add_argument(
		'--testing_percentage',
		type=int,
		default=10,
		help='What percentage of wavs to use as a test set.')
	parser.add_argument(
		'--validation_percentage',
		
		type=int,
		default=10,
		help='What percentage of wavs to use as a validation set.')
	parser.add_argument(
		'--sample_rate',
		type=int,
		default=16000,
		help='Expected sample rate of the wavs',)
	parser.add_argument(
		'--clip_duration_ms',
		type=int,
		default=1000,
		help='Expected duration in milliseconds of the wavs',)
	parser.add_argument(      #设置默认帧长为40ms
		'--window_size_ms',
		type=float,
		default=40.0,
		help='How long each spectrogram timeslice is',)
	parser.add_argument(        # 帧移动20ms
		'--window_stride_ms',
		type=float,
		default=20.0,
		help='How long each spectrogram timeslice is',)
	parser.add_argument(
		'--dct_coefficient_count',
		type=int,
		default=40,
		help='How many bins to use for the MFCC fingerprint',)
	parser.add_argument(
		'--how_many_training_steps',
		type=str,
		default='12000,8000',
		help='How many training loops to run',)
	parser.add_argument(
		'--eval_step_interval',		# 多久进行一次验证，默认400	
		type=int,
		default=400,
		help='How often to evaluate the training results.')
	parser.add_argument(
		'--learning_rate',
		type=str,
		default='0.001,0.0001',
		help='How large a learning rate to use when training.')
	parser.add_argument(
		'--batch_size',
		type=int,
		default=100,
		help='How many items to train with at once',)
	parser.add_argument(
		'--summaries_dir',
		type=str,
		default='C:\\Users\\WangS\\Desktop\\Bishe\\src\\CNN\\train_file',
		help='Where to save summary logs for TensorBoard.')
	parser.add_argument(
		'--wanted_words',
		type=str,
		default='yes,no,up,down,left,right,on,off,stop,go,bird,cat,dog,wow,marvin',
		help='Words to use (others will be added to an unknown label)',)
	parser.add_argument(
		'--train_dir',
		type=str,
		default='C:\\Users\\WangS\\Desktop\\Bishe\\src\\CNN\\my_model',
		help='Directory to write event logs and checkpoint.')
	parser.add_argument(
		'--save_step_interval',
		type=int,
		default=100,
		help='Save model checkpoint every save_steps.')
	parser.add_argument(
		'--start_checkpoint',
		type=str,
		default='',
		help='If specified, restore this pretrained model before any training.')
	parser.add_argument(
		'--model_architecture',
		type=str,
		default='low_latency_conv',
		help='What model architecture to use')
	parser.add_argument(
		'--model_size_info',
		type=int,
		nargs="+",
		default=[128,128,128,128],
		help='Model dimensions - different for various models')
	parser.add_argument(
		'--check_nans',
		type=bool,
		default=False,
		help='Whether to check for invalid numbers during processing')

	FLAGS, unparsed = parser.parse_known_args()   #使用unparse来获取已经传递进来的参数
	tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
