from __future__ import absolute_import

from __future__ import division
from __future__ import print_function

import argparse
import os.path
import sys
import numpy as np

import tensorflow as tf
import input_data
import models

def run_quant_inference(wanted_words, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, dct_coefficient_count, 
				model_architecture, model_size_info):
	"""Quantize a trained model to 8-bit fixed point and evaluate it.

	Every trainable variable is quantized per-layer to Qm.n 8-bit fixed
	point and dumped as one '#define <name> {...}' line into 'weights.h'.
	The quantized values are then de-quantized and written back into the
	graph, so the accuracies reported below reflect the quantized model.

	The model arguments must match the ones used for training, otherwise
	restoring the checkpoint / evaluating the test sets will fail.

	Args:
		wanted_words: Comma-separated keywords the model was trained on.
		sample_rate: Expected audio sample rate in Hz.
		clip_duration_ms: Length of each audio clip in milliseconds.
		window_size_ms: Spectrogram frame length in milliseconds.
		window_stride_ms: Spectrogram frame stride in milliseconds.
		dct_coefficient_count: Number of MFCC coefficients per frame.
		model_architecture: Name of the model architecture to build.
		model_size_info: Per-layer size parameters for the architecture.
	"""
	tf.logging.set_verbosity(tf.logging.INFO)
	sess = tf.InteractiveSession()
	# Build the preprocessing pipeline and the inference graph.
	# NOTE: the function parameters are used here (instead of reading the
	# same values back out of FLAGS) so the documented contract holds.
	words_list = input_data.prepare_words_list(wanted_words.split(','))
	model_settings = models.prepare_model_settings(
		len(words_list), sample_rate, clip_duration_ms, window_size_ms,
		window_stride_ms, dct_coefficient_count)
	audio_processor = input_data.AudioProcessor(
		FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
		FLAGS.unknown_percentage,
		wanted_words.split(','), FLAGS.validation_percentage,
		FLAGS.testing_percentage, model_settings)

	label_count = model_settings['label_count']
	fingerprint_size = model_settings['fingerprint_size']	# flattened input length of one sample

	fingerprint_input = tf.placeholder(
		tf.float32, [None, fingerprint_size], name='fingerprint_input')

	logits = models.create_model(
		fingerprint_input,
		model_settings,
		model_architecture,
		model_size_info,
		is_training=False)

	ground_truth_input = tf.placeholder(
		tf.float32, [None, label_count], name='groundtruth_input')

	predicted_indices = tf.argmax(logits, 1)
	expected_indices = tf.argmax(ground_truth_input, 1)
	correct_prediction = tf.equal(predicted_indices, expected_indices)
	confusion_matrix = tf.confusion_matrix(
		expected_indices, predicted_indices, num_classes=label_count)
	evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))	# batch accuracy
	models.load_variables_from_checkpoint(sess, FLAGS.checkpoint)			# restore trained weights
	# Alternative: rebuild the graph from the .meta file, e.g.
	# saver = tf.train.import_meta_graph(model_dir+'low_latency_conv_8485.ckpt-12800.meta')
	# saver.restore(sess, tf.train.latest_checkpoint(model_dir))

	# Truncate (or create) the output header, then quantize and dump every
	# trainable variable in turn.
	open('weights.h', 'wb').close()
	for v in tf.trainable_variables():
		_quantize_variable(sess, v, 'weights.h')

	# Evaluate the (now quantized) model on all three dataset splits.
	eval_args = (sess, audio_processor, model_settings, FLAGS.batch_size,
			fingerprint_input, ground_truth_input,
			evaluation_step, confusion_matrix)
	_evaluate_split('training', 'Training', *eval_args)
	_evaluate_split('validation', 'Validation', *eval_args)
	_evaluate_split('testing', 'Test', *eval_args)


def _quantize_variable(sess, v, header_path):
	"""Quantize one variable to 8-bit fixed point, dump it, and write it back.

	Appends '#define <sanitized_name> {v0, v1, ...}' to *header_path* and
	replaces the variable's value in the graph with its de-quantized form.
	"""
	var_values = sess.run(v)		# fetch the trained values
	min_value = var_values.min()
	max_value = var_values.max()
	# Integer bits of the Qm.n format: smallest m with 2**m >= max|w|.
	# NOTE(review): an all-zero variable makes log2(0) = -inf and the int()
	# cast below raises — assumed not to occur for trained weights.
	int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
	dec_bits = 7 - int_bits		# 8-bit word, one bit reserved for the sign
	# NOTE(review): np.round can yield 128 when max|w| is an exact power of
	# two; the original code does not clip to [-128, 127] — kept as-is.
	var_values = np.round(var_values * 2 ** dec_bits)
	# Sanitize the TF name ('a/b:0') into a valid C identifier.
	var_name = str(v.name).replace('/', '_').replace(':', '_')
	# Convolution kernels (rank > 2) are reordered so the output-channel
	# axis comes first; dense weights are simply transposed.
	if len(var_values.shape) > 2:
		transposed_wts = np.transpose(var_values, (3, 0, 1, 2))
	else:
		transposed_wts = np.transpose(var_values)
	with open(header_path, 'a') as f:
		f.write('#define ' + var_name + ' {')
		# tofile writes the raw values only (no shape/dtype metadata).
		transposed_wts.tofile(f, sep=", ", format="%d")
		f.write('}\n')
	# De-quantize and assign back so evaluation sees the quantized weights.
	var_values = var_values / (2 ** dec_bits)
	var_values = sess.run(tf.assign(v, var_values))
	# Report shape, chosen format, and pre/post-quantization extrema.
	print(var_name + ' number of wts/bias: ' + str(var_values.shape) +
			' dec bits: ' + str(dec_bits) +
			' max: (' + str(var_values.max()) + ',' + str(max_value) + ')' +
			' min: (' + str(var_values.min()) + ',' + str(min_value) + ')')


def _evaluate_split(set_name, label, sess, audio_processor, model_settings,
			batch_size, fingerprint_input, ground_truth_input,
			evaluation_step, confusion_matrix):
	"""Compute accuracy and confusion matrix over one dataset split.

	Args:
		set_name: AudioProcessor split key ('training'/'validation'/'testing').
		label: Human-readable split name used in the log line.
		(remaining args: session, data source, and the graph tensors built
		by run_quant_inference)
	"""
	set_size = audio_processor.set_size(set_name)
	tf.logging.info('set_size=%d', set_size)
	total_accuracy = 0
	total_conf_matrix = None
	# Walk the split in batches; the final batch may be smaller, so each
	# batch accuracy is weighted by its actual size.
	for i in range(0, set_size, batch_size):
		fingerprints, ground_truth = audio_processor.get_data(
			batch_size, i, model_settings, 0.0, 0.0, 0, set_name, sess)
		accuracy, conf_matrix = sess.run(
			[evaluation_step, confusion_matrix],
			feed_dict={
				fingerprint_input: fingerprints,
				ground_truth_input: ground_truth,
			})
		this_batch = min(batch_size, set_size - i)
		total_accuracy += (accuracy * this_batch) / set_size
		if total_conf_matrix is None:
			total_conf_matrix = conf_matrix
		else:
			total_conf_matrix += conf_matrix
	tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
	tf.logging.info('%s accuracy = %.2f%% (N=%d)' %
			(label, total_accuracy * 100, set_size))

def main(_):
	"""Entry point for tf.app.run: quantize the checkpointed model and
	evaluate it on the training/validation/testing splits using FLAGS."""
	run_quant_inference(
		FLAGS.wanted_words,
		FLAGS.sample_rate,
		FLAGS.clip_duration_ms,
		FLAGS.window_size_ms,
		FLAGS.window_stride_ms,
		FLAGS.dct_coefficient_count,
		FLAGS.model_architecture,
		FLAGS.model_size_info)


if __name__ == '__main__':
	# Command-line flags; defaults must match the ones used for training.
	parser = argparse.ArgumentParser()
	parser.add_argument(
		'--data_url',
		type=str,
		# pylint: disable=line-too-long
		default='',
		# pylint: enable=line-too-long
		help='Location of speech training data archive on the web.')
	parser.add_argument(
		'--data_dir',
		type=str,
		default='G:\\wav_data',
		help="""\
		Where is the the speech training data.
		""")
	parser.add_argument(
		'--silence_percentage',
		type=float,
		default=10.0,
		help="""\
		How much of the training data should be silence.
		""")
	parser.add_argument(
		'--unknown_percentage',	# fraction of samples drawn from "unknown" words
		type=float,
		default=10.0,
		help="""\
		How much of the training data should be unknown words.
		""")
	parser.add_argument(
		'--testing_percentage',
		type=int,
		default=10,
		help='What percentage of wavs to use as a test set.')
	parser.add_argument(
		'--validation_percentage',
		type=int,
		default=10,
		help='What percentage of wavs to use as a validation set.')
	parser.add_argument(
		'--sample_rate',
		type=int,
		default=16000,
		help='Expected sample rate of the wavs',)
	parser.add_argument(
		'--clip_duration_ms',
		type=int,
		default=1000,
		help='Expected duration in milliseconds of the wavs',)
	parser.add_argument(      # default frame length: 40 ms
		'--window_size_ms',
		type=float,
		default=40.0,
		help='How long each spectrogram timeslice is',)
	parser.add_argument(        # default frame stride: 20 ms
		'--window_stride_ms',
		type=float,
		default=20.0,
		# Fixed help text: this flag is the stride between frames, not the
		# frame length (was a copy-paste of --window_size_ms's help).
		help='How far to move in time between spectrogram timeslices',)
	parser.add_argument(
		'--dct_coefficient_count',
		type=int,
		default=40,
		help='How many bins to use for the MFCC fingerprint',)
	parser.add_argument(
		'--batch_size',
		type=int,
		default=100,
		help='How many items to train with at once',)
	parser.add_argument(
		'--wanted_words',
		type=str,
		default='yes,no,up,down,left,right,on,off,stop,go,bird,cat,dog,wow,marvin',
		help='Words to use (others will be added to an unknown label)',)
	parser.add_argument(
		'--checkpoint',
		type=str,
		default='C:\\Users\\WangS\\Desktop\\Bishe\\src\\CNN\\my_model\\best\\',
		help='Checkpoint to load the weights from.')
	parser.add_argument(
		'--model_architecture',
		type=str,
		default='low_latency_conv',
		help='What model architecture to use')
	parser.add_argument(
		'--model_size_info',
		type=int,
		nargs="+",
		default=[128, 128, 128, 128],
		help='Model dimensions - different for various models')
	parser.add_argument(
		'--act_max',
		type=float,
		nargs="+",
		# Float literals to match type=float (values unchanged).
		default=[128.0, 128.0, 128.0],
		help='activations max')

	# Unrecognized flags are forwarded to tf.app.run untouched.
	FLAGS, unparsed = parser.parse_known_args()
	tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
