'''
Builds the speech-command model graph according to the configured parameters.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs

def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
				window_size_ms, window_stride_ms,
				dct_coefficient_count):
	"""Derive the model's geometry from the physical audio settings.

	Converts millisecond durations into sample counts at the given sample
	rate, then works out how many spectrogram frames fit into one clip and
	how long the flattened feature vector ("fingerprint") is.

	Args:
		label_count: number of output classes.
		sample_rate: audio sample rate in Hz.
		clip_duration_ms: length of each audio clip, in milliseconds.
		window_size_ms: analysis window length, in milliseconds.
		window_stride_ms: hop between successive windows, in milliseconds.
		dct_coefficient_count: number of DCT coefficients kept per frame.

	Returns:
		Dict of derived settings consumed by the model builders.
	"""
	samples_per_clip = int(sample_rate * clip_duration_ms / 1000)
	samples_per_window = int(sample_rate * window_size_ms / 1000)
	samples_per_stride = int(sample_rate * window_stride_ms / 1000)
	# If the clip is shorter than one window, no frames can be produced.
	leftover = samples_per_clip - samples_per_window
	frame_count = 0 if leftover < 0 else 1 + int(leftover / samples_per_stride)
	return {
		'desired_samples': samples_per_clip,   # samples per clip
		'window_size_samples': samples_per_window, # samples per analysis window
		'window_stride_samples': samples_per_stride,   # samples per hop
		'spectrogram_length': frame_count,     # number of spectrogram frames
		'dct_coefficient_count': dct_coefficient_count,
		'fingerprint_size': dct_coefficient_count * frame_count,   # flattened feature length
		'label_count': label_count,   # number of labels
		'sample_rate': sample_rate,
	}


def create_model(fingerprint_input, model_settings, model_architecture,
			model_size_info, is_training, runtime_settings=None):
	'''Dispatch to the builder for the requested model architecture.

	Args:
		fingerprint_input: 2-D tensor of flattened audio features.
		model_settings: dict produced by prepare_model_settings().
		model_architecture: one of 'single_fc', 'conv', 'low_latency_conv',
			'dnn' or 'cnn'.
		model_size_info: per-architecture size parameters (used by the
			'dnn' and 'cnn' builders).
		is_training: whether to build the training graph (adds a dropout
			placeholder that the builder also returns).
		runtime_settings: unused; kept for interface compatibility.

	Returns:
		The selected builder's result: logits, plus the dropout placeholder
		when is_training is True.

	Raises:
		ValueError: if model_architecture is not one of the supported names.
	'''
	builders = {
		'single_fc': lambda: create_single_fc_model(
			fingerprint_input, model_settings, is_training),
		'conv': lambda: create_conv_model(
			fingerprint_input, model_settings, is_training),
		'low_latency_conv': lambda: create_low_latency_conv_model(
			fingerprint_input, model_settings, is_training),
		'dnn': lambda: create_dnn_model(
			fingerprint_input, model_settings, model_size_info, is_training),
		'cnn': lambda: create_cnn_model(
			fingerprint_input, model_settings, model_size_info, is_training),
	}
	if model_architecture not in builders:
		# The previous message advertised architectures ("lstm", "gru",
		# "ds_cnn", ...) that this file does not implement; list only the
		# ones that actually exist here.
		raise ValueError('model_architecture argument "' + model_architecture +
					'" not recognized, should be one of "single_fc", "conv",' +
					' "low_latency_conv", "dnn" or "cnn"')
	return builders[model_architecture]()


def load_variables_from_checkpoint(sess, start_checkpoint):
	"""Restore all global variables from a previously saved checkpoint.

	Args:
		sess: TensorFlow session that owns the variables to restore.
		start_checkpoint: passed through tf.train.latest_checkpoint, so it
			is treated as a checkpoint *directory* whose newest checkpoint
			is loaded. NOTE(review): the parameter name suggests a specific
			checkpoint file path — confirm what callers actually pass.
	"""
	saver = tf.train.Saver(tf.global_variables())
	saver.restore(sess, tf.train.latest_checkpoint(start_checkpoint))


def create_single_fc_model(fingerprint_input, model_settings, is_training):
	"""Build the simplest possible model: a single fully-connected layer.

	      (fingerprint input)
	              v
	          [MatMul]<-(weights)
	              v
	          [BiasAdd]<-(bias)
	              v

	Args:
		fingerprint_input: 2-D tensor of flattened audio features.
		model_settings: dict from prepare_model_settings().
		is_training: when True, also creates and returns the 'dropout_prob'
			placeholder (unused by this model, kept for a uniform interface).

	Returns:
		(logits, dropout_prob) when is_training, otherwise logits alone.
	"""
	if is_training:
		dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
	input_dim = model_settings['fingerprint_size']
	output_dim = model_settings['label_count']
	weights = tf.Variable(
			tf.truncated_normal([input_dim, output_dim], stddev=0.001))
	bias = tf.Variable(tf.zeros([output_dim]))
	logits = tf.matmul(fingerprint_input, weights) + bias
	return (logits, dropout_prob) if is_training else logits


def create_conv_model(fingerprint_input, model_settings, is_training):
	"""Build the standard 'cnn-trad-fpool3' convolutional model.

	Architecture from:
	http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf

	      (fingerprint input)
	              v
	          [Conv2D]<-(weights)
	              v
	          [BiasAdd]<-(bias)
	              v
	           [Relu]
	              v
	          [MaxPool]
	              v
	          [Conv2D]<-(weights)
	              v
	          [BiasAdd]<-(bias)
	              v
	           [Relu]
	              v
	          [MaxPool]
	              v
	          [MatMul]<-(weights)
	              v
	          [BiasAdd]<-(bias)
	              v
	          [MatMul]<-(weights)
	              v
	          [BiasAdd]<-(bias)
	              v

	Args:
		fingerprint_input: 2-D tensor of flattened audio features,
			shape [batch, fingerprint_size].
		model_settings: dict from prepare_model_settings().
		is_training: when True, creates the 'dropout_prob' placeholder and
			applies dropout after each conv layer.

	Returns:
		(final_fc, dropout_prob) when is_training, otherwise final_fc.
	"""
	if is_training:
		dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
	input_frequency_size = model_settings['dct_coefficient_count']
	input_time_size = model_settings['spectrogram_length']
	# Restore the flat fingerprint to [batch, time, frequency, 1] for conv2d.
	fingerprint_4d = tf.reshape(fingerprint_input,
	                            [-1, input_time_size, input_frequency_size, 1])
	first_filter_width = 8
	first_filter_height = 20
	first_filter_count = 32
	first_weights = tf.Variable(
			tf.truncated_normal(
				[first_filter_height, first_filter_width, 1, first_filter_count],
				stddev=0.01),name='first_conv_weights')
	first_bias = tf.Variable(tf.zeros([first_filter_count]),name='first_conv_bias')
	# 'SAME' padding with stride 1 keeps the time/frequency dimensions.
	first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [1, 1, 1, 1],
					'SAME') + first_bias
	first_relu = tf.nn.relu(first_conv)
	if is_training:
		first_dropout = tf.nn.dropout(first_relu, dropout_prob)
	else:
		first_dropout = first_relu
	# 2x2 max-pool halves both spatial dimensions.
	max_pool = tf.nn.max_pool(first_dropout, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
	second_filter_width = 4
	second_filter_height = 10
	second_filter_count = 32
	second_weights = tf.Variable(
		tf.truncated_normal(
				[
					second_filter_height, second_filter_width, first_filter_count,
					second_filter_count
				],
		stddev=0.01),name='second_conv_weights')
	second_bias = tf.Variable(tf.zeros([second_filter_count]),name='second_conv_bias')
	second_conv = tf.nn.conv2d(max_pool, second_weights, [1, 1, 1, 1],
	                           'SAME') + second_bias
	second_relu = tf.nn.relu(second_conv)
	if is_training:
		second_dropout = tf.nn.dropout(second_relu, dropout_prob)
	else:
		second_dropout = second_relu
	# Flatten the conv output so it can feed the fully-connected layers;
	# the static shape is known because strides/padding are fixed above.
	second_conv_shape = second_dropout.get_shape()
	second_conv_output_width = second_conv_shape[2]
	second_conv_output_height = second_conv_shape[1]
	second_conv_element_count = int(
			second_conv_output_width * second_conv_output_height *
			second_filter_count)
	flattened_second_conv = tf.reshape(second_dropout,
						[-1, second_conv_element_count])
	label_count = model_settings['label_count']
	num_first_fc = 128;
	first_fc_weights = tf.Variable(
			tf.truncated_normal(
					[second_conv_element_count, num_first_fc], stddev=0.01),name='first_fc_weights')
	first_fc_bias = tf.Variable(tf.zeros([num_first_fc]),name='first_fc_bias')
	# NOTE(review): no non-linearity or dropout between the two FC layers —
	# confirm this matches the intended paper configuration.
	first_fc = tf.matmul(flattened_second_conv, first_fc_weights) + first_fc_bias
	final_fc_weights = tf.Variable(
			tf.truncated_normal(
				[num_first_fc, label_count], stddev = 0.01), name= 'final_fc_weights')
	final_fc_bias = tf.Variable(tf.zeros([label_count]), name='final_fc_bias')
	final_fc = tf.matmul(first_fc, final_fc_weights) + final_fc_bias

	if is_training:
		return final_fc, dropout_prob
	else:
		return final_fc


def create_low_latency_conv_model(fingerprint_input, model_settings,
					is_training):
	"""Build the 'cnn-one-fstride4' model from the same paper; fewer ops.

	A single convolution followed by three fully-connected layers, trading
	some accuracy for lower compute than create_conv_model:

	    (fingerprint input)
	            v
	        [Conv2D]<-(weights)
	            v
	        [BiasAdd]<-(bias)
	            v
	         [Relu]
	            v
	        [MatMul]<-(weights)
	            v
	        [BiasAdd]<-(bias)
	            v
	        [MatMul]<-(weights)
	            v
	        [BiasAdd]<-(bias)
	            v
	        [MatMul]<-(weights)
	            v
	        [BiasAdd]<-(bias)
	            v

	Args:
		fingerprint_input: 2-D tensor of flattened audio features.
		model_settings: dict from prepare_model_settings().
		is_training: when True, creates the 'dropout_prob' placeholder and
			applies dropout between layers.

	Returns:
		(final_fc, dropout_prob) when is_training, otherwise final_fc.
	"""
	if is_training:
		dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
	input_frequency_size = model_settings['dct_coefficient_count']
	input_time_size = model_settings['spectrogram_length']
	# Restore the flat fingerprint to [batch, time, frequency, 1] for conv2d.
	fingerprint_4d = tf.reshape(fingerprint_input,
	                            [-1, input_time_size, input_frequency_size, 1])
	first_filter_width = 8
	first_filter_height = 32
	first_filter_count = 64
	first_filter_stride_x = 1
	first_filter_stride_y = 1
	first_weights = tf.Variable(
			tf.truncated_normal(
				[first_filter_height, first_filter_width, 1, first_filter_count],
				stddev=0.01), name='first_conv_w')
	first_bias = tf.Variable(tf.zeros([first_filter_count]),name='first_conv_b')
	first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
			1, first_filter_stride_y, first_filter_stride_x, 1
	], 'VALID') + first_bias
	first_relu = tf.nn.relu(first_conv)
	if is_training:
		first_dropout = tf.nn.dropout(first_relu, dropout_prob)
	else:
		first_dropout = first_relu
	# 'VALID' output size: floor((input - filter) / stride) + 1, written
	# here as floor((input - filter + stride) / stride).
	first_conv_output_width = math.floor(
			(input_frequency_size - first_filter_width + first_filter_stride_x) /
			first_filter_stride_x)
	first_conv_output_height = math.floor(
			(input_time_size - first_filter_height + first_filter_stride_y) /
			first_filter_stride_y)
	first_conv_element_count = int(
			first_conv_output_width * first_conv_output_height * first_filter_count)
	flattened_first_conv = tf.reshape(first_dropout,
	                                  [-1, first_conv_element_count])
	first_fc_output_channels = 128
	first_fc_weights = tf.Variable(
			tf.truncated_normal(
				[first_conv_element_count, first_fc_output_channels], stddev=0.01),name='first_fc_w')
	first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]),name='first_fc_b')
	first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
	if is_training:
		second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
	else:
		second_fc_input = first_fc
	second_fc_output_channels = 128
	second_fc_weights = tf.Variable(
			tf.truncated_normal(
				[first_fc_output_channels, second_fc_output_channels], stddev=0.01),name='second_fc_w')
	second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]),name='second_fc_bias')
	second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
	if is_training:
		final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
	else:
		final_fc_input = second_fc
	label_count = model_settings['label_count']
	final_fc_weights = tf.Variable(
			tf.truncated_normal(
				[second_fc_output_channels, label_count], stddev=0.01),name='final_fc_w')
	final_fc_bias = tf.Variable(tf.zeros([label_count]),name='final_fc_bias')
	final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
	if is_training:
		return final_fc, dropout_prob
	else:
		return final_fc

def create_dnn_model(fingerprint_input, model_settings, model_size_info,
					is_training):
	"""Build a multi-layer fully-connected model.

	The hidden-layer widths come straight from model_size_info: one relu
	(+ dropout, when training) layer per entry, followed by a final linear
	projection onto the label classes.

	Args:
		fingerprint_input: 2-D tensor of flattened audio features.
		model_settings: dict from prepare_model_settings().
		model_size_info: list of hidden-layer sizes, one per layer.
		is_training: when True, creates the 'dropout_prob' placeholder and
			applies dropout after every hidden layer.

	Returns:
		(logits, dropout_prob) when is_training, otherwise logits.
	"""
	if is_training:
		dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
	fingerprint_size = model_settings['fingerprint_size']
	label_count = model_settings['label_count']
	num_layers = len(model_size_info)
	# Layer widths: input size followed by each hidden size.
	layer_dim = [fingerprint_size] + list(model_size_info)
	net = fingerprint_input
	tf.summary.histogram('input', net)
	for i in range(1, num_layers + 1):
		with tf.variable_scope('fc'+str(i)):
			W = tf.get_variable('W', shape=[layer_dim[i-1], layer_dim[i]],
					initializer=tf.contrib.layers.xavier_initializer())
			tf.summary.histogram('fc_'+str(i)+'_w', W)
			b = tf.get_variable('b', shape=[layer_dim[i]])
			tf.summary.histogram('fc_'+str(i)+'_b', b)
			net = tf.nn.relu(tf.matmul(net, W) + b)
			if is_training:
				net = tf.nn.dropout(net, dropout_prob)

	final_weights = tf.get_variable('final_fc', shape=[layer_dim[-1], label_count],
				initializer=tf.contrib.layers.xavier_initializer())
	final_bias = tf.Variable(tf.zeros([label_count]))
	logits = tf.matmul(net, final_weights) + final_bias
	return (logits, dropout_prob) if is_training else logits

def create_cnn_model(fingerprint_input, model_settings, model_size_info,
	                     is_training):
	"""Build a two-conv-layer CNN followed by fully-connected layers.

	All layer geometry is taken from model_size_info (12 entries):
	[0-4]  first conv:  count, height, width, stride_y, stride_x
	[5-9]  second conv: count, height, width, stride_y, stride_x
	[10]   linear (bottleneck) layer size
	[11]   fully-connected layer size

	Args:
		fingerprint_input: 2-D tensor of flattened audio features.
		model_settings: dict from prepare_model_settings().
		model_size_info: list of 12 ints as described above.
		is_training: when True, creates the 'dropout_prob' placeholder,
			applies dropout, and puts batch-norm layers in training mode.

	Returns:
		(final_fc, dropout_prob) when is_training, otherwise final_fc.
	"""
	if is_training:
		dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
	input_frequency_size = model_settings['dct_coefficient_count']
	input_time_size = model_settings['spectrogram_length']
	# Restore the flat fingerprint to [batch, time, frequency, 1] for conv2d.
	fingerprint_4d = tf.reshape(fingerprint_input,
					[-1, input_time_size, input_frequency_size, 1])

	first_filter_count = model_size_info[0]
	first_filter_height = model_size_info[1]
	first_filter_width = model_size_info[2]
	first_filter_stride_y = model_size_info[3]
	first_filter_stride_x = model_size_info[4]

	second_filter_count = model_size_info[5]
	second_filter_height = model_size_info[6]
	second_filter_width = model_size_info[7]
	second_filter_stride_y = model_size_info[8]
	second_filter_stride_x = model_size_info[9]
	
	linear_layer_size = model_size_info[10]
	fc_size = model_size_info[11]

	# First convolutional layer.
	first_weights = tf.Variable(
		tf.truncated_normal(
			[first_filter_height, first_filter_width, 1, first_filter_count],
			stddev=0.01))
	first_bias = tf.Variable(tf.zeros([first_filter_count]))
	first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
			1, first_filter_stride_y, first_filter_stride_x, 1
	], 'VALID') + first_bias
	# NOTE(review): batch_normalization registers moving-average updates in
	# tf.GraphKeys.UPDATE_OPS — confirm the training script adds those to
	# the train op, otherwise inference statistics never update.
	first_conv = tf.layers.batch_normalization(first_conv, training=is_training,
				name='bn1')
	first_relu = tf.nn.relu(first_conv)
	if is_training:
		first_dropout = tf.nn.dropout(first_relu, dropout_prob)
	else:
		first_dropout = first_relu
	# 'VALID' output size: ceil((input - filter + 1) / stride).
	first_conv_output_width = math.ceil(
			(input_frequency_size - first_filter_width + 1) /
			first_filter_stride_x)
	first_conv_output_height = math.ceil(
			(input_time_size - first_filter_height + 1) /
			first_filter_stride_y)

	# Second convolutional layer.
	second_weights = tf.Variable(
		tf.truncated_normal(
			[second_filter_height, second_filter_width, first_filter_count, 
				second_filter_count],
				stddev=0.01))
	second_bias = tf.Variable(tf.zeros([second_filter_count]))
	second_conv = tf.nn.conv2d(first_dropout, second_weights, [
			1, second_filter_stride_y, second_filter_stride_x, 1
	], 'VALID') + second_bias
	second_conv = tf.layers.batch_normalization(second_conv, training=is_training,
				name='bn2')
	second_relu = tf.nn.relu(second_conv)
	if is_training:
		second_dropout = tf.nn.dropout(second_relu, dropout_prob)
	else:
		second_dropout = second_relu
	second_conv_output_width = math.ceil(
		(first_conv_output_width - second_filter_width + 1) /
		second_filter_stride_x)
	second_conv_output_height = math.ceil(
		(first_conv_output_height - second_filter_height + 1) /
		second_filter_stride_y)

	second_conv_element_count = int(
		second_conv_output_width*second_conv_output_height*second_filter_count)
	flattened_second_conv = tf.reshape(second_dropout,
						[-1, second_conv_element_count])

	# Fully-connected layers.
	# NOTE(review): 'W'/'b' are created via tf.get_variable without an
	# enclosing variable_scope — building this model twice in one graph
	# (or alongside another un-scoped model) will raise a name clash.
	W = tf.get_variable('W', shape=[second_conv_element_count, linear_layer_size],
		initializer=tf.contrib.layers.xavier_initializer())
	b = tf.get_variable('b', shape=[linear_layer_size])
	# Linear bottleneck: no activation before the next layer.
	flow = tf.matmul(flattened_second_conv, W) + b

	first_fc_output_channels = fc_size
	first_fc_weights = tf.Variable(
		tf.truncated_normal(
			[linear_layer_size, first_fc_output_channels], stddev=0.01))
	first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
	first_fc = tf.matmul(flow, first_fc_weights) + first_fc_bias
	first_fc = tf.layers.batch_normalization(first_fc, training=is_training, 
				name='bn3')
	first_fc = tf.nn.relu(first_fc)
	if is_training:
		final_fc_input = tf.nn.dropout(first_fc, dropout_prob)
	else:
		final_fc_input = first_fc
	label_count = model_settings['label_count']
	final_fc_weights = tf.Variable(
		tf.truncated_normal(
			[first_fc_output_channels, label_count], stddev=0.01))
	final_fc_bias = tf.Variable(tf.zeros([label_count]))
	final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
	if is_training:
		return final_fc, dropout_prob
	else:
		return final_fc
