import tensorflow as tf

import InputDataMnist
import Layers


NUM_CLASSES = 10


def interface_get_dataset(flag_tt_network = False):
	"""Return the MNIST dataset; the TT-formatted variant when flag_tt_network is exactly True.

	NOTE: the check is an identity comparison with True (not truthiness),
	matching the original contract — any non-True value selects the plain dataset.
	"""
	if flag_tt_network is True:
		return InputDataMnist.get_dataset_tt()
	return InputDataMnist.get_dataset()


def interface_get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Thin pass-through to InputDataMnist.get_batch_part_train (training feed construction)."""
	feed = InputDataMnist.get_batch_part_train(
		dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size)
	return feed


def interface_get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network = False):
	"""Thin pass-through to InputDataMnist.get_batch_part_validation (validation feed construction)."""
	feed = InputDataMnist.get_batch_part_validation(
		dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network)
	return feed


# Get the TT (tensor-train) order.
def get_tensor_order():
	"""Return the number of TT dimensions used by the input pipeline."""
	num_dimensions = InputDataMnist.NUM_DIM
	return num_dimensions


# Plain (uncompressed) network structure, used for training and as the baseline for compression.
def network(data, labels, b_reuse, tfv_train_phase = None, name = None):
	"""Build the 3-hidden-layer fully connected classifier plus loss/eval ops.

	Args:
		data: input batch tensor fed to the first linear layer.
		labels: integer class labels for sparse softmax cross entropy.
		b_reuse: reuse flag passed to tf.variable_scope.
		tfv_train_phase: optional tensor; when given, dropout keep probability
			is 0.9 while it evaluates to 1 (training) and 1.0 otherwise.
			When None, dropout is disabled (keep probability 1.0).
		name: optional suffix; the variable scope becomes 'network_<name>'.

	Returns:
		(total_loss, evaluation): total_loss has a control dependency on the
		exponential-moving-average update of the losses; evaluation is an
		int32 tensor of per-example top-1 correctness flags.
	"""
	# Bug fix: the original left name as None when no suffix was given, which
	# crashed below on 'softmax_xentropy' + name (TypeError). Fall back to the
	# bare 'network' scope; callers passing a suffix are unaffected.
	if name is not None:
		name = 'network' + '_' + name
	else:
		name = 'network'

	# dropout_rate(p) yields the keep probability: p during training, 1.0 otherwise.
	# (tf.to_float is the TF1 API; tf.cast(..., tf.float32) in later versions.)
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	with tf.variable_scope(name, reuse = b_reuse):
		l_layers = []
		l_layers.append(data)

		# Three relu + dropout hidden layers: 1296 -> 4096 -> 256.
		l_layers.append(Layers.linear(l_layers[-1], 1296, name_scope = 'linear_1'))
		l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_1'))
		l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_1'))

		l_layers.append(Layers.linear(l_layers[-1], 4096, name_scope = 'linear_2'))
		l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_2'))
		l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_2'))

		l_layers.append(Layers.linear(l_layers[-1], 256, name_scope = 'linear_3'))
		l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_3'))
		l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_3'))

		# Output logits, one per class.
		l_layers.append(Layers.linear(l_layers[-1], NUM_CLASSES, name_scope = 'linear_out'))

	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = l_layers[-1], name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	# Track moving averages of the losses; returning total_loss forces the update.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)
	correct_flags = tf.nn.in_top_k(l_layers[-1], labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# Compressed (tensor-train) network structure; the input data is a list of NUM_DIM tensors.
def network_tt(l_data, labels, name = None):
	"""Build the TT-compressed classifier plus loss/eval ops.

	Args:
		l_data: list of input tensors, one per TT dimension.
		labels: integer class labels for sparse softmax cross entropy.
		name: optional suffix; the variable scope becomes 'network_<name>_tt'.

	Returns:
		(total_loss, evaluation): total_loss has a control dependency on the
		exponential-moving-average update of the losses; evaluation is an
		int32 tensor of per-example top-1 correctness flags.
	"""
	# Bug fix: the original left name as None when no suffix was given, which
	# crashed below on 'softmax_xentropy' + name (TypeError). Fall back to a
	# bare 'network_tt' scope; callers passing a suffix are unaffected.
	if name is not None:
		name = 'network' + '_' + name + '_tt'
	else:
		name = 'network_tt'

	with tf.variable_scope(name):
		l_layers = []
		l_layers.append(l_data)

		# Each TT block: linear in TT format, rank rounding, bias, relu.
		l_layers.append(Layers.tt_linear(l_layers[-1], [6, 6, 6, 6], name_scope = 'linear_1'))
		l_layers.append(Layers.tt_rounding(l_layers[-1], name_scope = 'rounding_1'))
		l_layers.append(Layers.tt_biases(l_layers[-1], name_scope = 'bias_1'))
		l_layers.append(Layers.tt_relu(l_layers[-1], name_scope = 'relu_linear_1'))

		l_layers.append(Layers.tt_linear(l_layers[-1], [8, 8, 8, 8], name_scope = 'linear_2'))
		l_layers.append(Layers.tt_rounding(l_layers[-1], name_scope = 'rounding_2'))
		l_layers.append(Layers.tt_biases(l_layers[-1], name_scope = 'bias_2'))
		l_layers.append(Layers.tt_relu(l_layers[-1], name_scope = 'relu_linear_2'))

		l_layers.append(Layers.tt_linear(l_layers[-1], [4, 4, 4, 4], name_scope = 'linear_3'))
		l_layers.append(Layers.tt_rounding(l_layers[-1], name_scope = 'rounding_3'))
		l_layers.append(Layers.tt_biases(l_layers[-1], name_scope = 'bias_3'))
		l_layers.append(Layers.tt_relu(l_layers[-1], name_scope = 'relu_linear_3'))
		# Contract the TT cores back into a dense tensor for the output layer.
		l_layers.append(Layers.tt_contract(l_layers[-1], name_scope = 'contract_3'))

		l_layers.append(Layers.linear(l_layers[-1], NUM_CLASSES, name_scope = 'linear_out'))

	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = l_layers[-1], name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	# Track moving averages of the losses; returning total_loss forces the update.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)
	correct_flags = tf.nn.in_top_k(l_layers[-1], labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# Get the names of all weight variables in the plain network structure.
def get_network_names():
	"""Return the weight/bias variable names of the plain network, in layer order."""
	prefix = 'network_mnist'
	layer_scopes = ('linear_1', 'linear_2', 'linear_3', 'linear_out')
	names = []
	for scope in layer_scopes:
		for var_name in ('var_weights:0', 'var_biases:0'):
			names.append('%s/%s/%s' % (prefix, scope, var_name))
	return names


# Get the plain network's outputs, covering both training and validation.
def get_network_output(dict_mean_std, flag_batch_size, tfv_train_phase):
	"""Build the training and validation graphs for the plain network.

	Returns (input_placeholders, loss_train, eval_train, loss_validation, eval_validation).
	"""
	dict_inputs_batches = InputDataMnist.construct_batch_part(dict_mean_std, flag_batch_size)
	batches = dict_inputs_batches['batches']

	with tf.device('/gpu:0'):
		# Training graph creates the variables; the validation graph reuses them.
		loss_train, eval_train = network(
			batches['batch_train_data'], batches['batch_train_labels'],
			False, tfv_train_phase, 'mnist')
		loss_validation, eval_validation = network(
			batches['batch_validation_data'], batches['batch_validation_labels'],
			True, tfv_train_phase, 'mnist')

	return dict_inputs_batches['input_placeholders'], loss_train, eval_train, loss_validation, eval_validation


# Get the plain network's validation outputs.
def get_network_normal(dict_mean_std, flag_batch_size):
	"""Build the validation-only graph for the plain network.

	Returns (input_placeholders, loss_validation, eval_validation).
	"""
	dict_inputs_batches = InputDataMnist.construct_batch_normal(dict_mean_std, flag_batch_size)
	batches = dict_inputs_batches['batches']

	loss_validation, eval_validation = network(
		batches['batch_validation_data'], batches['batch_validation_labels'],
		False, name = 'mnist')

	return dict_inputs_batches['input_placeholders'], loss_validation, eval_validation


# Get the TT network's validation outputs.
def get_network_tt(dict_mean_std, flag_batch_size):
	"""Build the validation-only graph for the TT-compressed network.

	Returns (input_placeholders, loss_validation, eval_validation).
	"""
	dict_inputs_batches = InputDataMnist.construct_batch_tt(dict_mean_std, flag_batch_size)
	batches = dict_inputs_batches['batches']

	v_labels = batches['batch_validation_labels']
	# One input tensor per TT dimension, keyed batch_validation_data_1 .. _NUM_DIM.
	v_data = [batches['batch_validation_data_%d' % (i + 1)]
		for i in range(InputDataMnist.NUM_DIM)]
	loss_validation, eval_validation = network_tt(v_data, v_labels, name = 'mnist')

	return dict_inputs_batches['input_placeholders'], loss_validation, eval_validation
