import tensorflow as tf

import InputDataMnist
import Layers


# Number of output classes (MNIST digits 0-9); width of the final linear layer.
NUM_CLASSES = 10


def interface_get_dataset(flag_tt_network = False):
	"""Fetch the dataset from the InputDataMnist backend.

	flag_tt_network is forwarded unchanged to InputDataMnist.get_dataset.
	"""
	dataset = InputDataMnist.get_dataset(flag_tt_network)
	return dataset


def interface_riemannian_data(ph_input_data):
	"""Delegate Riemannian-data construction to the InputDataMnist backend."""
	result = InputDataMnist.riemannian_data(ph_input_data)
	return result


def interface_save_tt_data(l_tt_data, b_t_or_v):
	"""Persist TT data via the InputDataMnist backend.

	Both arguments are forwarded unchanged to InputDataMnist.save_tt_data.
	"""
	saved = InputDataMnist.save_tt_data(l_tt_data, b_t_or_v)
	return saved


def interface_save_tt_dataset(l_tt_mean, l_tt_std, arr_train_labels, arr_validation_labels):
	"""Persist a full TT dataset (means, stds, labels) via InputDataMnist."""
	outcome = InputDataMnist.save_tt_dataset(
		l_tt_mean, l_tt_std, arr_train_labels, arr_validation_labels)
	return outcome


def interface_get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network):
	"""Fetch one training batch part from the InputDataMnist backend.

	All arguments are forwarded unchanged to
	InputDataMnist.get_batch_part_train.
	"""
	batch_part = InputDataMnist.get_batch_part_train(
		dict_dataset, dict_mean_std, dict_placeholders,
		n_index_head, flag_batch_size, flag_tt_network)
	return batch_part


def interface_get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network):
	"""Fetch one validation batch part from the InputDataMnist backend.

	All arguments are forwarded unchanged to
	InputDataMnist.get_batch_part_validation.
	"""
	batch_part = InputDataMnist.get_batch_part_validation(
		dict_dataset, dict_mean_std, dict_placeholders,
		n_index_head, flag_batch_size, flag_tt_network)
	return batch_part


def network_normal(data, labels, b_reuse, tfv_train_phase = None, name = None):
	"""Build the dense (non-TT) classifier graph plus its loss and eval ops.

	data: input tensor fed to the first linear layer.
	labels: sparse integer class labels for the batch.
	b_reuse: reuse flag passed to tf.variable_scope.
	tfv_train_phase: optional tensor/variable; when given, dropout keep_prob
		is 0.9 while it evaluates to 1 (training) and 1.0 otherwise. When
		None, dropout is disabled (keep_prob 1.0).
	name: optional suffix; the variable scope and op names use
		'network_normal_<name>'. Defaults to 'network_normal' when None.

	Returns (total_loss, evaluation): total_loss is the cross-entropy mean
	gated on the moving-average update op; evaluation is an int32 tensor of
	per-example top-1 correctness flags.
	"""
	# Bug fix: a None name previously survived to the string concatenations
	# below ('losses' + name, etc.) and raised TypeError; default it instead.
	if name is not None:
		name = 'network_normal' + '_' + name
	else:
		name = 'network_normal'

	# Dropout keep_prob: p during training, 1.0 at inference / when no
	# train-phase variable is supplied.
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	with tf.variable_scope(name, reuse = b_reuse):
		l_layers = []
		l_layers.append(data)

		# Three hidden linear+ReLU+dropout stages, then the logits layer.
		l_layers.append(Layers.linear(l_layers[-1], 1296, name_scope = 'linear_1'))
		l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_1'))
		l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_1'))

		l_layers.append(Layers.linear(l_layers[-1], 4096, name_scope = 'linear_2'))
		l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_2'))
		l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_2'))

		l_layers.append(Layers.linear(l_layers[-1], 256, name_scope = 'linear_3'))
		l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_3'))
		l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_3'))

		l_layers.append(Layers.linear(l_layers[-1], NUM_CLASSES, name_scope = 'linear_out'))

	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = l_layers[-1], name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	# Track moving averages of the losses; fetching total_loss also runs the
	# average-update op via the control dependency below.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)
	correct_flags = tf.nn.in_top_k(l_layers[-1], labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


def network_tt(data, labels, b_reuse, tfv_train_phase = None, name = None):
	"""Build the tensor-train (TT) classifier graph plus its loss and eval ops.

	data: TT-format input (list of cores — see InputDataMnist) fed to the
		first TT linear layer.
	labels: sparse integer class labels for the batch.
	b_reuse: reuse flag passed to tf.variable_scope.
	tfv_train_phase: optional tensor/variable; when given, dropout keep_prob
		is 0.9 while it evaluates to 1 (training) and 1.0 otherwise. When
		None, dropout is disabled (keep_prob 1.0).
	name: optional suffix; the variable scope and op names use
		'network_tt_<name>'. Defaults to 'network_tt' when None.

	Returns (total_loss, evaluation): total_loss is the cross-entropy mean
	gated on the moving-average update op; evaluation is an int32 tensor of
	per-example top-1 correctness flags.
	"""
	# Bug fix: a None name previously survived to the string concatenations
	# below ('losses' + name, etc.) and raised TypeError; default it instead.
	if name is not None:
		name = 'network_tt' + '_' + name
	else:
		name = 'network_tt'

	# Dropout keep_prob: p during training, 1.0 at inference / when no
	# train-phase variable is supplied.
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	with tf.variable_scope(name, reuse = b_reuse):
		l_layers = []
		l_layers.append(data)

		# TT stages: linear -> relu -> dropout, then rank rounding (stages 1-2)
		# or contraction back to a dense tensor (stage 3) before the logits.
		l_layers.append(Layers.tt_linear(l_layers[-1], [6, 6, 6, 6], name_scope = 'linear_1'))
		l_layers.append(Layers.tt_relu(l_layers[-1], name_scope = 'relu_linear_1'))
		l_layers.append(Layers.tt_dropout(l_layers[-1], dropout_rate(0.9), name_scope = 'dropout_1'))
		l_layers.append(Layers.tt_reduced_rank(l_layers[-1], name_scope = 'rounding_1'))

		l_layers.append(Layers.tt_linear(l_layers[-1], [8, 8, 8, 8], name_scope = 'linear_2'))
		l_layers.append(Layers.tt_relu(l_layers[-1], name_scope = 'relu_linear_2'))
		l_layers.append(Layers.tt_dropout(l_layers[-1], dropout_rate(0.9), name_scope = 'dropout_2'))
		l_layers.append(Layers.tt_reduced_rank(l_layers[-1], name_scope = 'rounding_2'))

		l_layers.append(Layers.tt_linear(l_layers[-1], [4, 4, 4, 4], name_scope = 'linear_3'))
		l_layers.append(Layers.tt_relu(l_layers[-1], name_scope = 'relu_linear_3'))
		l_layers.append(Layers.tt_dropout(l_layers[-1], dropout_rate(0.9), name_scope = 'dropout_3'))
		l_layers.append(Layers.tt_contract(l_layers[-1], name_scope = 'contract_3'))

		l_layers.append(Layers.linear(l_layers[-1], NUM_CLASSES, name_scope = 'linear_out'))

	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = l_layers[-1], name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	# Track moving averages of the losses; fetching total_loss also runs the
	# average-update op via the control dependency below.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)
	correct_flags = tf.nn.in_top_k(l_layers[-1], labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


def get_network_output(dict_mean_std, flag_batch_size, flag_tt_network, tfv_train_phase):
	"""Wire input batches into the train/validation networks on /gpu:0.

	Builds the batch tensors via InputDataMnist.construct_batch_part, selects
	either the dense or the TT network depending on flag_tt_network, and
	returns (input_placeholders, loss_train, eval_train, loss_validation,
	eval_validation).
	"""
	dict_inputs_batches = InputDataMnist.construct_batch_part(dict_mean_std, flag_batch_size, flag_tt_network)

	batches = dict_inputs_batches['batches']
	t_labels = batches['batch_train_labels']
	v_labels = batches['batch_validation_labels']
	if flag_tt_network is False:
		# Dense network: a single data tensor per split.
		t_data = batches['batch_train_data_1']
		v_data = batches['batch_validation_data_1']
	else:
		# TT network: one tensor per TT dimension, gathered into lists.
		t_data = [batches['batch_train_data_%d' % (k + 1)] for k in range(InputDataMnist.NUM_DIM)]
		v_data = [batches['batch_validation_data_%d' % (k + 1)] for k in range(InputDataMnist.NUM_DIM)]

	with tf.device('/gpu:0'):
		build_network = network_normal if flag_tt_network is False else network_tt
		loss_train, eval_train = build_network(t_data, t_labels, False, tfv_train_phase, 'mnist')
		# Validation shares variables with the training graph (reuse = True).
		loss_validation, eval_validation = build_network(v_data, v_labels, True, tfv_train_phase, 'mnist')

	return dict_inputs_batches['input_placeholders'], loss_train, eval_train, loss_validation, eval_validation
