import tensorflow as tf

import InputDataCIFAR10
import Layers


NUM_CLASSES = 10


def interface_get_dataset(flag_tt_network = False):
	"""Forward to the CIFAR-10 input module's dataset loader."""
	dataset = InputDataCIFAR10.get_dataset(flag_tt_network)
	return dataset


def interface_get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Forward to the CIFAR-10 input module's training-batch fetcher."""
	batch = InputDataCIFAR10.get_batch_part_train(
		dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size)
	return batch


def interface_get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Forward to the CIFAR-10 input module's validation-batch fetcher."""
	batch = InputDataCIFAR10.get_batch_part_validation(
		dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size)
	return batch


def network_normal(data, labels, b_reuse, tfv_train_phase = None, name = None):
	"""Build a three-stream fully-connected classifier and its loss/eval ops.

	The input is split along its last axis into 3 slices (presumably the
	RGB channels of CIFAR-10 images — TODO confirm against the input
	pipeline).  Each slice runs through four linear+ReLU+dropout layers
	plus a final linear layer producing NUM_CLASSES logits; the three
	per-stream logits are averaged before the softmax cross-entropy.

	Args:
		data: input tensor; assumed shape (batch, features, 3) so that
			slicing [.., .., i] yields one stream — verify with caller.
		labels: integer class labels for sparse softmax cross-entropy.
		b_reuse: passed as `reuse` to tf.variable_scope, so the same
			weights serve the train and validation graphs.
		tfv_train_phase: optional bool tensor; when provided, dropout is
			active (keep_prob 0.9) only while it is True.  When None,
			dropout is disabled (keep_prob 1.0).
		name: optional scope-name suffix.

	Returns:
		(total_loss, evaluation): the moving-average-tracked scalar loss
		and an int32 tensor of per-example top-1 correctness flags.
	"""
	# Bug fix: previously a None `name` (the declared default) crashed —
	# tf.variable_scope(None, ...) is invalid and 'softmax_xentropy' + None
	# raises TypeError.  Callers that pass a name see identical behavior.
	if name is not None:
		name = 'network_normal' + '_' + name
	else:
		name = 'network_normal'

	# Dropout keep-probability: p during training, 1.0 otherwise.
	if tfv_train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	# Split the last axis into the three per-stream inputs.
	# NOTE(review): tf.squeeze removes ALL size-1 dims, so a batch of 1
	# would also lose its batch axis — confirm batch size is always > 1.
	slices = []
	for i in range(0,3):
		slice_data = tf.slice(data, [0, 0, i], [data.shape[0], data.shape[1], 1])
		slices.append(tf.squeeze(slice_data))

	with tf.variable_scope(name, reuse = b_reuse):
		l_whole_layers = []

		for i in range(0,3):
			with tf.variable_scope('stream_%d' % (i + 1)):
				l_layers = []
				l_layers.append(slices[i])

				l_layers.append(Layers.linear(l_layers[-1], 4096, name_scope = 'linear_1'))
				l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_1'))
				l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_1'))

				l_layers.append(Layers.linear(l_layers[-1], 4096, name_scope = 'linear_2'))
				l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_2'))
				l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_2'))

				l_layers.append(Layers.linear(l_layers[-1], 1296, name_scope = 'linear_3'))
				l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_3'))
				l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_3'))

				l_layers.append(Layers.linear(l_layers[-1], 256, name_scope = 'linear_4'))
				l_layers.append(tf.nn.relu(l_layers[-1], name = 'relu_linear_4'))
				l_layers.append(tf.nn.dropout(l_layers[-1], dropout_rate(0.9), name = 'dropout_4'))

				l_layers.append(Layers.linear(l_layers[-1], NUM_CLASSES, name_scope = 'linear_out'))

				l_whole_layers.append(l_layers)

		# Ensemble: average the three streams' logits.
		last_logits = ((l_whole_layers[0])[-1] + (l_whole_layers[1])[-1] + (l_whole_layers[2])[-1]) / 3

	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = last_logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	# Track exponential moving averages of the losses; the identity op below
	# makes fetching total_loss also update the averages.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)
	correct_flags = tf.nn.in_top_k(last_logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


def get_network_output(dict_mean_std, flag_batch_size, flag_tt_network, tfv_train_phase):
	"""Build the input batches and the train/validation networks on GPU 0.

	Returns the input placeholders dict plus the loss and evaluation ops
	for the training graph and the (weight-sharing) validation graph.
	NOTE(review): `flag_tt_network` is accepted but unused here.
	"""
	inputs = InputDataCIFAR10.construct_batch_part(dict_mean_std, flag_batch_size)
	batches = inputs['batches']

	with tf.device('/gpu:0'):
		loss_train, eval_train = network_normal(
			batches['batch_train_data'], batches['batch_train_labels'],
			False, tfv_train_phase, 'cifar10')
		# Validation graph reuses the training variables (b_reuse = True).
		loss_validation, eval_validation = network_normal(
			batches['batch_validation_data'], batches['batch_validation_labels'],
			True, tfv_train_phase, 'cifar10')

	return inputs['input_placeholders'], loss_train, eval_train, loss_validation, eval_validation
