import tensorflow as tf
import numpy as np

import Layers


# Path to the pre-packaged gesture dataset (.npz archive) — machine-specific.
dataset_path = 'D:/Datasets/GestureNumber/gesture.npz'
# Number of gesture classes the network predicts.
NUM_CLASSES = 5


# Load the gesture dataset.
def get_dataset(path = None):
	"""Load the gesture dataset from an .npz archive.

	Images are reshaped to [N, 32, 32, 1]; labels are passed through
	unchanged.

	Args:
		path: Optional path to the .npz archive.  Defaults to the
			module-level ``dataset_path``.

	Returns:
		Dict with 'train' and 'validation' entries, each a dict mapping
		'<split>_labels' and '<split>_data' to numpy arrays.
	"""
	if path is None:
		path = dataset_path
	ds = np.load(path)
	train_images = np.reshape(ds['train_images'], [-1, 32, 32, 1])
	train_labels = ds['train_labels']
	validation_images = np.reshape(ds['validation_images'], [-1, 32, 32, 1])
	validation_labels = ds['validation_labels']

	dict_dataset = {}
	dict_dataset['train'] = {
		'train_labels' : train_labels,
		'train_data' : train_images
		}
	dict_dataset['validation'] = {
		'validation_labels' : validation_labels,
		'validation_data' : validation_images
		}

	return dict_dataset


# Build the graph section that assembles one training and one validation
# batch of flag_batch_size samples, with per-sample augmentation.
def construct_batch_part(sample_shape, flag_batch_size):
	"""Create input placeholders and the per-sample preprocessing ops.

	Training samples are converted to float32 in [0, 1], padded by 4
	pixels on each spatial side, randomly cropped back to sample_shape,
	randomly flipped (left/right and up/down), then rescaled to [-1, 1].
	Validation samples only get the dtype conversion and the [-1, 1]
	rescaling.  Labels pass through unchanged.

	Args:
		sample_shape: Shape of one sample, e.g. [32, 32, 1].
		flag_batch_size: Number of samples per batch.

	Returns:
		Dict with 'batches' (preprocessed batch tensors plus the label
		placeholders) and 'input_placeholders' (raw placeholders to feed).
	"""
	# Raw uint8 image placeholders for the two splits.
	tfph_train_data = tf.placeholder(dtype = tf.uint8, shape = [flag_batch_size] + sample_shape, name = 'ph_train_data')
	tfph_validation_data = tf.placeholder(dtype = tf.uint8, shape = [flag_batch_size] + sample_shape, name = 'ph_validation_data')

	# Integer class-label placeholders.
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	# Per-sample augmentation and rescaling.  Processed samples are
	# collected in Python lists and stacked once at the end; the original
	# concat-per-iteration created an O(n^2) chain of concat ops in the
	# graph for the same result.
	l_train_samples = []
	l_validation_samples = []
	for k in range(flag_batch_size):
		# One sample from each split, converted to float32 in [0, 1].
		input_train_data = tf.image.convert_image_dtype(tfph_train_data[k], dtype = tf.float32)
		input_validation_data = tf.image.convert_image_dtype(tfph_validation_data[k], dtype = tf.float32)

		# Training-only augmentation: pad-and-random-crop plus random flips.
		# Cropping back to sample_shape generalizes the original hard-coded
		# [32, 32, 1] (identical for this dataset).
		input_train_data = tf.pad(input_train_data, [[4, 4], [4, 4], [0, 0]])
		input_train_data = tf.random_crop(input_train_data, sample_shape)
		input_train_data = tf.image.random_flip_left_right(input_train_data)
		input_train_data = tf.image.random_flip_up_down(input_train_data)

		# Rescale [0, 1] -> [-1, 1].
		l_train_samples.append(tf.multiply(tf.subtract(input_train_data, 0.5), 2.0))
		l_validation_samples.append(tf.multiply(tf.subtract(input_validation_data, 0.5), 2.0))

	batch_train_data = tf.stack(l_train_samples, axis = 0)
	batch_validation_data = tf.stack(l_validation_samples, axis = 0)

	result = {}
	result['batches'] = {
		'batch_train_labels' : tfph_train_labels,
		'batch_validation_labels' : tfph_validation_labels,
		'batch_train_data' : batch_train_data,
		'batch_validation_data': batch_validation_data
		}
	result['input_placeholders'] = {
		'tfph_train_labels' : tfph_train_labels,
		'tfph_validation_labels' : tfph_validation_labels,
		'tfph_train_data' : tfph_train_data,
		'tfph_validation_data': tfph_validation_data,
		}

	return result


# Build a feed_dict for one training batch of flag_batch_size samples;
# companion to construct_batch_part.
def get_batch_part_train(dict_dataset, dict_placeholders, n_index_head, flag_batch_size):
	"""Slice one training batch starting at n_index_head.

	If the requested window would run past the end of the dataset, it is
	shifted back so the last full batch is returned instead.

	Args:
		dict_dataset: Dataset dict as returned by get_dataset().
		dict_placeholders: The 'input_placeholders' dict from
			construct_batch_part().
		n_index_head: Index of the first sample in the batch.
		flag_batch_size: Number of samples per batch.

	Returns:
		A feed_dict mapping the training placeholders to numpy slices.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the last full batch.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the training placeholders are fed here.
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : dict_dataset['train']['train_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		}

	return dict_feeder


# Build a feed_dict for one validation batch of flag_batch_size samples;
# companion to construct_batch_part.
def get_batch_part_validation(dict_dataset, dict_placeholders, n_index_head, flag_batch_size):
	"""Slice one validation batch starting at n_index_head.

	If the requested window would run past the end of the dataset, it is
	shifted back so the last full batch is returned instead.

	Args:
		dict_dataset: Dataset dict as returned by get_dataset().
		dict_placeholders: The 'input_placeholders' dict from
			construct_batch_part().
		n_index_head: Index of the first sample in the batch.
		flag_batch_size: Number of samples per batch.

	Returns:
		A feed_dict mapping the validation placeholders to numpy slices.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the last full batch.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the validation placeholders are fed here.
	dict_feeder = {
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_validation_data'] : dict_dataset['validation']['validation_data'][n_index_head:n_index_end],
		}

	return dict_feeder


# Network definition: a VGG-style trunk of 3x3 convolutions with global
# average pooling and a final dense layer.
def network(data, labels, b_reuse, tfv_train_phase = None, name = None):
	"""Build the classification network and its loss/evaluation ops.

	Args:
		data: Input image batch tensor.
		labels: Integer class labels for the batch.
		b_reuse: Whether to reuse variables in this network's scope.
		tfv_train_phase: Train-phase flag forwarded to the layers.
		name: Optional scope suffix; the variable scope becomes
			'network_<name>' ('network_normal' when omitted).

	Returns:
		(loss, evaluation): scalar loss (cross-entropy plus L2 weight
		decay) and a per-sample 0/1 correctness tensor.
	"""
	name = 'network_' + ('normal' if name is None else name)

	# Filter widths of the convolutional trunk, grouped by stage; every
	# stage except the last is followed by a max-pool.
	conv_stages = [
		[64, 128, 128],
		[256, 256, 256],
		[384, 384, 384],
		[512, 512, 512],
		]

	with tf.variable_scope(name, reuse = b_reuse):
		net = data
		for n_stage, l_widths in enumerate(conv_stages, 1):
			for n_conv, n_width in enumerate(l_widths, 1):
				net = Layers.conv_2d(net, n_width, [3, 3], tfv_train_phase = tfv_train_phase, name_scope = 'conv_%d.%d' % (n_stage, n_conv))
			if n_stage < len(conv_stages):
				net = Layers.maxpool_2d(net, [3, 3], [2, 2], name_scope = 'maxpool_%d' % n_stage)
		# Global average pooling, then the class logits.
		net = Layers.avgpool_2d(net, [4, 4], [1, 1], padding = 'VALID', is_gap = True, name_scope = 'avgpool')
		net = Layers.fc(net, NUM_CLASSES, act_last = False, name_scope = 'fc_out')
		outputs = tf.identity(net, 'final_dense')

	onehot_labels = tf.one_hot(labels, NUM_CLASSES)
	xentropy = tf.losses.softmax_cross_entropy(logits = outputs, onehot_labels = onehot_labels)
	# L2 weight decay over every trainable variable except batch-norm
	# parameters and biases.
	l_decay_vars = [
		v for v in tf.trainable_variables()
		if 'batch_norm' not in v.name and 'var_biases' not in v.name
		]
	loss = xentropy + 0.0001 * tf.add_n([tf.nn.l2_loss(v) for v in l_decay_vars])
	correct_flags = tf.nn.in_top_k(outputs, tf.argmax(onehot_labels, axis = 1), 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return loss, evaluation


# Build the training and validation towers (shared weights) and return
# their loss and evaluation tensors.
def get_network_output(i, t_data, t_labels, v_data, v_labels, tfv_train_phase):
	"""Instantiate the network for both splits and return its outputs.

	Args:
		i: Tower index; every tower after the first reuses variables.
		t_data, t_labels: Training batch data and labels.
		v_data, v_labels: Validation batch data and labels.
		tfv_train_phase: Train-phase flag forwarded to the network.

	Returns:
		(loss_train, eval_train, loss_validation, eval_validation).
	"""
	# The validation branch always reuses the weights the training
	# branch created in the same scope.
	train_outputs = network(t_data, t_labels, i > 0, tfv_train_phase, 'cifar_conv')
	validation_outputs = network(v_data, v_labels, True, tfv_train_phase, 'cifar_conv')

	return train_outputs + validation_outputs
