import numpy as np
import tensorflow as tf


# Root directory of the locally stored CIFAR-10 archive ('cifar.npz' lives here).
# NOTE(review): Windows-style absolute path — adjust per machine.
dataset_path = 'D:/Datasets/CIFAR10/'


# Build the dataset dictionaries; if flag_tt_network is True, TT-format data
# was intended to be built (the TT branch is not implemented in this block).
def get_dataset(flag_tt_network = False):
	"""Load CIFAR-10 train/validation splits plus normalization statistics.

	Images are cast to float32 and reshaped to (N, 32*32, 3); mean and std
	are computed per pixel/channel over the TRAINING images only, so the
	validation set is normalized with training statistics downstream.

	Args:
		flag_tt_network: kept for interface compatibility; currently unused
			in this function — TODO confirm intended TT handling.

	Returns:
		(dict_dataset, dict_mean_std) where
		dict_dataset = {'train': {'train_labels', 'train_data'},
		                'validation': {'validation_labels', 'validation_data'}}
		dict_mean_std = {'mean': {'mean': ...}, 'std': {'std': ...}}.
	"""
	# np.load keeps the .npz file handle open; the context manager closes it
	# once all arrays have been materialized (fixes a resource leak).
	with np.load(dataset_path + 'cifar.npz') as f:
		arr_train_images = np.reshape(f['train_images'].astype('float32'), [-1, 32*32, 3])
		arr_train_labels = f['train_labels']

		arr_validation_images = np.reshape(f['validation_images'].astype('float32'), [-1, 32*32, 3])
		arr_validation_labels = f['validation_labels']

	# Statistics from the training split only; shape (32*32, 3).
	mean = np.mean(arr_train_images, axis=0)
	std = np.std(arr_train_images, axis=0)

	dict_dataset = {
		'train' : {
			'train_labels' : arr_train_labels,
			'train_data' : arr_train_images
			},
		'validation' : {
			'validation_labels' : arr_validation_labels,
			'validation_data' : arr_validation_images
			}
		}

	# Nested one level deep to match the access pattern used by the
	# batch-construction helpers below (dict_mean_std['mean']['mean'], ...).
	dict_mean_std = {
		'mean' : {
			'mean' : mean
			},
		'std' : {
			'std' : std
			}
		}

	return dict_dataset, dict_mean_std


# Build the graph inputs for one batch of size flag_batch_size of training
# and validation data (TF1 placeholder / feed_dict style).
def construct_batch_part(dict_mean_std, flag_batch_size):
	"""Create input placeholders and z-score-normalized batch tensors.

	Args:
		dict_mean_std: as returned by get_dataset; only the shape of
			dict_mean_std['mean']['mean'] is read here.
		flag_batch_size: fixed batch size baked into the placeholder shapes.

	Returns:
		{'batches': {'batch_train_data', 'batch_train_labels',
		             'batch_validation_data', 'batch_validation_labels'},
		 'input_placeholders': {'tfph_train_data', 'tfph_train_labels',
		                        'tfph_validation_data', 'tfph_validation_labels',
		                        'tfph_mean', 'tfph_std'}}
	"""
	# Per-sample shape taken from the mean array (e.g. [32*32, 3] for CIFAR-10).
	shape_data = list(dict_mean_std['mean']['mean'].shape)

	# Training-set placeholders
	tfph_train_data = tf.placeholder(dtype = tf.float32, shape = [flag_batch_size] + shape_data, name = 'ph_train_data')
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_train_labels')

	# Validation-set placeholders
	tfph_validation_data = tf.placeholder(dtype = tf.float32, shape = [flag_batch_size] + shape_data, name = 'ph_validation_data')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	# Placeholders for the normalization statistics (per-sample shape).
	tfph_mean = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_mean')
	tfph_std = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_std')

	# Z-score normalization. mean/std have the per-sample shape and broadcast
	# over the leading batch axis, so one vectorized op replaces the original
	# per-sample slice / expand_dims / concat loop, which built O(batch_size)
	# graph nodes while producing the exact same values.
	batch_train_data = (tfph_train_data - tfph_mean) / tfph_std
	batch_validation_data = (tfph_validation_data - tfph_mean) / tfph_std

	result = {}
	result['batches'] = {
		'batch_train_data' : batch_train_data,
		'batch_train_labels' : tfph_train_labels,
		'batch_validation_data' : batch_validation_data,
		'batch_validation_labels' : tfph_validation_labels
		}
	result['input_placeholders'] = {
		'tfph_train_data' : tfph_train_data,
		'tfph_train_labels' : tfph_train_labels,
		'tfph_validation_data' : tfph_validation_data,
		'tfph_validation_labels' : tfph_validation_labels,
		'tfph_mean' : tfph_mean,
		'tfph_std' : tfph_std
		}

	return result


# Build the feed_dict for one training batch of size flag_batch_size;
# use together with construct_batch_part.
def get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Return a feed_dict covering samples [n_index_head, n_index_head + flag_batch_size).

	If the requested window would run past the end of the training set, it
	is shifted back so the final full batch is returned instead.

	Args:
		dict_dataset, dict_mean_std: as returned by get_dataset.
		dict_placeholders: the 'input_placeholders' dict from construct_batch_part.
		n_index_head: start index of the batch within the training set.
		flag_batch_size: number of samples per batch; must divide the set size.

	Returns:
		A feed_dict mapping the train/mean/std placeholders to numpy slices.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the end of the data. The original used n_size - 1 here,
		# which silently dropped the last sample and mis-aligned the batch.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the train / mean / std placeholders are fed here.
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : dict_dataset['train']['train_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder


# Build the feed_dict for one validation batch of size flag_batch_size;
# use together with construct_batch_part.
def get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Return a feed_dict covering validation samples [n_index_head, n_index_head + flag_batch_size).

	If the requested window would run past the end of the validation set, it
	is shifted back so the final full batch is returned instead.

	Args:
		dict_dataset, dict_mean_std: as returned by get_dataset.
		dict_placeholders: the 'input_placeholders' dict from construct_batch_part.
		n_index_head: start index of the batch within the validation set.
		flag_batch_size: number of samples per batch; must divide the set size.

	Returns:
		A feed_dict mapping the validation/mean/std placeholders to numpy slices.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the end of the data. The original used n_size - 1 here,
		# which silently dropped the last sample and mis-aligned the batch.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the validation / mean / std placeholders are fed here.
	dict_feeder = {
		dict_placeholders['tfph_validation_data'] : dict_dataset['validation']['validation_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder
