import numpy as np
import tensorflow as tf
import h5py


dataset_path = 'D:/Datasets/Mnist/'
NUM_DIM = 4


# Build the dataset: contract each TT-format sample back to its full tensor,
# ready to feed a standard-form network.
def _contract_tt_cores(l_cores):
	"""Contract a list of per-sample TT cores into full data tensors.

	Each core is assumed to have shape (batch, r_in, m, n, r_out) -- TODO
	confirm against the writer of mnist_tt.h5.  Adjacent cores are contracted
	over the shared rank index and the two spatial mode dimensions are merged
	pairwise, exactly as the reshape after each einsum below encodes.
	"""
	arr_data = l_cores[0]
	for arr_factor in l_cores[1:]:
		# Contract the shared TT rank (index j) and keep both mode pairs.
		arr_data = np.einsum('bimnj,bjpqk->bimpnqk', arr_data, arr_factor)
		b, r0, m, p, n, q, r1 = arr_data.shape
		arr_data = np.reshape(arr_data, (b, r0, m * p, n * q, r1))
	return arr_data


def get_dataset():
	"""Load the TT-decomposed MNIST data and return it in contracted form.

	Returns:
		dict_dataset: {'train': {'train_labels', 'train_data'},
			'validation': {'validation_labels', 'validation_data'}} with fully
			contracted (squeezed) data arrays.
		dict_mean_std: {'mean': {'mean'}, 'std': {'std'}} computed over the
			contracted training data.  The per-core 'mean_%d'/'std_%d' entries
			stored in the file are not needed here and are not read.
	"""
	l_train_data = []
	l_validation_data = []
	with h5py.File(dataset_path + 'mnist_tt.h5', 'r') as file:
		for i in range(NUM_DIM):
			# Dataset.value was removed in h5py 3.0; [()] reads the whole array.
			l_train_data.append(file['train_data_%d' % (i + 1)][()])
			l_validation_data.append(file['validation_data_%d' % (i + 1)][()])
		ds_train_labels = file['train_labels'][()]
		ds_validation_labels = file['validation_labels'][()]

	# Contract the cores back to full samples; squeeze drops the unit TT
	# boundary ranks left on the outer dimensions.
	ds_train_data = np.squeeze(_contract_tt_cores(l_train_data))
	ds_validation_data = np.squeeze(_contract_tt_cores(l_validation_data))
	ds_mean = np.mean(ds_train_data, axis = 0)
	ds_std = np.std(ds_train_data, axis = 0)

	dict_dataset = {}
	dict_dataset['train'] = {
		'train_labels' : ds_train_labels,
		'train_data' : ds_train_data
		}
	dict_dataset['validation'] = {
		'validation_labels' : ds_validation_labels,
		'validation_data' : ds_validation_data
		}

	dict_mean_std = {}
	dict_mean_std['mean'] = {
		'mean' : ds_mean
		}
	dict_mean_std['std'] = {
		'std' : ds_std
		}

	return dict_dataset, dict_mean_std


# Build the dataset in raw TT format (a list of cores per split), ready to
# feed a TT-form network.
def get_dataset_tt():
	"""Load the TT-decomposed MNIST data without contracting it.

	Returns:
		dict_dataset: {'train': {'train_labels', 'train_data'},
			'validation': {'validation_labels', 'validation_data'}} where each
			data entry is a list of NUM_DIM core arrays.
		dict_mean_std: {'mean': {'mean'}, 'std': {'std'}} as lists of the
			per-core statistics stored in the file.
	"""
	dsl_train_data = []
	dsl_validation_data = []
	dsl_mean = []
	dsl_std = []
	with h5py.File(dataset_path + 'mnist_tt.h5', 'r') as file:
		for i in range(NUM_DIM):
			# Dataset.value was removed in h5py 3.0; [()] reads the whole array.
			dsl_train_data.append(file['train_data_%d' % (i + 1)][()])
			dsl_validation_data.append(file['validation_data_%d' % (i + 1)][()])
			dsl_mean.append(file['mean_%d' % (i + 1)][()])
			dsl_std.append(file['std_%d' % (i + 1)][()])
		ds_train_labels = file['train_labels'][()]
		ds_validation_labels = file['validation_labels'][()]

	dict_dataset = {}
	dict_dataset['train'] = {
		'train_labels' : ds_train_labels,
		'train_data' : dsl_train_data
		}
	dict_dataset['validation'] = {
		'validation_labels' : ds_validation_labels,
		'validation_data' : dsl_validation_data
		}

	dict_mean_std = {}
	dict_mean_std['mean'] = {
		'mean' : dsl_mean
		}
	dict_mean_std['std'] = {
		'std' : dsl_std
		}

	return dict_dataset, dict_mean_std


# Build the per-step feed structure for training and validation in standard
# form; each step consumes flag_batch_size samples.
def construct_batch_part(dict_mean_std, flag_batch_size):
	"""Create the input placeholders for a standard-form network.

	Args:
		dict_mean_std: nested dict whose ['mean']['mean'] entry fixes the
			per-sample data shape.
		flag_batch_size: number of samples fed per step.

	Returns:
		dict with 'batches' (per-step tensors) and 'input_placeholders'
		(every placeholder that must be fed), both keyed by role.
	"""
	shape_data = list(dict_mean_std['mean']['mean'].shape)
	shape_batch = [flag_batch_size] + shape_data

	# Data placeholders for the training and validation streams.
	tfph_train_data = tf.placeholder(dtype = tf.float32, shape = shape_batch, name = 'ph_train_data')
	tfph_validation_data = tf.placeholder(dtype = tf.float32, shape = shape_batch, name = 'ph_validation_data')

	# Normalisation statistics use the single-sample shape.
	tfph_mean = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_mean')
	tfph_std = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_std')

	# Integer class labels, one per sample in the batch.
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	return {
		'batches' : {
			'batch_train_labels' : tfph_train_labels,
			'batch_validation_labels' : tfph_validation_labels,
			'batch_train_data' : tfph_train_data,
			'batch_validation_data': tfph_validation_data
			},
		'input_placeholders' : {
			'tfph_train_labels' : tfph_train_labels,
			'tfph_validation_labels' : tfph_validation_labels,
			'tfph_train_data' : tfph_train_data,
			'tfph_validation_data': tfph_validation_data,
			'tfph_mean' : tfph_mean,
			'tfph_std' : tfph_std
			}
		}


# Build the per-step feed structure for test data in standard format; each
# step consumes flag_batch_size samples.
def construct_batch_normal(dict_mean_std, flag_batch_size):
	"""Create the validation-only placeholders for a standard-form network.

	Args:
		dict_mean_std: nested dict whose ['mean']['mean'] entry fixes the
			per-sample data shape.
		flag_batch_size: number of samples fed per step.

	Returns:
		dict with 'batches' and 'input_placeholders' sub-dicts keyed by role.
	"""
	shape_data = list(dict_mean_std['mean']['mean'].shape)

	# Validation data placeholder (batch-major).
	tfph_validation_data = tf.placeholder(dtype = tf.float32, shape = [flag_batch_size] + shape_data, name = 'ph_validation_data')

	# Normalisation statistics use the single-sample shape.
	tfph_mean = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_mean')
	tfph_std = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_std')

	# Integer class labels, one per sample in the batch.
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	return {
		'batches' : {
			'batch_validation_labels' : tfph_validation_labels,
			'batch_validation_data': tfph_validation_data
			},
		'input_placeholders' : {
			'tfph_validation_labels' : tfph_validation_labels,
			'tfph_validation_data': tfph_validation_data,
			'tfph_mean' : tfph_mean,
			'tfph_std' : tfph_std
			}
		}


# Build the per-step feed structure for test data in TT format; each step
# consumes flag_batch_size samples.
def construct_batch_tt(dict_mean_std, flag_batch_size):
	"""Create the validation placeholders for a TT-form network.

	One data/mean/std placeholder triple is created per TT core (NUM_DIM in
	total), each shaped from the corresponding entry of
	dict_mean_std['mean']['mean'].

	Args:
		dict_mean_std: nested dict whose ['mean']['mean'] entry is a list of
			per-core arrays fixing each core's shape.
		flag_batch_size: number of samples fed per step.

	Returns:
		dict with 'batches' and 'input_placeholders' sub-dicts; per-core
		entries are suffixed '_1' .. '_NUM_DIM'.
	"""
	l_ph_validation_data = []
	l_ph_mean = []
	l_ph_std = []
	for i in range(NUM_DIM):
		core_shape = list(dict_mean_std['mean']['mean'][i].shape)

		# Validation data placeholder for core i (batch-major).
		l_ph_validation_data.append(tf.placeholder(dtype = tf.float32, shape = [flag_batch_size] + core_shape, name = 'ph_validation_data_%d' % (i + 1)))

		# Per-core normalisation statistics.
		l_ph_mean.append(tf.placeholder(dtype = tf.float32, shape = core_shape, name = 'ph_mean_%d' % (i + 1)))
		l_ph_std.append(tf.placeholder(dtype = tf.float32, shape = core_shape, name = 'ph_std_%d' % (i + 1)))

	# Integer class labels, one per sample.  (The previous version also
	# created a 'ph_train_labels' placeholder that was never returned or
	# used; it has been removed.)
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	result = {}
	result['batches'] = {
		'batch_validation_labels' : tfph_validation_labels
		}
	result['input_placeholders'] = {
		'tfph_validation_labels' : tfph_validation_labels
		}
	for i in range(NUM_DIM):
		result['batches']['batch_validation_data_%d' % (i + 1)] = l_ph_validation_data[i]
		result['input_placeholders']['tfph_validation_data_%d' % (i + 1)] = l_ph_validation_data[i]
		result['input_placeholders']['tfph_mean_%d' % (i + 1)] = l_ph_mean[i]
		result['input_placeholders']['tfph_std_%d' % (i + 1)] = l_ph_std[i]

	return result


# Assemble one training feed of flag_batch_size samples; pairs with
# construct_batch_part.
def get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Build the feed_dict for one training step.

	Args:
		dict_dataset: dataset dict as returned by get_dataset().
		dict_mean_std: mean/std dict as returned by get_dataset().
		dict_placeholders: the 'input_placeholders' entry from
			construct_batch_part().
		n_index_head: index of the first sample of the batch.
		flag_batch_size: batch size; must divide the dataset size.

	Returns:
		feed_dict mapping the train/mean/std placeholders to numpy slices of
		exactly flag_batch_size samples.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the last full batch.  The previous code clamped to
		# n_size - 1, which silently dropped the final sample.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the training placeholders are fed here.
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : dict_dataset['train']['train_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder


# Assemble one validation feed of flag_batch_size samples; pairs with
# construct_batch_part, construct_batch_normal or construct_batch_tt.
def get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network):
	"""Build the feed_dict for one validation step.

	Args:
		dict_dataset: dataset dict from get_dataset() or get_dataset_tt().
		dict_mean_std: mean/std dict from the same loader.
		dict_placeholders: the 'input_placeholders' entry from the matching
			construct_batch_* function.
		n_index_head: index of the first sample of the batch.
		flag_batch_size: batch size; must divide the dataset size.
		flag_tt_network: if truthy, feed the NUM_DIM per-core placeholders;
			otherwise feed the single full-tensor placeholders.

	Returns:
		feed_dict mapping the validation/mean/std placeholders to numpy
		slices of exactly flag_batch_size samples.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the last full batch.  The previous code clamped to
		# n_size - 1, which silently dropped the final sample.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the validation placeholders are fed here.
	dict_feeder = {
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end]
		}

	if flag_tt_network:
		# TT network: one data/mean/std entry per core.
		for i in range(NUM_DIM):
			dict_feeder[dict_placeholders['tfph_validation_data_%d' % (i + 1)]] = dict_dataset['validation']['validation_data'][i][n_index_head:n_index_end]
			dict_feeder[dict_placeholders['tfph_mean_%d' % (i + 1)]] = dict_mean_std['mean']['mean'][i]
			dict_feeder[dict_placeholders['tfph_std_%d' % (i + 1)]] = dict_mean_std['std']['std'][i]
	else:
		# Standard network: a single full-tensor data entry.
		dict_feeder[dict_placeholders['tfph_validation_data']] = dict_dataset['validation']['validation_data'][n_index_head:n_index_end]
		dict_feeder[dict_placeholders['tfph_mean']] = dict_mean_std['mean']['mean']
		dict_feeder[dict_placeholders['tfph_std']] = dict_mean_std['std']['std']

	return dict_feeder
