import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import t3f
import h5py
import json


# Root directory holding the MNIST files and the generated mnist_tt.h5.
dataset_path = 'D:/Datasets/Mnist/'
# Number of TT (tensor-train) dimensions the data is factored into.
NUM_DIM = 4
# Module-level accumulators filled by save_tt_data(); one sub-list per TT
# dimension, consumed by save_tt_dataset().
l_tt_train = []
l_tt_validation = []


# Build the dataset; when flag_tt_network is True load the pre-computed TT data
# from mnist_tt.h5, otherwise load the raw MNIST images.
def get_dataset(flag_tt_network = False):
	"""Load either the TT-format dataset or the raw MNIST dataset.

	Args:
		flag_tt_network: when True, read the NUM_DIM per-dimension TT arrays
			from mnist_tt.h5; otherwise read flat MNIST images via the
			TensorFlow tutorial loader.

	Returns:
		(dict_dataset, dict_mean_std) where dict_dataset holds
		'train'/'validation' labels and per-dimension data lists, and
		dict_mean_std holds the per-dimension mean/std lists.
	"""
	if flag_tt_network is True:
		dsl_train_data = []
		dsl_validation_data = []
		dsl_mean = []
		dsl_std = []
		with h5py.File(dataset_path + 'mnist_tt.h5', 'r') as file:
			for i in range(NUM_DIM):
				# Dataset.value was removed in h5py 3.0; ds[()] reads the whole
				# array and works on every h5py version. Direct indexing also
				# raises a clear KeyError on a missing name instead of the
				# AttributeError that file.get(...).value produced.
				dsl_train_data.append(file['train_data_%d' % (i + 1)][()])
				dsl_validation_data.append(file['validation_data_%d' % (i + 1)][()])
				dsl_mean.append(file['mean_%d' % (i + 1)][()])
				dsl_std.append(file['std_%d' % (i + 1)][()])
			ds_train_labels = file['train_labels'][()]
			ds_validation_labels = file['validation_labels'][()]
	else:
		data_sets = input_data.read_data_sets(dataset_path, False)
		dsl_train_data = [data_sets.train.images]
		ds_train_labels = data_sets.train.labels
		# Fold the held-out test split into validation.
		dsl_validation_data = [np.vstack((data_sets.validation.images, data_sets.test.images))]
		ds_validation_labels = np.hstack((data_sets.validation.labels, data_sets.test.labels))
		# Normalisation statistics come from the training split only.
		dsl_mean = [np.mean(data_sets.train.images, axis = 0)]
		dsl_std = [np.std(data_sets.train.images, axis = 0)]

	dict_dataset = {}
	dict_dataset['train'] = {
		'train_labels' : ds_train_labels,
		'train_data' : dsl_train_data
		}
	dict_dataset['validation'] = {
		'validation_labels' : ds_validation_labels,
		'validation_data' : dsl_validation_data
		}

	dict_mean_std = {}
	dict_mean_std['mean'] = {
		'mean' : dsl_mean
		}
	dict_mean_std['std'] = {
		'std' : dsl_std
		}

	return dict_dataset, dict_mean_std


# Riemannian optimisation of the input data; returns the target tensor cores
# plus the loss and train-step operations that need to be run.
def riemannian_data(ph_input_data):
	"""Build the graph that fits a fixed-rank TT manifold point to the input.

	Returns:
		(list of the manifold variable's TT core tensors,
		 [loss tensor, train-step op]).
	"""
	# SVD-related computations must be placed on the CPU.
	with tf.device('/cpu:0'):
		with tf.variable_scope('riemannian'):
			# TT-matrix layout: 28x28 images factored as (1,1,1,1) x (4,7,7,4).
			tt_matrix_shape = ((1, 1, 1, 1), (4, 7, 7, 4))
			max_rank = 4

			# Rewrite the raw input in TT-matrix format.
			tt_input = t3f.to_tt_matrix(ph_input_data, tt_matrix_shape, 28, 0.01)

			# The optimisation target: a point on the TT manifold.
			manifold_init = t3f.glorot_initializer(tt_matrix_shape, tt_rank = max_rank)
			tt_manifold = t3f.get_variable('manifold', initializer = manifold_init)

			# One Riemannian SGD step (lr 0.1): project the Euclidean gradient
			# onto the tangent space, step, then retract by rounding back to
			# max_rank.
			euclidean_grad = tt_manifold - tt_input
			tangent_grad = t3f.riemannian.project(euclidean_grad, tt_manifold)
			train_step = t3f.assign(tt_manifold, t3f.round(tt_manifold - 0.1 * tangent_grad, max_tt_rank = max_rank))
			loss = 0.5 * t3f.frobenius_norm_squared(tt_manifold - tt_input)

		# NOTE(review): _tt_cores is a private t3f attribute; this depends on
		# t3f internals — confirm against the pinned t3f version.
		return list(tt_manifold._tt_cores), [loss, train_step.op]


# Accumulate one batch of Riemannian-optimised TT tensor data.
def save_tt_data(l_tt_data, b_t_or_v):
	"""Append per-dimension TT cores to the module-level buffers.

	Args:
		l_tt_data: list of NUM_DIM core arrays for one batch.
		b_t_or_v: True appends to l_tt_train, False to l_tt_validation.
	"""
	target = l_tt_train if b_t_or_v is True else l_tt_validation
	if not target:
		# First call: create one sub-list per TT dimension.
		for dim in range(NUM_DIM):
			target.append([l_tt_data[dim]])
	else:
		for dim in range(NUM_DIM):
			target[dim].append(l_tt_data[dim])

	return None


# Persist the Riemannian-optimised TT dataset to disk.
def save_tt_dataset(l_tt_mean, l_tt_std, arr_train_labels, arr_validation_labels):
	"""Write the accumulated TT data, statistics and labels to mnist_tt.h5.

	Reads the module-level l_tt_train / l_tt_validation buffers filled by
	save_tt_data(); one dataset per TT dimension, float32 throughout.
	"""
	with h5py.File(dataset_path + 'mnist_tt.h5', 'w') as out:
		for dim in range(NUM_DIM):
			suffix = '_%d' % (dim + 1)
			out.create_dataset('train_data' + suffix, data = np.array(l_tt_train[dim], dtype = np.float32))
			out.create_dataset('validation_data' + suffix, data = np.array(l_tt_validation[dim], dtype = np.float32))
			out.create_dataset('mean' + suffix, data = np.array(l_tt_mean[dim], dtype = np.float32))
			out.create_dataset('std' + suffix, data = np.array(l_tt_std[dim], dtype = np.float32))
		out.create_dataset('train_labels', data = arr_train_labels)
		out.create_dataset('validation_labels', data = arr_validation_labels)

	return None


# Build the training/validation placeholders for one batch of size
# flag_batch_size (data is fed one batch at a time).
def construct_batch_part(dict_mean_std, flag_batch_size, flag_tt_network):
	"""Create the feed placeholders for one batch.

	Args:
		dict_mean_std: per-dimension mean/std dict as produced by get_dataset.
		flag_batch_size: number of samples per batch.
		flag_tt_network: when True build NUM_DIM placeholder groups (one per
			TT dimension), otherwise a single group for the flat images.

	Returns:
		dict with 'batches' (tensors fed into the network) and
		'input_placeholders' (the keys get_batch_part_* uses to build feed
		dicts). The label placeholders appear in both sub-dicts by design.
	"""
	# NUM_DIM is only evaluated on the TT path.
	d = NUM_DIM if flag_tt_network is True else 1

	l_ph_train_data = []
	l_ph_validation_data = []
	l_ph_mean = []
	l_ph_std = []
	for i in range(d):
		# Placeholder shapes follow the stored per-dimension mean shape.
		shape_data = list(dict_mean_std['mean']['mean'][i].shape)

		# Training-set placeholder.
		tfph_train_data = tf.placeholder(dtype = tf.float32, shape = [flag_batch_size] + shape_data, name = 'ph_train_data_%d' % (i + 1))

		# Validation-set placeholder.
		tfph_validation_data = tf.placeholder(dtype = tf.float32, shape = [flag_batch_size] + shape_data, name = 'ph_validation_data_%d' % (i + 1))

		# Mean and std placeholders (no batch dimension).
		tfph_mean = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_mean_%d' % (i + 1))
		tfph_std = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_std_%d' % (i + 1))

		l_ph_train_data.append(tfph_train_data)
		l_ph_validation_data.append(tfph_validation_data)
		l_ph_mean.append(tfph_mean)
		l_ph_std.append(tfph_std)

	# Label placeholders.
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	result = {}
	result['batches'] = {
		'batch_train_labels' : tfph_train_labels,
		'batch_validation_labels' : tfph_validation_labels
		}
	result['input_placeholders'] = {
		'tfph_train_labels' : tfph_train_labels,
		'tfph_validation_labels' : tfph_validation_labels,
		}
	for i in range(d):
		result['batches']['batch_train_data_%d' % (i + 1)] = l_ph_train_data[i]
		result['batches']['batch_validation_data_%d' % (i + 1)] = l_ph_validation_data[i]
		result['input_placeholders']['tfph_train_data_%d' % (i + 1)] = l_ph_train_data[i]
		result['input_placeholders']['tfph_validation_data_%d' % (i + 1)] = l_ph_validation_data[i]
		result['input_placeholders']['tfph_mean_%d' % (i + 1)] = l_ph_mean[i]
		result['input_placeholders']['tfph_std_%d' % (i + 1)] = l_ph_std[i]

	return result


# Build one feed dict of flag_batch_size training samples; used together with
# construct_batch_part.
def get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network):
	"""Slice one training batch starting at n_index_head into a feed dict.

	Args:
		dict_dataset: dataset dict from get_dataset.
		dict_mean_std: mean/std dict from get_dataset.
		dict_placeholders: the 'input_placeholders' sub-dict from
			construct_batch_part.
		n_index_head: index of the first sample of the batch.
		flag_batch_size: batch size; must divide the dataset size.
		flag_tt_network: when True feed all NUM_DIM TT dimensions, otherwise
			just the single flat-image group.

	Returns:
		feed dict mapping placeholders to numpy slices / statistics.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the last full batch. (Previously clamped to n_size - 1,
		# which dropped the final sample and misaligned the batch.)
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the training placeholders are used here.
	dict_feeder = {
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		}

	# NUM_DIM is only evaluated on the TT path.
	d = NUM_DIM if flag_tt_network is True else 1
	for i in range(d):
		dict_feeder[dict_placeholders['tfph_train_data_%d' % (i + 1)]] = dict_dataset['train']['train_data'][i][n_index_head:n_index_end]
		dict_feeder[dict_placeholders['tfph_mean_%d' % (i + 1)]] = dict_mean_std['mean']['mean'][i]
		dict_feeder[dict_placeholders['tfph_std_%d' % (i + 1)]] = dict_mean_std['std']['std'][i]

	return dict_feeder


# Build one feed dict of flag_batch_size validation samples; used together with
# construct_batch_part.
def get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size, flag_tt_network):
	"""Slice one validation batch starting at n_index_head into a feed dict.

	Args:
		dict_dataset: dataset dict from get_dataset.
		dict_mean_std: mean/std dict from get_dataset.
		dict_placeholders: the 'input_placeholders' sub-dict from
			construct_batch_part.
		n_index_head: index of the first sample of the batch.
		flag_batch_size: batch size; must divide the dataset size.
		flag_tt_network: when True feed all NUM_DIM TT dimensions, otherwise
			just the single flat-image group.

	Returns:
		feed dict mapping placeholders to numpy slices / statistics.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the last full batch. (Previously clamped to n_size - 1,
		# which dropped the final sample and misaligned the batch.)
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the validation placeholders are used here.
	dict_feeder = {
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end],
		}

	# NUM_DIM is only evaluated on the TT path.
	d = NUM_DIM if flag_tt_network is True else 1
	for i in range(d):
		dict_feeder[dict_placeholders['tfph_validation_data_%d' % (i + 1)]] = dict_dataset['validation']['validation_data'][i][n_index_head:n_index_end]
		dict_feeder[dict_placeholders['tfph_mean_%d' % (i + 1)]] = dict_mean_std['mean']['mean'][i]
		dict_feeder[dict_placeholders['tfph_std_%d' % (i + 1)]] = dict_mean_std['std']['std'][i]

	return dict_feeder
