import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import Layers


# Directory the MNIST archive files are read from (downloaded if absent).
dataset_path = 'D:/Datasets/Mnist/'
# Number of MNIST digit classes (0-9); width of the final linear layer.
NUM_CLASSES = 10


def get_dataset():
	"""Load MNIST and return (dict_dataset, dict_mean_std).

	dict_dataset holds the training split and an evaluation split made by
	stacking the official validation and test sets. dict_mean_std holds the
	per-pixel mean and std of the training images, for input normalization.
	"""
	data_sets = input_data.read_data_sets(dataset_path, False)

	train_images = data_sets.train.images
	# Merge validation + test into one larger evaluation split.
	eval_images = np.vstack((data_sets.validation.images, data_sets.test.images))
	eval_labels = np.hstack((data_sets.validation.labels, data_sets.test.labels))

	dict_dataset = {
		'train': {
			'train_labels': data_sets.train.labels,
			'train_data': train_images,
		},
		'validation': {
			'validation_labels': eval_labels,
			'validation_data': eval_images,
		},
	}

	# Statistics are computed from the training images only.
	dict_mean_std = {
		'mean': {'mean': np.mean(train_images, axis=0)},
		'std': {'std': np.std(train_images, axis=0)},
	}

	return dict_dataset, dict_mean_std


# Builds the placeholders through which one batch of flag_batch_size samples
# is fed to the network (training and validation), plus mean/std inputs.
def construct_batch_part(dict_mean_std, flag_batch_size):
	"""Create the per-batch input placeholders and return them in a dict.

	The per-sample shape is taken from the stored mean vector, so the data
	placeholders are shaped [flag_batch_size] + sample_shape.
	"""
	shape_data = list(dict_mean_std['mean']['mean'].shape)
	batch_shape = [flag_batch_size] + shape_data

	# One data placeholder per split.
	tfph_train_data = tf.placeholder(dtype=tf.float32, shape=batch_shape, name='ph_train_data')
	tfph_validation_data = tf.placeholder(dtype=tf.float32, shape=batch_shape, name='ph_validation_data')

	# Normalization statistics share the per-sample shape (no batch axis).
	tfph_mean = tf.placeholder(dtype=tf.float32, shape=shape_data, name='ph_mean')
	tfph_std = tf.placeholder(dtype=tf.float32, shape=shape_data, name='ph_std')

	# Integer class labels, one per sample in the batch.
	tfph_train_labels = tf.placeholder(dtype=tf.int32, shape=[flag_batch_size], name='ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype=tf.int32, shape=[flag_batch_size], name='ph_validation_labels')

	return {
		'input_placeholders': {
			'tfph_train_labels': tfph_train_labels,
			'tfph_validation_labels': tfph_validation_labels,
			'tfph_train_data': tfph_train_data,
			'tfph_validation_data': tfph_validation_data,
			'tfph_mean': tfph_mean,
			'tfph_std': tfph_std,
		},
	}


# Builds the feed_dict for one training batch of flag_batch_size samples;
# paired with construct_batch_part.
def get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Return a feed_dict mapping the train placeholders to one batch.

	n_index_head is the index of the first sample of the batch. If the
	requested window would run past the end of the data, it is shifted back
	so the batch still contains exactly flag_batch_size samples.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to n_size, not n_size - 1: the old off-by-one clamp
		# permanently dropped the final sample from every shifted batch.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the train placeholders (plus mean/std) are fed here.
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : dict_dataset['train']['train_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder


# Builds the feed_dict for one validation batch of flag_batch_size samples;
# paired with construct_batch_part.
def get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Return a feed_dict mapping the validation placeholders to one batch.

	n_index_head is the index of the first sample of the batch. If the
	requested window would run past the end of the data, it is shifted back
	so the batch still contains exactly flag_batch_size samples.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to n_size, not n_size - 1: the old off-by-one clamp
		# permanently dropped the final sample from every shifted batch.
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# Only the validation placeholders (plus mean/std) are fed here.
	dict_feeder = {
		dict_placeholders['tfph_validation_data'] : dict_dataset['validation']['validation_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder


# Plain (uncompressed) network: three dense ReLU layers with dropout.
# Used for training and as the baseline the compressed nets are measured against.
def network(data, labels, b_reuse, tfv_train_phase = None):
	"""Build the dense baseline graph; return (total_loss, evaluation)."""
	name = 'network_mnist'

	# Keep-probability schedule: p while training, 1.0 at inference.
	# Without a train-phase flag, dropout is always disabled.
	if tfv_train_phase is None:
		dropout_rate = lambda p: p * 0.0 + 1.0
	else:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0

	with tf.variable_scope(name, reuse = b_reuse):
		net = data

		net = Layers.linear(net, 1296, name_scope = 'linear_1')
		net = tf.nn.relu(net, name = 'relu_linear_1')
		net = tf.nn.dropout(net, dropout_rate(0.9), name = 'dropout_1')

		net = Layers.linear(net, 4096, name_scope = 'linear_2')
		net = tf.nn.relu(net, name = 'relu_linear_2')
		net = tf.nn.dropout(net, dropout_rate(0.9), name = 'dropout_2')

		net = Layers.linear(net, 256, name_scope = 'linear_3')
		net = tf.nn.relu(net, name = 'relu_linear_3')
		net = tf.nn.dropout(net, dropout_rate(0.9), name = 'dropout_3')

		logits = Layers.linear(net, NUM_CLASSES, name_scope = 'linear_out')

	# Mean cross-entropy loss; an exponential moving average of the losses is
	# updated as a side effect every time total_loss is evaluated.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses, total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# Per-sample top-1 correctness flags, cast to int32 (1 = correct).
	correct_flags = tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# TT-compressed network: the dense layers are replaced by tensor-train layers.
# Used for training and as the baseline for the accelerated inference net.
def network_tt(data, labels, b_reuse, tfv_train_phase = None):
	"""Build the tensor-train compressed graph; return (total_loss, evaluation)."""
	name = 'network_tt_mnist'

	# Keep-probability schedule: p while training, 1.0 at inference.
	# NOTE(review): dropout_rate is defined but never applied in this network —
	# kept for parity with network(); confirm whether dropout was intended here.
	if tfv_train_phase is None:
		dropout_rate = lambda p: p * 0.0 + 1.0
	else:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(tfv_train_phase) + 1.0

	with tf.variable_scope(name, reuse = b_reuse):
		net = data

		net = Layers.tt_linear(net, 1296, [7,4,4,7], [6,6,6,6], [24,24,24], name_scope = 'linear_1')
		net = tf.nn.relu(net, name = 'relu_linear_1')

		net = Layers.tt_linear(net, 4096, [6,6,6,6], [8,8,8,8], [48,48,48], name_scope = 'linear_2')
		net = tf.nn.relu(net, name = 'relu_linear_2')

		net = Layers.tt_linear(net, 256, [8,8,8,8], [4,4,4,4], [32,32,32], name_scope = 'linear_3')
		net = tf.nn.relu(net, name = 'relu_linear_3')

		logits = Layers.linear(net, NUM_CLASSES, name_scope = 'linear_out')

	# Mean cross-entropy loss; an exponential moving average of the losses is
	# updated as a side effect every time total_loss is evaluated.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses, total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# Per-sample top-1 correctness flags, cast to int32 (1 = correct).
	correct_flags = tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# TT network assembled from inference-only accelerated layers; used to
# benchmark the speed-up of TT inference (no dropout, no variable reuse).
def network_acc(data, labels):
	"""Build the accelerated TT inference graph; return (total_loss, evaluation)."""
	name = 'network_acc_mnist'

	with tf.variable_scope(name):
		net = data

		net = Layers.vectors_to_tt(net, [7,4,4,7], [7,7,7], name_scope = 'infer_1')
		net = Layers.acc_linear(net, [7,4,4,7], [6,6,6,6], [24,24,24], name_scope = 'linear_1')
		net = tf.nn.relu(net, name = 'relu_linear_1')

		net = Layers.vectors_to_tt(net, [6,6,6,6], [6,6,6], name_scope = 'infer_2')
		net = Layers.acc_linear(net, [6,6,6,6], [8,8,8,8], [48,48,48], name_scope = 'linear_2')
		net = tf.nn.relu(net, name = 'relu_linear_2')

		net = Layers.vectors_to_tt(net, [8,8,8,8], [8,8,8], name_scope = 'infer_3')
		net = Layers.acc_linear(net, [8,8,8,8], [4,4,4,4], [32,32,32], name_scope = 'linear_3')
		net = tf.nn.relu(net, name = 'relu_linear_3')

		# expand_dims presumably restores the batch axis dropped by acc_linear —
		# confirm against Layers.acc_linear's output shape.
		logits = Layers.linear(tf.expand_dims(net, 0), NUM_CLASSES, name_scope = 'linear_out')

	# Mean cross-entropy loss; an exponential moving average of the losses is
	# updated as a side effect every time total_loss is evaluated.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses, total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# Per-sample top-1 correctness flags, cast to int32 (1 = correct).
	correct_flags = tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation



# Wires the batch placeholders into either the dense or the TT network,
# building both the training and the validation branch.
def get_network_output(dict_mean_std, flag_batch_size, flag_is_tt, tfv_train_phase = None):
	"""Build the train+validation graph and return its endpoints.

	Returns (placeholders, loss_train, eval_train, loss_validation,
	eval_validation). flag_is_tt selects network_tt over network.
	"""
	placeholders = construct_batch_part(dict_mean_std, flag_batch_size)['input_placeholders']

	# Pick the builder once; the validation branch reuses the variables
	# created by the training branch (b_reuse = True).
	build = network if flag_is_tt is False else network_tt

	with tf.device('/gpu:0'):
		loss_train, eval_train = build(placeholders['tfph_train_data'], placeholders['tfph_train_labels'], False, tfv_train_phase)
		loss_validation, eval_validation = build(placeholders['tfph_validation_data'], placeholders['tfph_validation_labels'], True, tfv_train_phase)

	return placeholders, loss_train, eval_train, loss_validation, eval_validation


# Builds the accelerated-inference graph: batch size 1, validation inputs only.
def get_network_acc(dict_mean_std):
	"""Return (placeholders, loss_validation, eval_validation) for network_acc."""
	placeholders = construct_batch_part(dict_mean_std, 1)['input_placeholders']

	loss_validation, eval_validation = network_acc(
		placeholders['tfph_validation_data'],
		placeholders['tfph_validation_labels'])

	return placeholders, loss_validation, eval_validation


# Lists the graph names of the trainable variables in network_tt:
# n_dim weight cores + biases per TT layer, plus the dense output layer.
def get_tt_variables(n_dim):
	"""Return the variable names of network_tt, given n_dim cores per layer."""
	prefix = 'network_tt_mnist'
	l_vars = []

	# Each TT linear layer exposes n_dim weight cores followed by its biases.
	for layer in ('linear_1', 'linear_2', 'linear_3'):
		for core in range(1, n_dim + 1):
			l_vars.append('%s/%s/var_weight_core_%d:0' % (prefix, layer, core))
		l_vars.append('%s/%s/var_biases:0' % (prefix, layer))

	# The output layer is an ordinary dense layer (weights + biases).
	l_vars.append(prefix + '/linear_out/var_weights:0')
	l_vars.append(prefix + '/linear_out/var_biases:0')

	return l_vars
