import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

import Layers
import LSTMs

dataset_path = 'D:/Datasets/Mnist/'
NUM_CLASSES = 10


# get dataset (already in float32 by tensorflow.examples.tutorials.mnist)
# get dataset (already in float32 by tensorflow.examples.tutorials.mnist)
def get_dataset(str_restore_ckpt = None, flag_sample_seed = None):
	"""Load MNIST from dataset_path and pack it into a nested dict.

	The official validation and test splits are merged into a single
	'validation' set; the train split is used as-is. Both parameters are
	currently unused and kept only for caller compatibility.

	Returns:
		dict with keys 'train' and 'validation', each holding a
		'*_labels' / '*_data' pair of numpy arrays.
	"""
	data_sets = input_data.read_data_sets(dataset_path, False)

	# Fold the held-out test split into the validation set so evaluation
	# covers both (images stacked row-wise, labels concatenated).
	merged_eval_data = np.vstack((data_sets.validation.images, data_sets.test.images))
	merged_eval_labels = np.hstack((data_sets.validation.labels, data_sets.test.labels))

	return {
		'train' : {
			'train_labels' : data_sets.train.labels,
			'train_data' : data_sets.train.images
			},
		'validation' : {
			'validation_labels' : merged_eval_labels,
			'validation_data' : merged_eval_data
			}
		}


# orgnize train or val data each time, which is flag_batch_size
# orgnize train or val data each time, which is flag_batch_size
def construct_batch_part(flag_batch_size):
	"""Create input placeholders and the reshaped batch tensors fed to the net.

	Args:
		flag_batch_size: fixed number of samples per batch (placeholder dim 0).

	Returns:
		dict with:
			'batches': reshaped (batch, 28, 28) image tensors plus label tensors,
			'input_placeholders': the raw feedable placeholders.
	"""
	flat_image_shape = [flag_batch_size, 28 * 28]
	label_shape = [flag_batch_size]

	# Flattened-image placeholders for the train and validation streams.
	tfph_train_data = tf.placeholder(dtype = tf.float32, shape = flat_image_shape, name = 'ph_train_data')
	tfph_validation_data = tf.placeholder(dtype = tf.float32, shape = flat_image_shape, name = 'ph_validation_data')

	# Integer class-label placeholders.
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = label_shape, name = 'ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = label_shape, name = 'ph_validation_labels')

	# Reinterpret each flat 784-vector as a 28x28 grid (rows become the
	# sequence dimension consumed by the LSTM layers downstream).
	batch_train_data = tf.reshape(tfph_train_data, [-1, 28, 28])
	batch_validation_data = tf.reshape(tfph_validation_data, [-1, 28, 28])

	return {
		'batches' : {
			'batch_train_data' : batch_train_data,
			'batch_train_labels' : tfph_train_labels,
			'batch_validation_data' : batch_validation_data,
			'batch_validation_labels' : tfph_validation_labels
			},
		'input_placeholders' : {
			'tfph_train_data' : tfph_train_data,
			'tfph_train_labels' : tfph_train_labels,
			'tfph_validation_data' : tfph_validation_data,
			'tfph_validation_labels' : tfph_validation_labels
			}
		}


# orgnize a batch of train data, combining with construct_batch_part
# orgnize a batch of train data, combining with construct_batch_part
def get_batch_part_train(dict_dataset, dict_placeholders, n_index_head, flag_batch_size):
	"""Build a feed_dict with one batch of training data and labels.

	Args:
		dict_dataset: dataset dict as returned by get_dataset().
		dict_placeholders: 'input_placeholders' dict from construct_batch_part().
		n_index_head: index of the first sample of the requested batch.
		flag_batch_size: number of samples per batch.

	Returns:
		feed_dict mapping the train placeholders to numpy slices of length
		flag_batch_size (shorter only if the dataset itself is smaller).
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the dataset end. Using n_size (not n_size - 1, which
		# silently dropped the final sample) keeps the last element reachable;
		# the head is pulled back so the slice keeps flag_batch_size elements,
		# and max(0, ...) guards datasets smaller than one batch.
		n_index_end = n_size
		n_index_head = max(0, n_index_end - flag_batch_size)

	# only train placeholders are used
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : dict_dataset['train']['train_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		}

	return dict_feeder


# orgnize a batch of validation data, combining with construct_batch_part
# orgnize a batch of validation data, combining with construct_batch_part
def get_batch_part_validation(dict_dataset, dict_placeholders, n_index_head, flag_batch_size):
	"""Build a feed_dict with one batch of validation data and labels.

	Args:
		dict_dataset: dataset dict as returned by get_dataset().
		dict_placeholders: 'input_placeholders' dict from construct_batch_part().
		n_index_head: index of the first sample of the requested batch.
		flag_batch_size: number of samples per batch.

	Returns:
		feed_dict mapping the validation placeholders to numpy slices of
		length flag_batch_size (shorter only if the dataset is smaller).
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# Clamp to the dataset end. Using n_size (not n_size - 1, which
		# silently dropped the final sample) keeps the last element reachable;
		# the head is pulled back so the slice keeps flag_batch_size elements,
		# and max(0, ...) guards datasets smaller than one batch.
		n_index_end = n_size
		n_index_head = max(0, n_index_end - flag_batch_size)

	# only validation placeholders are used
	dict_feeder = {
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_validation_data'] : dict_dataset['validation']['validation_data'][n_index_head:n_index_end],
		}

	return dict_feeder


# common LSTM
# common LSTM
def _network(data, labels, tfv_train_phase = None):
	"""Build the baseline two-layer LSTM classifier graph.

	Args:
		data: input batch tensor fed to the first LSTM layer.
		labels: int class labels, one per sample.
		tfv_train_phase: train/eval phase flag passed through to the layers.

	Returns:
		(total_loss, evaluation): scalar loss tensor (gated on the moving
		average update) and an int32 per-sample top-1 correctness tensor.
	"""
	name = 'network_normal_LSTM_MNIST'

	# Stacked LSTMs, then a linear classifier on the last time step.
	with tf.variable_scope(name, reuse = tf.AUTO_REUSE):
		lstm_1 = LSTMs.lstm_layer(data, 1296, tfv_train_phase, 0.9, name_scope = 'lstm_1')
		lstm_2 = LSTMs.lstm_layer(lstm_1, 256, tfv_train_phase, 0.9, name_scope = 'lstm_2')
		logits = Layers.fc(tf.squeeze(lstm_2[:,:,-1]), NUM_CLASSES, act_last = False, name_scope = 'fc_out')

	# Mean cross-entropy; total_loss is its (single-term) sum, tracked with
	# an exponential moving average whose update gates the returned loss.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses, total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# 1 where the top-1 prediction matches the label, else 0.
	correct_flags = tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# KCP-LSTM
# KCP-LSTM
def _network_kcp(flag_KT_rank, flag_rankA, flag_rankB, data, labels, tfv_train_phase = None):
	"""Build the KCP-LSTM classifier graph: a plain first LSTM layer, a
	Kronecker-CP-factorized second LSTM layer, and a linear classifier.

	Args:
		flag_KT_rank: number of Kronecker terms; also sizes the rank list.
		flag_rankA, flag_rankB: per-term ranks, repeated flag_KT_rank times
			each to form the rank list passed to kcp_lstm_layer.
		data: input batch tensor fed to the first LSTM layer.
		labels: int class labels, one per sample.
		tfv_train_phase: train/eval phase flag passed through to the layers.

	Returns:
		(total_loss, evaluation): scalar loss tensor (gated on the moving
		average update) and an int32 per-sample top-1 correctness tensor.
	"""
	name = 'network_KCP_LSTM_MNIST'

	with tf.variable_scope(name, reuse = tf.AUTO_REUSE):
		l_layers = []
		l_layers.append(data)
		l_layers.append(LSTMs.lstm_layer(l_layers[-1], 1296, tfv_train_phase, 0.9, name_scope = 'lstm_1'))
		# NOTE(review): the two shape lists presumably factorize the input
		# (1296 = product of [2,3,2,3,2,3,2,3]... actually 1296 = 2^4 * 3^4)
		# and output (256 = 2^8) dimensions of the 256-unit layer — confirm
		# against LSTMs.kcp_lstm_layer's signature.
		l_layers.append(LSTMs.kcp_lstm_layer(l_layers[-1], 256, [2,3,2,3,2,3,2,3], [2,2,2,2,2,2,2,2], 
			flag_KT_rank, [flag_rankA for i in range(flag_KT_rank)] + [flag_rankB for i in range(flag_KT_rank)], None, tfv_train_phase, 0.9, name_scope = 'lstm_2'))
		# Classify from the last time step of the final LSTM output.
		l_layers.append(Layers.fc(tf.squeeze(l_layers[-1][:,:,-1]), NUM_CLASSES, act_last = False, name_scope = 'fc_out'))
	
	# Mean cross-entropy; the moving-average update op gates the returned loss.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = l_layers[-1], name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)
	# 1 where the top-1 prediction matches the label, else 0.
	correct_flags = tf.nn.in_top_k(l_layers[-1], labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# get the network training and validation output respectively, including loss and evalation
# get the network training and validation output respectively, including loss and evalation
def get_network_output(flag_model, flag_KT_rank, flag_rankA, flag_rankB, t_data, t_labels, v_data, v_labels, tfv_train_phase):
	"""Build the train and validation graphs for the selected model.

	Args:
		flag_model: 0 for the plain LSTM, 1 for the KCP-LSTM.
		flag_KT_rank, flag_rankA, flag_rankB: KCP factorization parameters
			(ignored when flag_model == 0).
		t_data, t_labels: training batch tensors.
		v_data, v_labels: validation batch tensors.
		tfv_train_phase: train/eval phase flag passed to the networks.

	Returns:
		(loss_train, eval_train, loss_validation, eval_validation).

	Raises:
		ValueError: if flag_model is neither 0 nor 1.
	"""
	if flag_model == 0:
		loss_train, eval_train = _network(t_data, t_labels, tfv_train_phase)
		loss_validation, eval_validation = _network(v_data, v_labels, tfv_train_phase)
	elif flag_model == 1:
		loss_train, eval_train = _network_kcp(flag_KT_rank, flag_rankA, flag_rankB, t_data, t_labels, tfv_train_phase)
		loss_validation, eval_validation = _network_kcp(flag_KT_rank, flag_rankA, flag_rankB, v_data, v_labels, tfv_train_phase)
	else:
		# Previously an unknown flag fell through and surfaced as an obscure
		# UnboundLocalError at the return statement; fail explicitly instead.
		raise ValueError('unknown flag_model: {}'.format(flag_model))

	return loss_train, eval_train, loss_validation, eval_validation
