import tensorflow as tf
import numpy as np
import Layers


# Absolute path to the pre-packed CIFAR-10 archive; get_dataset() reads the
# keys train_images / train_labels / validation_images / validation_labels.
dataset_path = 'D:/Datasets/CIFAR10/cifar.npz'
NUM_CLASSES = 10

# Tensor-Train factorization settings for each TT conv layer in _tt_network,
# keyed by '<stage>.<conv index>' (matching the 'conv_3x3_<key>' name scopes).
# Each value is [input-channel factorization, output-channel factorization,
# TT ranks]; e.g. '2.1' factors its 64 input channels as 4*4*4 and its 128
# output channels as 4*4*8.
# NOTE(review): the leading rank entry 9 presumably accounts for the 3x3
# spatial kernel (3*3 = 9) — confirm against Layers.tt_conv_2d.
dict_shaperank = {
	'1.2' : [[4,4,4], [4,4,4], [9,16,16]],
	'2.1' : [[4,4,4], [4,4,8], [9,16,32]],
	'2.2' : [[8,4,4], [4,4,8], [9,32,32]],
	'3.1' : [[8,4,4], [4,8,8], [9,32,32]],
	'3.2' : [[4,4,4,4], [4,4,4,4], [9,16,16,16]],
	'3.3' : [[4,4,4,4], [4,4,4,4], [9,16,16,16]],
	'4.1' : [[4,4,4,4], [4,4,4,8], [9,16,16,32]],
	'4.2' : [[8,4,4,4], [4,4,4,8], [9,32,32,32]],
	'4.3' : [[8,4,4,4], [4,4,4,8], [9,32,32,32]],
	'5.1' : [[8,4,4,4], [4,4,4,8], [9,32,32,32]],
	'5.2' : [[8,4,4,4], [4,4,4,8], [9,32,32,32]],
	'5.3' : [[8,4,4,4], [4,4,4,8], [9,32,32,32]],
	}


# common VGG
# common VGG
def _network(data, labels, b_reuse, tfv_train_phase = None):
	"""Build the dense VGG-16-style classifier plus its loss and evaluation.

	Args:
		data: input image batch tensor.
		labels: int labels tensor (sparse class ids).
		b_reuse: whether to reuse the variables of an earlier call.
		tfv_train_phase: train/eval phase variable passed through to Layers.

	Returns:
		(total_loss, evaluation): scalar loss tensor (gated on the moving-
		average update op) and a per-example int32 0/1 top-1 correctness tensor.
	"""
	name = 'network_normal'

	# (filters, conv count) per VGG stage; every stage but the last is
	# followed by a 2x2 max-pool, the last by global average pooling.
	vgg_stages = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]

	with tf.variable_scope(name, reuse = b_reuse):
		net = data
		for n_stage, (n_filters, n_convs) in enumerate(vgg_stages, start = 1):
			for n_conv in range(1, n_convs + 1):
				net = Layers.conv_2d(net, n_filters, [3, 3], tfv_train_phase = tfv_train_phase, name_scope = 'conv_3x3_%d.%d' % (n_stage, n_conv))
			if n_stage < len(vgg_stages):
				net = Layers.maxpool_2d(net, [2, 2], [2, 2], name_scope = 'maxpool_%d' % n_stage)
		net = Layers.avgpool_2d(net, [2, 2], [2, 2], padding = 'VALID', is_gap = True, name_scope = 'avg_pool')
		logits = Layers.fc(net, NUM_CLASSES, act_last = False, name_scope = 'fc_out')

	# cross-entropy loss with a 0.99 exponential moving average; total_loss is
	# re-identified under a control dependency so evaluating it updates the EMA
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses, total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# per-example top-1 correctness as int32 (1 = correct)
	correct_flags = tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# TT VGG
# TT VGG
def _tt_network(data, labels, b_reuse, tfv_train_phase = None):
	"""Build the Tensor-Train VGG classifier plus its loss and evaluation.

	Identical topology to _network, but every conv except the very first is a
	TT-factorized conv parameterized by dict_shaperank['<stage>.<conv>'].

	Args:
		data: input image batch tensor.
		labels: int labels tensor (sparse class ids).
		b_reuse: whether to reuse the variables of an earlier call.
		tfv_train_phase: train/eval phase variable passed through to Layers.

	Returns:
		(total_loss, evaluation): scalar loss tensor (gated on the moving-
		average update op) and a per-example int32 0/1 top-1 correctness tensor.
	"""
	name = 'network_tt'

	# (filters, conv count) per VGG stage; every stage but the last is
	# followed by a 2x2 max-pool, the last by global average pooling.
	tt_stages = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]

	with tf.variable_scope(name, reuse = b_reuse):
		net = data
		for n_stage, (n_filters, n_convs) in enumerate(tt_stages, start = 1):
			for n_conv in range(1, n_convs + 1):
				layer_key = '%d.%d' % (n_stage, n_conv)
				scope = 'conv_3x3_' + layer_key
				if layer_key == '1.1':
					# the first conv stays dense (no entry in dict_shaperank)
					net = Layers.conv_2d(net, n_filters, [3, 3], tfv_train_phase = tfv_train_phase, name_scope = scope)
				else:
					shape_in, shape_out, ranks = dict_shaperank[layer_key]
					net = Layers.tt_conv_2d(net, n_filters, [3, 3], shape_in, shape_out, ranks, tfv_train_phase = tfv_train_phase, name_scope = scope)
			if n_stage < len(tt_stages):
				net = Layers.maxpool_2d(net, [2, 2], [2, 2], name_scope = 'maxpool_%d' % n_stage)
		net = Layers.avgpool_2d(net, [2, 2], [2, 2], padding = 'VALID', is_gap = True, name_scope = 'avg_pool')
		logits = Layers.fc(net, NUM_CLASSES, act_last = False, name_scope = 'fc_out')

	# cross-entropy loss with a 0.99 exponential moving average; total_loss is
	# re-identified under a control dependency so evaluating it updates the EMA
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses, total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# per-example top-1 correctness as int32 (1 = correct)
	correct_flags = tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return total_loss, evaluation


# get cifar dataset
# get cifar dataset
def get_dataset():
	"""Load the pre-packed CIFAR-10 archive and compute train-set statistics.

	Returns:
		(dict_dataset, dict_mean_std): nested dicts holding the train and
		validation images (reshaped to [-1, 32, 32, 3]) with their labels, and
		the per-pixel mean / std computed over the training images.
	"""
	archive = np.load(dataset_path)

	train_images = archive['train_images'].reshape(-1, 32, 32, 3)
	train_labels = archive['train_labels']
	validation_images = archive['validation_images'].reshape(-1, 32, 32, 3)
	validation_labels = archive['validation_labels']

	dict_dataset = {
		'train' : {
			'train_labels' : train_labels,
			'train_data' : train_images
			},
		'validation' : {
			'validation_labels' : validation_labels,
			'validation_data' : validation_images
			}
		}

	dict_mean_std = {
		'mean' : {'mean' : np.mean(train_images, axis = 0)},
		'std' : {'std' : np.std(train_images, axis = 0)}
		}

	return dict_dataset, dict_mean_std


# construct dataset for each epoch
# construct dataset for each epoch
def construct_batch_part(dict_mean_std, flag_batch_size):
	"""Build the input pipeline: feed placeholders plus augmented batch tensors.

	Args:
		dict_mean_std: mean/std dict from get_dataset(); only its per-sample
			shape is used here, to size the placeholders.
		flag_batch_size: fixed batch size baked into the placeholder shapes.

	Returns:
		dict with keys 'batches' (label placeholders and the preprocessed
		train/validation data tensors) and 'input_placeholders' (everything a
		feed_dict must supply).
	"""
	shape_data = list(dict_mean_std['mean']['mean'].shape)

	# train placeholder
	tfph_train_data = tf.placeholder(dtype = tf.uint8, shape = [flag_batch_size] + shape_data, name = 'ph_train_data')

	# val placeholder
	tfph_validation_data = tf.placeholder(dtype = tf.uint8, shape = [flag_batch_size] + shape_data, name = 'ph_validation_data')

	# mean and std placeholder
	# NOTE(review): these are fed by the batch feeders but never applied in
	# this graph — the data is rescaled to [-1, 1] below rather than
	# standardized. Kept for interface compatibility; confirm intent.
	tfph_mean = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_mean')
	tfph_std = tf.placeholder(dtype = tf.float32, shape = shape_data, name = 'ph_std')

	# labels placeholder
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	# per-sample preprocessing; samples are collected in Python lists and
	# stacked once at the end (the previous expand_dims + concat per iteration
	# built a quadratic chain of concat ops in the graph)
	l_train_samples = []
	l_validation_samples = []
	for k in range(flag_batch_size):
		# sample one data
		input_train_data = tfph_train_data[k]
		input_validation_data = tfph_validation_data[k]

		# uint8 -> float32 in [0, 1]
		input_train_data = tf.image.convert_image_dtype(input_train_data, dtype = tf.float32)
		input_validation_data = tf.image.convert_image_dtype(input_validation_data, dtype = tf.float32)

		# augmentation (train side only): pad-and-random-crop back to 32x32,
		# horizontal flip, then photometric jitter
		input_train_data = tf.pad(input_train_data, [[4, 4], [4, 4], [0, 0]])
		input_train_data = tf.random_crop(input_train_data, [32, 32, 3])
		input_train_data = tf.image.random_flip_left_right(input_train_data)
		input_train_data = tf.image.random_brightness(input_train_data, max_delta=32. / 255.)
		input_train_data = tf.image.random_saturation(input_train_data, lower=0.5, upper=1.5)
		input_train_data = tf.image.random_hue(input_train_data, max_delta=0.2)
		input_train_data = tf.image.random_contrast(input_train_data, lower=0.5, upper=1.5)

		# [0, 1] -> [-1, 1]
		input_train_data = tf.subtract(input_train_data, 0.5)
		input_train_data = tf.multiply(input_train_data, 2.0)
		input_validation_data = tf.subtract(input_validation_data, 0.5)
		input_validation_data = tf.multiply(input_validation_data, 2.0)

		l_train_samples.append(input_train_data)
		l_validation_samples.append(input_validation_data)

	# single stack per batch; yields the same [batch, H, W, C] tensor as the
	# incremental concat did
	batch_train_data = tf.stack(l_train_samples, axis = 0)
	batch_validation_data = tf.stack(l_validation_samples, axis = 0)

	result = {}
	result['batches'] = {
		'batch_train_labels' : tfph_train_labels,
		'batch_validation_labels' : tfph_validation_labels,
		'batch_train_data' : batch_train_data,
		'batch_validation_data': batch_validation_data
		}
	result['input_placeholders'] = {
		'tfph_train_labels' : tfph_train_labels,
		'tfph_validation_labels' : tfph_validation_labels,
		'tfph_train_data' : tfph_train_data,
		'tfph_validation_data': tfph_validation_data,
		'tfph_mean' : tfph_mean,
		'tfph_std' : tfph_std
		}

	return result


# get a flag_batch_size for train
# get a flag_batch_size for train
def get_batch_part_train(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Build the feed_dict for one training batch starting at n_index_head.

	A batch that would run past the end of the dataset is shifted back so the
	final flag_batch_size samples are used instead.

	Args:
		dict_dataset: dataset dict from get_dataset().
		dict_mean_std: mean/std dict from get_dataset().
		dict_placeholders: the 'input_placeholders' dict from construct_batch_part().
		n_index_head: index of the first sample of the batch.
		flag_batch_size: number of samples per batch; must divide the dataset size.

	Returns:
		feed_dict mapping the train-side (plus mean/std) placeholders to
		numpy slices of the dataset.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]
	# fixed garbled assert message ("divided extractly")
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# clamp to the dataset end while keeping a full batch
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# only train placeholders
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : dict_dataset['train']['train_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder


# get a flag_batch_size for validation
# get a flag_batch_size for validation
def get_batch_part_validation(dict_dataset, dict_mean_std, dict_placeholders, n_index_head, flag_batch_size):
	"""Build the feed_dict for one validation batch starting at n_index_head.

	A batch that would run past the end of the dataset is shifted back so the
	final flag_batch_size samples are used instead.

	Args:
		dict_dataset: dataset dict from get_dataset().
		dict_mean_std: mean/std dict from get_dataset().
		dict_placeholders: the 'input_placeholders' dict from construct_batch_part().
		n_index_head: index of the first sample of the batch.
		flag_batch_size: number of samples per batch; must divide the dataset size.

	Returns:
		feed_dict mapping the validation-side (plus mean/std) placeholders to
		numpy slices of the dataset.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]
	# fixed garbled assert message ("divided extractly")
	assert n_size % flag_batch_size == 0, 'Batch size must divide the dataset size exactly.'

	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		# clamp to the dataset end while keeping a full batch
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# only validation placeholders
	dict_feeder = {
		dict_placeholders['tfph_validation_labels'] : dict_dataset['validation']['validation_labels'][n_index_head:n_index_end],
		dict_placeholders['tfph_validation_data'] : dict_dataset['validation']['validation_data'][n_index_head:n_index_end],
		dict_placeholders['tfph_mean'] : dict_mean_std['mean']['mean'],
		dict_placeholders['tfph_std'] : dict_mean_std['std']['std']
		}

	return dict_feeder


# get the network training and validation output respectively, including loss and evalation
# get the network training and validation output respectively, including loss and evalation
def get_network_output(i, t_data, t_labels, v_data, v_labels, tfv_train_phase):
	"""Instantiate the dense VGG twice (train + validation paths).

	Variables are created on the first call (i == 0) and reused afterwards;
	the validation path always reuses.

	Returns:
		(loss_train, eval_train, loss_validation, eval_validation)
	"""
	train_outputs = _network(t_data, t_labels, i > 0, tfv_train_phase)
	validation_outputs = _network(v_data, v_labels, True, tfv_train_phase)

	return train_outputs + validation_outputs


# get the tt network training and validation output respectively, including loss and evalation
# get the tt network training and validation output respectively, including loss and evalation
def get_tt_network_output(i, t_data, t_labels, v_data, v_labels, tfv_train_phase):
	"""Instantiate the TT VGG twice (train + validation paths).

	Variables are created on the first call (i == 0) and reused afterwards;
	the validation path always reuses.

	Returns:
		(loss_train, eval_train, loss_validation, eval_validation)
	"""
	train_outputs = _tt_network(t_data, t_labels, i > 0, tfv_train_phase)
	validation_outputs = _tt_network(v_data, v_labels, True, tfv_train_phase)

	return train_outputs + validation_outputs
