import os
import random
import h5py

import tensorflow as tf
import numpy as np

import Layers
import LSTMs


dataset_path = 'D:/Datasets/UCF11/stack_rnn/'  # root of the preprocessed UCF11 data: one .h5 file per clip (dataset key 'stack'), grouped in class sub-directories
NUM_CLASSES = 11       # number of action classes in UCF11
NORM_FRAMES = 6        # frames sampled from each clip and fed to the network
SAMPLE_WIDTH = 160     # frame width in pixels
SAMPLE_HEIGHT = 120    # frame height in pixels
N_VAL_CLIPS = 10       # clips extracted from each validation video (see get_batch_part_validation)


# augmentation
def _augmentation_online(input_frames, b_train):
	"""Convert one stacked uint8 clip to float32 in [-1, 1], with optional flip.

	Args:
		input_frames: uint8 tensor for a single clip, reshapeable to
			[SAMPLE_HEIGHT, SAMPLE_WIDTH, 3, NORM_FRAMES].
		b_train: Python bool; when True a random horizontal flip is applied
			(training-time augmentation only).

	Returns:
		float32 tensor of shape [SAMPLE_HEIGHT, SAMPLE_WIDTH, 3, NORM_FRAMES]
		with values scaled to [-1, 1].
	"""
	# Fold the frame axis into the channel axis so one flip op is applied
	# consistently to every frame of the clip.
	input_data = tf.reshape(input_frames, [SAMPLE_HEIGHT, SAMPLE_WIDTH, 3 * NORM_FRAMES])
	input_data = tf.image.convert_image_dtype(input_data, dtype = tf.float32)

	# Idiomatic truthiness test (was `if b_train is True:`; b_train is a plain bool).
	if b_train:
		input_data = tf.image.random_flip_left_right(input_data)

	# Map [0, 1] -> [-1, 1].
	input_data = tf.subtract(input_data, 0.5)
	input_data = tf.multiply(input_data, 2.0)
	return tf.reshape(input_data, [SAMPLE_HEIGHT, SAMPLE_WIDTH, 3, NORM_FRAMES])


# get dataset
def get_dataset(str_restore_ckpt = None, flag_sample_seed = None):
	"""Build a fresh UCF11 train/validation split, or reload a persisted one.

	Args:
		str_restore_ckpt: if None, scan `dataset_path`, generate a new split
			and persist it (labels.h5, train_data_list.txt, test_data_list.txt);
			otherwise reload the persisted split so a resumed run uses the
			identical data partition.
		flag_sample_seed: optional int seeding both `random` and `np.random`
			so the generated split is reproducible.

	Returns:
		dict with keys 'train' and 'validation'; each maps to a dict holding
		the np.int8 label array and the list of sample paths relative to
		`dataset_path`.
	"""
	lst_train = []
	lst_validation = []
	lst_train_label = []
	lst_validation_label = []

	if str_restore_ckpt is None:
		# assign an integer label to each class sub-directory
		dict_labels = {}
		lst_classes = os.listdir(dataset_path)
		i = 0
		for k in range(0, len(lst_classes)):
			if os.path.isdir(dataset_path + lst_classes[k]):
				dict_labels[lst_classes[k]] = i
				i = i + 1

		# extract 29 samples from each class as validation set, make train:val nearly 4:1
		for word in dict_labels:
			class_path = dataset_path + word + '/'
			lst_contents = os.listdir(class_path)
			if flag_sample_seed is not None:
				random.seed(flag_sample_seed)
			random.shuffle(lst_contents)
			n_val = 0
			for k in range(0, len(lst_contents)):
				if os.path.isfile(class_path + lst_contents[k]):
					if n_val >= 29:
						lst_train.append(word + '/' + lst_contents[k])
						lst_train_label.append(dict_labels[word])
					else:
						lst_validation.append(word + '/' + lst_contents[k])
						lst_validation_label.append(dict_labels[word])
						n_val += 1

		# complement: duplicate one random sample per split to round the sizes (1280:320)
		if flag_sample_seed is not None:
			np.random.seed(flag_sample_seed)
		t = np.random.randint(0, len(lst_train_label))
		lst_train.append(lst_train[t])
		lst_train_label.append(lst_train_label[t])
		if flag_sample_seed is not None:
			np.random.seed(flag_sample_seed)
		v = np.random.randint(0, len(lst_validation_label))
		lst_validation.append(lst_validation[v])
		lst_validation_label.append(lst_validation_label[v])

		# shuffle training set (reusing the same seed for data and labels keeps them aligned)
		if flag_sample_seed is not None:
			np.random.seed(flag_sample_seed)
		n_seed = np.random.randint(0, 100)
		random.seed(n_seed)
		random.shuffle(lst_train)
		random.seed(n_seed)
		random.shuffle(lst_train_label)

		# shuffle validation set
		if flag_sample_seed is not None:
			np.random.seed(flag_sample_seed)
		n_seed = np.random.randint(0, 100)
		random.seed(n_seed)
		random.shuffle(lst_validation)
		random.seed(n_seed)
		random.shuffle(lst_validation_label)

		# persist the labels (path written without the spurious './' so it
		# matches the reader branch below byte-for-byte)
		with h5py.File(dataset_path + 'labels.h5', 'w') as file:
			arr_train_label = np.array(lst_train_label, dtype = np.int8)
			arr_validation_label = np.array(lst_validation_label, dtype = np.int8)
			file.create_dataset('train_label_list', data = arr_train_label)
			file.create_dataset('test_label_list', data = arr_validation_label)

		# persist the data lists; `with` guarantees the files are closed even on error
		with open(dataset_path + 'train_data_list.txt', 'w') as file_train_list:
			for data in lst_train:
				file_train_list.write(data)
				file_train_list.write('\n')

		with open(dataset_path + 'test_data_list.txt', 'w') as file_test_list:
			for data in lst_validation:
				file_test_list.write(data)
				file_test_list.write('\n')
	else:
		# reload the training data list (stop at the first blank line)
		with open(dataset_path + 'train_data_list.txt', 'r') as f_train_list:
			for data in f_train_list:
				if data.strip() == '':
					break
				else:
					lst_train.append(data.strip('\n'))

		# reload the validation data list
		with open(dataset_path + 'test_data_list.txt', 'r') as f_test_list:
			for data in f_test_list:
				if data.strip() == '':
					break
				else:
					lst_validation.append(data.strip('\n'))

		# reload the labels; `dataset[()]` replaces the `.value` attribute
		# that was removed in h5py 3.0
		with h5py.File(dataset_path + 'labels.h5', 'r') as file:
			arr_train_label = file['train_label_list'][()]
			arr_validation_label = file['test_label_list'][()]

	dict_dataset = {}
	dict_dataset['train'] = {
		'train_labels' : arr_train_label,
		'train_data_list' : lst_train
		}
	dict_dataset['validation'] = {
		'validation_labels' : arr_validation_label,
		'validation_data_list' : lst_validation
		}

	return dict_dataset


# orgnize train or val data each time, which is flag_batch_size
def construct_batch_part(flag_batch_size):
	"""Create the input placeholders and the per-sample augmentation subgraph.

	Args:
		flag_batch_size: static batch size baked into the placeholder shapes.

	Returns:
		dict with:
			'batches': augmented float32 batch tensors reshaped to
				[-1, SAMPLE_HEIGHT * SAMPLE_WIDTH * 3, NORM_FRAMES], plus the
				label tensors;
			'input_placeholders': the raw uint8 data / int32 label
				placeholders to feed.
	"""
	shape_data = [SAMPLE_HEIGHT, SAMPLE_WIDTH, 3, NORM_FRAMES]

	# train placeholder
	tfph_train_data = tf.placeholder(dtype = tf.uint8, shape = [flag_batch_size] + shape_data, name = 'ph_train_data')

	# val placeholder
	tfph_validation_data = tf.placeholder(dtype = tf.uint8, shape = [flag_batch_size] + shape_data, name = 'ph_validation_data')

	# labels placeholders
	tfph_train_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_train_labels')
	tfph_validation_labels = tf.placeholder(dtype = tf.int32, shape = [flag_batch_size], name = 'ph_validation_labels')

	# Augment every sample, then stack once. The previous incremental
	# expand_dims + concat chain re-copied the growing batch on every
	# iteration; a single tf.stack yields the identical tensor value.
	lst_train_aug = []
	lst_validation_aug = []
	for k in range(flag_batch_size):
		# online augmentation (random flip only on the training stream)
		lst_train_aug.append(_augmentation_online(tfph_train_data[k], True))
		lst_validation_aug.append(_augmentation_online(tfph_validation_data[k], False))
	batch_train_data = tf.stack(lst_train_aug, axis = 0)
	batch_validation_data = tf.stack(lst_validation_aug, axis = 0)

	result = {}
	result['batches'] = {
		'batch_train_data' : tf.reshape(batch_train_data, [-1, SAMPLE_HEIGHT * SAMPLE_WIDTH * 3, NORM_FRAMES]),
		'batch_train_labels' : tfph_train_labels,
		'batch_validation_data' : tf.reshape(batch_validation_data, [-1, SAMPLE_HEIGHT * SAMPLE_WIDTH * 3, NORM_FRAMES]),
		'batch_validation_labels' : tfph_validation_labels
		}
	result['input_placeholders'] = {
		'tfph_train_data' : tfph_train_data,
		'tfph_train_labels' : tfph_train_labels,
		'tfph_validation_data' : tfph_validation_data,
		'tfph_validation_labels' : tfph_validation_labels
		}
	return result


# orgnize a batch of train data, combining with construct_batch_part
def get_batch_part_train(dict_dataset, dict_placeholders, n_index_head, flag_batch_size):
	"""Load one batch of training clips from disk and build the feed dict.

	Args:
		dict_dataset: dataset dict returned by get_dataset().
		dict_placeholders: 'input_placeholders' dict from construct_batch_part().
		n_index_head: index of the first sample of the batch.
		flag_batch_size: number of samples per batch.

	Returns:
		feed dict mapping the train placeholders to a uint8 volume of shape
		[batch, SAMPLE_HEIGHT, SAMPLE_WIDTH, 3, NORM_FRAMES] and the matching
		labels, or None if any sample file is missing.
	"""
	n_size = dict_dataset['train']['train_labels'].shape[0]

	# Clamp the window to the end of the epoch. The end index is exclusive,
	# so it must be n_size (not n_size - 1, which silently excluded the last
	# training sample from every epoch).
	n_index_end = n_index_head + flag_batch_size
	if n_index_end > n_size:
		n_index_end = n_size
		n_index_head = n_index_end - flag_batch_size

	# read the data that will be fed from ['train']['train_data_list']
	lst_volume = []
	for data_str in dict_dataset['train']['train_data_list'][n_index_head:n_index_end]:
		if not os.path.exists(dataset_path + data_str):
			return None
		# `dataset[()]` replaces the `.value` attribute removed in h5py 3.0
		with h5py.File(dataset_path + data_str, 'r') as file:
			arr_ori_frames = file['stack'][()]

		# randomly extract NORM_FRAMES frames (anchors sorted to keep temporal order)
		n_frame_num = arr_ori_frames.shape[0]
		n_anchors = np.random.randint(0, n_frame_num, size = NORM_FRAMES)
		n_anchors.sort()
		lst_frames = [arr_ori_frames[n_anchors[f]] for f in range(NORM_FRAMES)]
		lst_volume.append(np.array(lst_frames, dtype = np.uint8))

	# [batch, frames, H, W, C] -> [batch, H, W, C, frames]
	arr_volume = np.transpose(np.array(lst_volume), [0, 2, 3, 4, 1])

	# only train placeholders are used
	dict_feeder = {
		dict_placeholders['tfph_train_data'] : arr_volume,
		dict_placeholders['tfph_train_labels'] : dict_dataset['train']['train_labels'][n_index_head:n_index_end],
	}

	return dict_feeder


# orgnize a batch of validation data, combining with construct_batch_part
def get_batch_part_validation(dict_dataset, dict_placeholders, n_index_head, n_true_samples):
	"""Load one batch of validation clips from disk and build the feed dict.

	Each source video contributes N_VAL_CLIPS clips of NORM_FRAMES frames,
	extracted at deterministic positions (NNI strategy), so the fed batch
	holds n_true_samples * N_VAL_CLIPS clips with labels replicated per clip.

	Args:
		dict_dataset: dataset dict returned by get_dataset().
		dict_placeholders: 'input_placeholders' dict from construct_batch_part().
		n_index_head: index of the first source video of the batch.
		n_true_samples: number of source videos per batch.

	Returns:
		feed dict for the validation placeholders, or None if any sample
		file is missing.
	"""
	n_size = dict_dataset['validation']['validation_labels'].shape[0]

	# Clamp the window to the end of the set. The end index is exclusive,
	# so it must be n_size (not n_size - 1, which silently skipped the last
	# validation video).
	n_index_end = n_index_head + n_true_samples
	if n_index_end > n_size:
		n_index_end = n_size
		n_index_head = n_index_end - n_true_samples

	# read the data that will be fed from ['validation']['validation_data_list']
	lst_volume = []
	for data_str in dict_dataset['validation']['validation_data_list'][n_index_head:n_index_end]:
		if not os.path.exists(dataset_path + data_str):
			return None
		# `dataset[()]` replaces the `.value` attribute removed in h5py 3.0
		with h5py.File(dataset_path + data_str, 'r') as file:
			arr_ori_frames = file['stack'][()]

		# fixedly extract N_VAL_CLIPS clips each of which includes NORM_FRAMES frames following NNI strategy
		n_frame_num = arr_ori_frames.shape[0]
		l_anchors = []
		if N_VAL_CLIPS * NORM_FRAMES - n_frame_num > 0:
			# video shorter than the total clip span: clip starts overlap
			n_overlap = int(np.floor((N_VAL_CLIPS * NORM_FRAMES - n_frame_num) / (N_VAL_CLIPS - 1)))
			for i in range(N_VAL_CLIPS):
				l_anchors.append(i * (NORM_FRAMES - n_overlap))
				if l_anchors[-1] + NORM_FRAMES > n_frame_num:
					# pull the anchor back so the clip fits, then clamp into [0, n_frame_num)
					diff = l_anchors[-1] + NORM_FRAMES - n_frame_num
					l_anchors[-1] = l_anchors[-1] - diff
					if l_anchors[-1] >= n_frame_num:
						l_anchors[-1] = n_frame_num - 1
					if l_anchors[-1] < 0:
						l_anchors[-1] = 0
		else:
			# video longer than the total clip span: clip starts are spaced apart
			n_overlap = int(np.floor((n_frame_num - N_VAL_CLIPS * NORM_FRAMES) / (N_VAL_CLIPS - 1)))
			for i in range(N_VAL_CLIPS):
				l_anchors.append(i * (NORM_FRAMES + n_overlap))
				if l_anchors[-1] + NORM_FRAMES > n_frame_num:
					diff = l_anchors[-1] + NORM_FRAMES - n_frame_num
					l_anchors[-1] = l_anchors[-1] - diff
					if l_anchors[-1] >= n_frame_num:
						l_anchors[-1] = n_frame_num - 1
					if l_anchors[-1] < 0:
						l_anchors[-1] = 0
		# sentinel anchor so the last clip's range ends at the final frame
		l_anchors.append(n_frame_num - 1)

		# NNI in each CLIP: step through [anchor_i, anchor_i+1) at a fixed stride
		for i in range(N_VAL_CLIPS):
			lst_frames = []
			n_anchor = l_anchors[i]
			n_range = l_anchors[i + 1] - l_anchors[i]
			n_step = int(np.floor(n_range / NORM_FRAMES))
			if n_step == 0:
				n_step = 1
			for f in range(NORM_FRAMES):
				lst_frames.append(arr_ori_frames[n_anchor])
				n_anchor = n_anchor + n_step
				if n_anchor >= n_frame_num:
					n_anchor = n_frame_num - 1
			lst_volume.append(np.array(lst_frames))

	# [clips, frames, H, W, C] -> [clips, H, W, C, frames]
	arr_volume = np.transpose(np.array(lst_volume), [0, 2, 3, 4, 1])

	# replicate each video label once per extracted clip
	l_val_labels = []
	arr_labels = dict_dataset['validation']['validation_labels'][n_index_head:n_index_end]
	for i in range(arr_labels.shape[0]):
		for j in range(N_VAL_CLIPS):
			l_val_labels.append(arr_labels[i])

	# only validation placeholders are used
	dict_feeder = {
		dict_placeholders['tfph_validation_data'] : arr_volume,
		dict_placeholders['tfph_validation_labels'] : np.array(l_val_labels),
	}

	return dict_feeder


# common LSTM
def _network(data, labels, tfv_train_phase = None):
	"""Plain-LSTM tower: a 256-unit LSTM layer followed by a fully connected
	output layer; returns the moving-average-tracked total loss and a per-sample
	top-1 correctness tensor."""
	name = 'network_normal_LSTM_UCF11'

	with tf.variable_scope(name, reuse = tf.AUTO_REUSE):
		lstm_out = LSTMs.lstm_layer(data, 256, tfv_train_phase, 0.75, name_scope = 'lstm_1')
		# Classify from the last time step of the LSTM output.
		# NOTE(review): tf.squeeze drops ALL size-1 dims, so a batch of 1
		# would also lose its batch axis here - confirm batch size is always > 1.
		logits = Layers.fc(tf.squeeze(lstm_out[:,:,-1]), NUM_CLASSES, act_last = False, name_scope = 'fc_out')

	# Per-sample cross-entropy, averaged over the batch.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)

	# Maintain exponential moving averages of the losses; wrapping the return
	# value in tf.identity forces the average update to run whenever the loss
	# is evaluated.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# 1 where the top-1 prediction matches the label, 0 otherwise.
	evaluation = tf.cast(tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name), tf.int32)

	return total_loss, evaluation


# KCP-LSTM
def _network_kcp(flag_KT_rank, flag_rankA, flag_rankB, data, labels, tfv_train_phase = None):
	"""KCP-LSTM tower: a 256-unit kcp_lstm_layer followed by a fully connected
	output layer; returns the moving-average-tracked total loss and a per-sample
	top-1 correctness tensor."""
	name = 'network_KCP_LSTM_UCF11'

	with tf.variable_scope(name, reuse = tf.AUTO_REUSE):
		# Rank list: flag_KT_rank copies of rankA followed by flag_KT_rank copies of rankB.
		l_ranks = [flag_rankA for i in range(flag_KT_rank)] + [flag_rankB for i in range(flag_KT_rank)]
		lstm_out = LSTMs.kcp_lstm_layer(data, 256, [8,5,6,6,5,8], [2,4,2,2,4,2],
			flag_KT_rank, l_ranks, None, tfv_train_phase, 0.75, name_scope = 'lstm_1')
		# Classify from the last time step of the LSTM output.
		# NOTE(review): tf.squeeze drops ALL size-1 dims, so a batch of 1
		# would also lose its batch axis here - confirm batch size is always > 1.
		logits = Layers.fc(tf.squeeze(lstm_out[:,:,-1]), NUM_CLASSES, act_last = False, name_scope = 'fc_out')

	# Per-sample cross-entropy, averaged over the batch.
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits, name = 'softmax_xentropy' + name)
	losses = tf.reduce_mean(xentropy, name = 'losses' + name)
	total_loss = tf.add_n([losses], name = 'total_loss' + name)

	# Maintain exponential moving averages of the losses; wrapping the return
	# value in tf.identity forces the average update to run whenever the loss
	# is evaluated.
	loss_averages = tf.train.ExponentialMovingAverage(0.99, name = 'avg_loss' + name)
	tfop_loss_averages = loss_averages.apply([losses] + [total_loss])
	with tf.control_dependencies([tfop_loss_averages]):
		total_loss = tf.identity(total_loss)

	# 1 where the top-1 prediction matches the label, 0 otherwise.
	evaluation = tf.cast(tf.nn.in_top_k(logits, labels, 1, name = 'eval' + name), tf.int32)

	return total_loss, evaluation


# get the network training and validation output respectively, including loss and evalation
def get_network_output(flag_model, flag_KT_rank, flag_rankA, flag_rankB, t_data, t_labels, v_data, v_labels, tfv_train_phase):
	"""Build the train and validation towers for the selected model.

	Args:
		flag_model: 0 -> plain LSTM (_network), 1 -> KCP-LSTM (_network_kcp).
		flag_KT_rank, flag_rankA, flag_rankB: rank settings forwarded to
			_network_kcp (ignored when flag_model == 0).
		t_data, t_labels: training batch tensors.
		v_data, v_labels: validation batch tensors.
		tfv_train_phase: training-phase flag forwarded to the network builders.

	Returns:
		(loss_train, eval_train, loss_validation, eval_validation)

	Raises:
		ValueError: if flag_model is neither 0 nor 1 (previously this
		surfaced only as an opaque UnboundLocalError at the return).
	"""
	if flag_model == 0:
		loss_train, eval_train = _network(t_data, t_labels, tfv_train_phase)
		loss_validation, eval_validation = _network(v_data, v_labels, tfv_train_phase)
	elif flag_model == 1:
		loss_train, eval_train = _network_kcp(flag_KT_rank, flag_rankA, flag_rankB, t_data, t_labels, tfv_train_phase)
		loss_validation, eval_validation = _network_kcp(flag_KT_rank, flag_rankA, flag_rankB, v_data, v_labels, tfv_train_phase)
	else:
		raise ValueError('unsupported flag_model: {}'.format(flag_model))

	return loss_train, eval_train, loss_validation, eval_validation
