# This script is to prepare UCF50 dataset into 160x120 sequenced frames.

import os
import cv2
import h5py
import numpy as np


dataset_path = 'D:/Datasets/UCF50/video/'
stack_path = 'D:/Datasets/UCF50/stack/'


# dictionary of class name and label
g_dict_labels = {}

# number of videos processed per batch
g_batch_size = 100

TARGET_WIDTH = 160
TARGET_HEIGHT = 120


# construct dictionary of class name and label
def construct_dict():
	"""Populate g_dict_labels with {class_directory_name: integer_label}.

	Labels start at 0 and are assigned in os.listdir() order, one per
	subdirectory of dataset_path (plain files are ignored).

	Returns True on success, False when dataset_path does not exist.
	"""
	if not os.path.exists(dataset_path):
		return False

	label = 0
	for entry in os.listdir(dataset_path):
		# only subdirectories are classes; skip stray files
		if os.path.isdir(dataset_path + entry):
			g_dict_labels[entry] = label
			label += 1

	return True


# traverse dataset to calculate their downsampled frames and stack them
def Calc_frame_and_stack(n_nums, n_count, l_data = None, l_label = None):
	"""Resize every frame of each listed video to TARGET_WIDTH x
	TARGET_HEIGHT and save the frame stack as an HDF5 dataset 'stack'.

	n_nums  -- total number of videos in the whole run, for progress output
	n_count -- videos finished so far; the updated value is returned
	l_data  -- list of video file names (e.g. 'v_Foo_g01_c01.avi')
	l_label -- parallel list of integer class labels for l_data

	Returns the updated n_count, or None when either list is missing or
	their lengths differ.  A video counts as done and is skipped when its
	.h5 file already exists, it reports fewer than 2 frames, or no frame
	can be decoded.
	"""
	if l_data is None or l_label is None:
		return None
	if len(l_data) != len(l_label):
		return None

	# reverse lookup built once; the original rebuilt both key and value
	# lists on every iteration
	label_to_class = {v: k for k, v in g_dict_labels.items()}

	def _report_done(video_name):
		# single place for the per-video progress bookkeeping that was
		# duplicated at every completion path
		nonlocal n_count
		n_count = n_count + 1
		print('%d/%d. Stacking process for %s has done.'%(n_count, n_nums, video_name))

	for i in range(len(l_data)):
		video_name = l_data[i]
		str_class_name = label_to_class[l_label[i]]
		data_name = video_name.split('.')[0]

		# jump to next if the h5 file already exists
		rgb_stack_str = stack_path + str_class_name + '/' + data_name + '.h5'
		if os.path.exists(rgb_stack_str):
			_report_done(video_name)
			continue

		data_str = dataset_path + str_class_name + '/' + video_name
		cv_cap = cv2.VideoCapture(data_str)
		try:
			# named constant instead of the magic property index 7
			n_frame_num = int(cv_cap.get(cv2.CAP_PROP_FRAME_COUNT))
			if n_frame_num < 2:
				_report_done(video_name)
				continue

			# -- extracting frames begin --
			# read sequentially: decoders deliver frames in order, and
			# seeking with CAP_PROP_POS_FRAMES per frame is very slow
			lst_frames_rgb = []
			for k in range(n_frame_num):
				b_flag, frame = cv_cap.read()
				if not b_flag or frame is None:
					break
				rs_frame = cv2.resize(frame, (TARGET_WIDTH, TARGET_HEIGHT), interpolation = cv2.INTER_CUBIC)
				lst_frames_rgb.append(rs_frame)
		finally:
			# release the capture on every path; the original never did
			cv_cap.release()

		if len(lst_frames_rgb) == 0:
			_report_done(video_name)
			continue

		arr_frames_rgb = np.array(lst_frames_rgb, dtype = np.uint8)
		# -- extracting frames end --

		str_rgb_path = stack_path + str_class_name + '/'
		# exist_ok removes the race between exists() and mkdir()
		os.makedirs(str_rgb_path, exist_ok = True)
		with h5py.File(str_rgb_path + data_name + '.h5', 'w') as file:
			file.create_dataset('stack', data = arr_frames_rgb)
		_report_done(video_name)

	return n_count


# read file list, begin at 0
def read_file_list():
	"""Collect every video file under each class directory.

	Returns a tuple (lst_data, lst_label): parallel lists of file names
	and their integer class labels, or None when any class directory in
	g_dict_labels is missing from dataset_path.
	"""
	lst_data = []
	lst_label = []

	for word, label in g_dict_labels.items():
		class_path = dataset_path + word + '/'
		if not os.path.exists(class_path):
			return None

		for entry in os.listdir(class_path):
			# only plain files are videos; skip nested directories
			if os.path.isfile(class_path + entry):
				lst_data.append(entry)
				lst_label.append(label)

	return (lst_data, lst_label)


def main():
	"""Build the label dictionary, enumerate all videos, and stack them
	into HDF5 files in batches of g_batch_size."""
	# exist_ok avoids failing when the directory was created by a prior run
	os.makedirs(stack_path, exist_ok = True)

	if not construct_dict():
		print('There is no classInd file.')
		return None

	tuple_data_label = read_file_list()
	if tuple_data_label is None:
		print('There is no data or label file.')
		return None

	lst_data, lst_label = tuple_data_label
	n_nums = len(lst_data)
	n_count = 0
	# one slice per batch; the final slice naturally covers the remainder,
	# replacing the original's separate tail-batch handling
	for begin in range(0, n_nums, g_batch_size):
		end = begin + g_batch_size
		n_count = Calc_frame_and_stack(n_nums, n_count, lst_data[begin:end], lst_label[begin:end])

	print("h5 prepare data file is saved.")


# entry guard; tab-indented to match the rest of the file (the original
# line used spaces, inconsistent with the file's tab indentation)
if __name__ == '__main__':
	main()
