import os
import pickle

import cv2


class TestingDataSet(object):
    """Test-split loader for clip/sentence retrieval (TACoS-style data).

    Flattens a pickled list of (clip_name, sentence_vectors) entries into one
    (clip_name, sentence_vector) pair per sentence, groups the pair indices by
    movie, and lists the sliding-window feature files on disk that belong to
    movies present in the test split.
    """

    def __init__(self, img_dir, csv_path, batch_size):
        # img_dir: directory holding sliding-window clip features (*.npy).
        # csv_path: pickled list whose entries are (clip_name, sent_vecs);
        #           clip names are bytes like b"<movie>_<start>_<end>".
        # NOTE(review): pickle.load on an untrusted file is unsafe — this
        # assumes csv_path is a trusted local artifact.
        self.batch_size = batch_size
        self.image_dir = img_dir
        print("Reading testing data list from " + csv_path)
        self.semantic_size = 4800
        # 'with' closes the handle deterministically (the original leaked the
        # open file). encoding='bytes' keeps py2-pickled strings as bytes.
        with open(csv_path, 'rb') as pkl_file:
            csv = pickle.load(pkl_file, encoding='bytes')
        # One (clip_name, sentence_vector) pair per sentence.
        self.clip_sentence_pairs = [
            (entry[0], sent_vec) for entry in csv for sent_vec in entry[1]
        ]
        print(str(len(self.clip_sentence_pairs)) + " pairs are readed")
        # Group pair indices by movie name (the part before the first b'_').
        movie_names_set = set()
        self.movie_clip_names = {}
        for k, (clip_name, _) in enumerate(self.clip_sentence_pairs):
            movie_name = clip_name.split(b'_')[0]
            if movie_name not in movie_names_set:
                movie_names_set.add(movie_name)
                self.movie_clip_names[movie_name] = []
            self.movie_clip_names[movie_name].append(k)
        self.movie_names = list(movie_names_set)

        # Largest number of clip/sentence pairs any single movie has.
        self.clip_num_per_movie_max = max(
            (len(idxs) for idxs in self.movie_clip_names.values()), default=0
        )
        print("Max number of clips in a movie is " + str(self.clip_num_per_movie_max))

        self.sliding_clip_path = img_dir
        self.sliding_clip_names = []
        for clip_name in os.listdir(self.sliding_clip_path):
            parts = clip_name.split(".")
            # Expect names like "<movie>.avi_<start>_<end>.npy". The length
            # guard fixes the IndexError the original raised on any file name
            # with fewer than two dots (e.g. a stray "readme.txt").
            if len(parts) >= 3 and parts[2] == "npy":
                movie_name = clip_name.split("_")[0].encode('utf-8')
                # Keep only features for movies present in the test split.
                if movie_name in self.movie_clip_names:
                    self.sliding_clip_names.append(parts[0] + "." + parts[1])
        self.num_samples = len(self.clip_sentence_pairs)
        print("sliding clips number: " + str(len(self.sliding_clip_names)))
        assert self.batch_size <= self.num_samples

# Default locations of the TACoS test split, relative to the repo root:
# the pickled clip/sentence pairs and the C3D sliding-window feature dir.
test_csv_path = "./exp_data/TACoS/test_clip-sentvec.pkl"
test_feature_dir = "./TACOS/Interval128_256_overlap0.8_c3d_fc6/"
# test_dataset = TestingDataSet(test_feature_dir,test_csv_path,batch_size=16)
# order = [b's27-d50.avi', b's27-d54.avi', b's27-d70.avi', b's28-d25.avi', b's28-d27.avi', b's28-d39.avi', b's28-d46.avi', b's28-d51.avi', b's29-d31.avi', b's29-d39.avi', b's29-d42.avi', b's29-d50.avi', b's29-d52.avi', b's30-d26.avi', b's30-d29.avi', b's30-d40.avi', b's30-d41.avi', b's30-d43.avi', b's30-d52.avi', b's30-d53.avi', b's31-d25.avi', b's31-d28.avi', b's31-d31.avi', b's32-d27.avi', b's32-d52.avi']
# stes = b's26-d26.avi'
# print(stes in order)
import numpy as np

def numpy_array_to_video(numpy_array, video_out_path, fps=30):
    """Render a saved frame array (.npy file) to a video file.

    Parameters
    ----------
    numpy_array : str
        Path to a .npy file holding frames shaped (num_frames, height,
        width, ...); frames are presumably uint8 BGR as cv2 expects —
        TODO confirm against the feature extractor.
    video_out_path : str
        Destination video path; the codec is mp4v, so use a .mp4 extension.
    fps : int, optional
        Output frame rate. Defaults to 30, the value the original
        hard-coded, so existing callers are unaffected.
    """
    frames = np.load(numpy_array)
    video_height = frames.shape[1]
    video_width = frames.shape[2]

    out_video_size = (video_width, video_height)
    output_video_fourcc = int(cv2.VideoWriter_fourcc(*'mp4v'))
    video_write_capture = cv2.VideoWriter(video_out_path, output_video_fourcc, fps, out_video_size)

    # try/finally guarantees the writer is released (and the file flushed)
    # even if a write raises; the original leaked it on error.
    try:
        for frame in frames:
            video_write_capture.write(frame)
    finally:
        video_write_capture.release()

if __name__ == '__main__':
    # '.mp4' fixes the original '.map4' typo — the writer uses the mp4v
    # codec, which expects an mp4 container.
    video_out_path = 'video/out.mp4'
    video_np_array = 'TACOS/Interval128_256_overlap0.8_c3d_fc6/s13-d21.avi_1_129.npy'
    # cv2.VideoWriter fails silently if the directory doesn't exist, so
    # create it up front.
    os.makedirs(os.path.dirname(video_out_path), exist_ok=True)
    numpy_array_to_video(video_np_array, video_out_path)
    # numpy_array = np.load(video_np_array)
    # print(numpy_array)
