
from platform import version
from mindspore.ops.composite.array_ops import sequence_mask
import numpy as np
import os
import mindspore.dataset as ds
from PIL import Image
import cv2
import random
from mindvideo import datasets
from mindvideo.common.utils.class_factory import ClassFactory, ModuleType
import mindspore.dataset.vision.c_transforms as c_vision
import csv
import json
import copy

@ClassFactory.register(ModuleType.DATASET)
class Kinetic400Dataset:
    """Kinetics-400 dataset that samples a clip of consecutive frames from raw video.

    Args:
        dataset_dir (str): Path to the train/valid/test split directory of
            Kinetics-400; every file in it is treated as one video sample.
        class_list (str): Path to a JSON file holding the list of class names;
            a class's position in that list becomes its integer label.
        split_list (str): Path to the official train/valid/test CSV list
            (column 0 is the label name, column 1 is the YouTube video id).
        seq_len (int): Number of consecutive frames sampled per video.
    """

    def __init__(self, dataset_dir, class_list, split_list, seq_len=16):
        self.label2digit = self.load_label(class_list)
        self.name2label = self.load_split(split_list)
        self.filelist = self.get_filelist(dataset_dir)
        self.seq_len = seq_len

    def __getitem__(self, index):
        """Return ``(frames, label)`` for the video at ``index``.

        ``frames`` is a float64 ndarray of ``seq_len`` decoded frames and
        ``label`` is the integer class id.
        """
        path = self.filelist[index]
        # Kept as an attribute for backward compatibility with callers that
        # may inspect the most recently loaded sample.
        self.data = self.load_video(path)
        # The first 11 characters of the file name are the YouTube video id,
        # which keys into the official split list.
        video_name = os.path.basename(path)[0:11]
        return self.data, self.label2digit[self.name2label[video_name]]

    def __len__(self):
        """Return the number of video files in the dataset."""
        return len(self.filelist)

    def get_filelist(self, sample_path):
        """Return the full paths of every entry directly under ``sample_path``."""
        files = os.listdir(sample_path)
        return [os.path.join(sample_path, f) for f in files]

    def load_video(self, video_path):
        """Decode ``seq_len`` consecutive frames from a randomly chosen clip.

        The video is divided into ``clipnum = total_frames // seq_len`` clips;
        one clip is picked uniformly at random and its frames are decoded as
        float64 arrays. Short videos are padded by repeating the last decoded
        frame.

        Args:
            video_path (str): Path of the video file to decode.

        Returns:
            numpy.ndarray: Stacked frames with shape ``(seq_len, H, W, C)``
            (empty if no frame could be decoded at all).
        """
        data = []
        cap = cv2.VideoCapture(video_path)
        try:
            video_len = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            clipnum = int(video_len / self.seq_len)
            if clipnum <= 1:
                idx = 0
            else:
                # randint is inclusive on both ends, so the upper bound must
                # be clipnum - 1 to keep the whole clip inside the video.
                idx = random.randint(0, clipnum - 1)
            # Seek to the first FRAME of the chosen clip: CAP_PROP_POS_FRAMES
            # expects a frame index, not a clip index.
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx * self.seq_len)
            for _ in range(self.seq_len):
                ret, img = cap.read()
                if ret:
                    # Convert to a float64 NumPy array for downstream ops.
                    data.append(np.array(img, dtype=np.float64))
                elif len(data):
                    # Decode failed mid-clip: pad with the last good frame.
                    data.append(data[-1])
                else:
                    print("Something is wrong with data:", video_path)
        finally:
            # Always release the capture to avoid leaking decoder handles.
            cap.release()
        return np.array(data)

    def load_label(self, label_path):
        """Read the JSON class list and map each class name to its index."""
        label = {}
        with open(label_path, 'r') as f:
            labels = json.loads(f.read())
            for idx, label_name in enumerate(labels):
                label[label_name] = idx
        return label

    def load_split(self, split_path):
        """Read the official split CSV and map video id -> label name."""
        vname2label = {}
        with open(split_path, 'r') as f:
            reader = csv.reader(f)
            for line in reader:
                # Column 1 is the YouTube video id, column 0 the label name.
                vname2label[line[1]] = line[0]
        return vname2label

# from mindvideo.datasets.preprocess import ReOrder, VideoShortEdgeResize
# dataset_generator = Kinetic400Dataset(dataset_dir = "/home/kinetics-dataset/val",
#                                       class_list = "mindvideo/datasets/Kinetics/classes.json",
#                                       split_list = "mindvideo/datasets/Kinetics/kinetics-400_val.csv",
#                                       seq_len=64)
# transforms_list = [#VideoShortEdgeResize(256),
#                    ReOrder([3,0,1,2])]
# dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False)
# dataset = dataset.map(operations=transforms_list, input_columns="data")
# dataset = dataset.batch(batch_size=1, drop_remainder=True)

# try:
#     for idx, data in enumerate(dataset.create_dict_iterator()):
#         print(idx, "video shape: {}".format(data['data'].shape), ", Label: {}".format(data['label']))
# except:
#     print("?????")
