import os

import cv2
import torch
from torch.utils.data import Dataset


# Dataset that decodes raw video files: the first 16 frames of each video,
# center-cropped to 112x112.
class VideoDataset(Dataset):
    def __init__(self, dirs):
        """Eagerly load every video under `dirs` into memory.

        Args:
            dirs: directory containing the video files. Every video is
                decoded up front, which can take a long time.
        """
        super(VideoDataset, self).__init__()
        self.base_dir = dirs

        self.video_data = []
        self.video_label = []
        videos = [self.base_dir + '/' + video for video in os.listdir(self.base_dir)]

        # Decode all videos now; __getitem__ then only normalizes cached frames.
        for video_line in videos:
            frames, label = self.get_video_and_label(video_line)
            self.video_data.append(frames)
            self.video_label.append(label)

    def __getitem__(self, index):
        """Return (frames, label).

        `frames` is a float tensor of shape (C, T, H, W) with pixel values
        scaled from [0, 255] to [-1, 1]; `label` is 0 or 1.
        """
        frames = self.video_data[index]
        label = self.video_label[index]

        frames = torch.Tensor(frames)        # (T, H, W, C)
        frames = frames.permute(3, 0, 1, 2)  # -> (C, T, H, W)
        # Normalize in place: [0, 255] -> [-1, 1].
        frames.div_(127.5).sub_(1)

        return frames, label

    def __len__(self):
        return len(self.video_label)

    def get_video_and_label(self, video):
        """Decode the first 16 frames of `video`, center-cropped to 112x112.

        The label is 0 when the file name contains "fi", otherwise 1.

        Returns:
            (frames, label): `frames` is a list of up to 16 HxWx3 arrays.
        """
        frames = []
        label = 0 if "fi" in video.split('/')[-1] else 1
        cap = cv2.VideoCapture(video)
        try:
            # BUG FIX: the original checked CAP_PROP_FPS (frames *per second*),
            # but the message shows the intent is the total frame count.
            frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            assert frame_count >= 16, "video length should greater than 16"
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # Grab the first 16 consecutive frames.
            while cap.isOpened() and len(frames) < 16:
                flag, frame = cap.read()
                # BUG FIX: the read flag was ignored; a failed decode would
                # crash on the crop below.
                if not flag or frame is None:
                    break
                # BUG FIX: ndarray axes are (row=height, col=width); the
                # original sliced rows with `width` and columns with `height`.
                frame = frame[height // 2 - 56:height // 2 + 56,
                              width // 2 - 56:width // 2 + 56]
                frames.append(frame)
        finally:
            # BUG FIX: the capture handle was never released.
            cap.release()

        return frames, label

    def onehot_encode(self, label, n_class=2):
        """Return a one-hot float vector of length `n_class` for `label`."""
        diag = torch.eye(n_class)
        oh_vector = diag[label].view(n_class)
        return oh_vector


# Dataset that reads videos already extracted into per-video directories of
# frame images: the first 16 frames, center-cropped to 112x112.
class VideoDataset2(Dataset):
    def __init__(self, dirs):
        """Eagerly load every frame directory under `dirs` into memory.

        Args:
            dirs: directory whose subdirectories each hold one video's
                extracted frames. Loading everything can take a long time.
        """
        super(VideoDataset2, self).__init__()
        self.base_dir = dirs

        self.video_data = []
        self.video_label = []
        videos = [self.base_dir + '/' + video for video in os.listdir(self.base_dir)]

        # Read all frame images now; __getitem__ then only normalizes.
        for video_line in videos:
            frames, label = self.get_video_and_label_from_dir(video_line)
            self.video_data.append(frames)
            self.video_label.append(label)

    def __getitem__(self, index):
        """Return (frames, label).

        `frames` is a float tensor of shape (C, T, H, W) with pixel values
        scaled from [0, 255] to [-1, 1]; `label` is 0 or 1 (use
        `onehot_encode` for a one-hot target).
        """
        frames = self.video_data[index]
        label = self.video_label[index]

        frames = torch.Tensor(frames)        # (T, H, W, C)
        frames = frames.permute(3, 0, 1, 2)  # -> (C, T, H, W)
        # Normalize in place: [0, 255] -> [-1, 1].
        frames.div_(127.5).sub_(1)

        return frames, label

    def __len__(self):
        return len(self.video_label)

    def get_video_and_label_from_dir(self, video):
        """Read the first 16 frame images in directory `video`,
        center-cropped to 112x112.

        The label is 0 when the directory name contains "fi", otherwise 1.

        Returns:
            (frames, label): `frames` is a list of up to 16 HxWx3 arrays.
        """
        frames = []
        label = 0 if "fi" in video.split('/')[-1] else 1
        # BUG FIX: os.listdir order is arbitrary; sort so the 16 frames we
        # take are actually consecutive.
        for image in sorted(os.listdir(video)):
            frame = cv2.imread(video + "/" + image)
            # BUG FIX: imread returns None for unreadable/non-image files,
            # which crashed on `.shape` below; skip such entries.
            if frame is None:
                continue
            height, width, _ = frame.shape
            # BUG FIX: ndarray axes are (row=height, col=width); the original
            # sliced rows with `width` and columns with `height`.
            frame = frame[height // 2 - 56:height // 2 + 56,
                          width // 2 - 56:width // 2 + 56]
            frames.append(frame)
            if len(frames) == 16:
                break

        return frames, label

    def onehot_encode(self, label, n_class=2):
        """Return a one-hot float vector of length `n_class` for `label`."""
        diag = torch.eye(n_class)
        oh_vector = diag[label].view(n_class)
        return oh_vector