import os
import cv2
from torchvision import transforms
from PIL import Image
import numpy as np
from tqdm import tqdm
import os
import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

def one_hot(x: int, num_classes: int = 5) -> np.ndarray:
    """Encode class id *x* as a float32 one-hot vector of length *num_classes*.

    The special id ``x == num_classes`` (the "normal"/background class, id 5
    in this project) is encoded as the all-zero vector.

    Args:
        x: integer class id in ``[0, num_classes]``.
        num_classes: length of the returned vector (default 5).

    Returns:
        ``np.ndarray`` of shape ``(num_classes,)`` and dtype ``float32``.

    Raises:
        ValueError: if ``x`` is outside ``[0, num_classes]`` (the original
            code silently indexed from the end for negative ``x``).
    """
    vec = np.zeros(num_classes, dtype=np.float32)
    if x == num_classes:
        # Background class: deliberately all zeros.
        return vec
    if not 0 <= x < num_classes:
        raise ValueError(f"class id {x} out of range [0, {num_classes}]")
    vec[x] = 1
    return vec

class CustomDataset(Dataset):
    """Image-classification dataset that preloads every image into RAM.

    Image paths come either from a split text file (one path per line) or
    from recursively walking ``path``.  An image's label is the name of its
    immediate parent directory, mapped to an integer class id via
    ``class_to_label`` and then one-hot encoded with :func:`one_hot`
    (class id 5, '正常'/normal, becomes the all-zero vector).

    Now subclasses ``torch.utils.data.Dataset`` for consistency with
    ``VideoDataset`` (the original relied on duck typing only).
    """

    # Accepted filename suffixes — a superset of the original bare-'jpg'
    # suffix match, so every previously found file is still found.
    _IMG_SUFFIXES = ('jpg', 'JPG', 'jpeg', 'JPEG')

    def __init__(self, path, preprocessing, split_text=None):
        """
        Args:
            path: root directory to walk for images (unused if ``split_text``).
            preprocessing: callable applied to each PIL image in ``__getitem__``.
            split_text: optional text file listing one image path per line.
        """
        # class id -> human-readable class name (runtime strings, do not edit):
        # 0 smoking, 1 shirtless, 2 rat, 3 cat, 4 dog, 5 normal/background.
        self.classes = {
            0 : '抽烟',
            1 : '赤膊',
            2 : '老鼠',
            3 : '猫',
            4 : '狗',
            5 : '正常'
        }
        self.class_to_label = {v: k for k, v in self.classes.items()}
        self.img_paths = []

        if split_text:
            # Load the predefined split: one image path per line.
            print(f"loading image path from {split_text}")
            with open(split_text, 'r') as f:
                self.img_paths = f.read().splitlines()
        else:
            for dirpath, _dirs, files in os.walk(path):
                self.img_paths.extend(
                    os.path.join(dirpath, fname)
                    for fname in files
                    if fname.endswith(self._IMG_SUFFIXES)
                )

        print(f'found {len(self.img_paths)} images')

        self.preprocessing = preprocessing
        # Preload and fully decode every image up front; convert('RGB')
        # forces the pixel data to be read, so no lazy file handles linger.
        self.imgs = []
        print("loading imgs into memory")
        for img_path in tqdm(self.img_paths):
            self.imgs.append(Image.open(img_path).convert('RGB'))

    def __getitem__(self, idx):
        """Return ``(preprocessed image, one-hot label)`` for index ``idx``."""
        img_path = self.img_paths[idx]
        img = self.preprocessing(self.imgs[idx])

        # The label is the immediate parent directory name of the image path;
        # an unknown directory name raises KeyError, as before.
        class_name = os.path.basename(os.path.dirname(img_path))
        label = self.class_to_label[class_name]

        return img, one_hot(label)

    def __len__(self):
        return len(self.imgs)
        

class TestDataset(CustomDataset):
    """Unlabelled variant of ``CustomDataset`` for inference.

    Collects and preloads images exactly like the parent class, but
    ``__getitem__`` yields only the preprocessed image — no label is
    derived from the directory structure.
    """

    def __getitem__(self, idx):
        """Return the preprocessed image at index ``idx`` (no label)."""
        raw = self.imgs[idx]
        return self.preprocessing(raw)
        
        
        
        
        


class VideoDataset(Dataset):
    """Uniformly samples ``num_frames`` frames from each video in a folder.

    Each item is ``torch.stack`` of the sampled frames: with a ``transform``
    the per-frame shape is whatever the transform emits; without one the
    raw BGR uint8 OpenCV frames are wrapped with ``torch.from_numpy``
    (the original crashed in ``torch.stack`` for the no-transform case).
    """

    def __init__(self, data_folder, num_frames=4, transform=None):
        """
        Args:
            data_folder: directory whose entries are all assumed to be videos.
            num_frames: number of frames to sample per video (default 4).
            transform: optional callable applied to each decoded BGR frame.
        """
        self.data_folder = data_folder
        self.num_frames = num_frames
        self.transform = transform

        # Get a list of video files in the data folder.
        self.video_files = [f for f in os.listdir(data_folder)]

    def __len__(self):
        return len(self.video_files)

    def __getitem__(self, idx):
        """Decode ``num_frames`` uniformly spaced frames from video ``idx``.

        Raises:
            RuntimeError: if the video cannot be opened or yields no frames.
        """
        video_file = os.path.join(self.data_folder, self.video_files[idx])

        cap = cv2.VideoCapture(video_file)
        try:
            # Fail loudly instead of silently returning an empty stack.
            if not cap.isOpened():
                raise RuntimeError(f"cannot open video: {video_file}")

            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            # Sample inside (0, total_frames - 1); guard very short clips so
            # the linspace upper bound never drops below the lower bound
            # (the original produced negative indices for clips < 3 frames).
            upper = max(total_frames - 2, 1)
            indices = torch.linspace(1, upper, steps=self.num_frames).int().tolist()

            frames = []
            for i in indices:
                # Seek to the frame, then decode it; failed reads are skipped.
                cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                ret, frame = cap.read()
                if ret:
                    frames.append(frame)
        finally:
            # Release the capture even if decoding raised.
            cap.release()

        if not frames:
            raise RuntimeError(f"no decodable frames in video: {video_file}")

        # Apply transformations if provided; otherwise wrap the raw numpy
        # frames so torch.stack below receives tensors.
        if self.transform:
            frames = [self.transform(frame) for frame in frames]
        else:
            frames = [torch.from_numpy(frame) for frame in frames]

        return torch.stack(frames)
    
def _demo():
    """Smoke-test VideoDataset on a sample folder (example usage)."""
    data_folder = '/root/code/tianchi/data/test/video'
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    dataset = VideoDataset(data_folder, transform=transform)
    print(dataset)

    # Access a single sample: shape is (num_frames, 3, 224, 224).
    sample = dataset[0]
    print(sample.shape)

    loader = DataLoader(dataset=dataset, batch_size=1)
    for batch in loader:
        print(batch.shape)


if __name__ == '__main__':
    _demo()