import os

from PIL import Image
from typing import Any, Dict
from torch.utils.data import Dataset

from ..ext_transforms import *


class Animals(Dataset):
    """Unlabeled image dataset gathered by recursive directory walk.

    Recursively collects image files under ``cfg['data_root']`` and yields
    each one as a normalized tensor under the key ``'x'``. No labels are
    read anywhere in this class.
    """

    def __init__(self, split: str, cfg: Dict[str, Any]):
        """
        Args:
            split: one of ``'train'`` or ``'val'``.
            cfg: configuration mapping; must provide ``'data_root'`` and may
                provide ``'input_size'`` (default ``(512, 512)``).

        Raises:
            ValueError: if ``split`` is invalid or ``'data_root'`` is missing.
        """
        # Validate with a real exception: the original used `assert`
        # (stripped under `python -O`) and, worse, returned early for
        # 'val' *before* initializing any attribute, so a val dataset
        # crashed with AttributeError in __len__/__getitem__.
        if split not in ('train', 'val'):
            raise ValueError("split must be one of 'train', 'val'")

        self.root_dir = cfg.get('data_root')
        if self.root_dir is None:
            # Fail here rather than with an obscure os.walk(None) error later.
            raise ValueError("cfg must provide 'data_root'")
        self.size = cfg.get('input_size', (512, 512))

        self.image_paths = self._gather_image_paths()

        normalize = ExtNormalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        if split == 'train':
            # Random augmentation is appropriate for training only.
            self.transform = ExtCompose([
                ExtRandomCrop(size=self.size),
                ExtRandomHorizontalFlip(),
                ExtToTensor(),
                normalize,
            ])
        else:
            # Deterministic pipeline for validation.
            # NOTE(review): no resize/crop is applied here, so batched
            # loading needs same-sized images — confirm whether the
            # project's ext_transforms offer a deterministic crop/resize.
            self.transform = ExtCompose([
                ExtToTensor(),
                normalize,
            ])

    def _gather_image_paths(self):
        """Recursively collect image file paths under ``self.root_dir``.

        Returns:
            Sorted list of absolute/relative paths (case-insensitive
            extension match). Sorted so dataset order is deterministic —
            ``os.walk`` order is otherwise platform-dependent.
        """
        valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff')
        image_paths = [
            os.path.join(root, name)
            for root, _, files in os.walk(self.root_dir)
            for name in files
            if name.lower().endswith(valid_extensions)
        ]
        return sorted(image_paths)

    def __len__(self):
        """Number of images found under the data root."""
        return len(self.image_paths)

    def __getitem__(self, index):
        """Load, transform, and return the image at ``index`` as ``{'x': image}``."""
        img_path = self.image_paths[index]

        # Force 3-channel RGB so grayscale/palette/RGBA files normalize cleanly.
        image = Image.open(img_path).convert('RGB')

        if self.transform:
            image = self.transform(image)

        return {
            'x': image
        }
