import os
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import albumentations as A
from albumentations.pytorch import ToTensorV2


class SegmentationDataset(Dataset):
    """Semantic segmentation dataset (RGB images + single-channel class-index masks).

    Loads image/mask pairs from two parallel directories, or generates random
    dummy samples when the image directory is missing (useful for smoke tests).

    Args:
        images_dir: Directory containing the input images.
        masks_dir: Directory containing the masks. A mask shares its image's
            basename but always uses a ``.png`` extension.
        classes: Unused; kept only for backward compatibility with old callers.
        augmentation: Optional ``albumentations.Compose`` pipeline. If it lacks
            an ``A.Resize``, one targeting ``image_size`` is prepended once at
            construction time.
        image_size: Target (height, width) of the returned samples.
        use_dummy_data: Force random dummy samples even if ``images_dir`` exists.
        num_classes: Number of classes; mask values are clipped into
            ``[0, num_classes - 1]``.
    """

    def __init__(self, images_dir, masks_dir, classes=None, augmentation=None,
                 image_size=(256, 256), use_dummy_data=False, num_classes=2):
        self.images_dir = images_dir
        self.masks_dir = masks_dir
        self.augmentation = augmentation
        self.image_size = image_size
        self.num_classes = num_classes

        # Fall back to dummy data when requested or when the directory is absent.
        if use_dummy_data or not os.path.exists(images_dir):
            self.use_dummy_data = True
            self.num_samples = 10  # fixed number of synthetic samples
            print(f"Using dummy dataset with {self.num_samples} samples")
        else:
            self.use_dummy_data = False
            self.ids = os.listdir(images_dir)
            self.num_samples = len(self.ids)
            print(f"Dataset loaded: {images_dir} with {self.num_samples} samples")

        # BUG FIX: the original prepended A.Resize to the *shared* pipeline from
        # inside __getitem__, mutating an object callers may reuse elsewhere and
        # racing across DataLoader workers. Build a private pipeline once, here.
        if (self.augmentation is not None and self.image_size
                and not any(isinstance(t, A.Resize) for t in self.augmentation.transforms)):
            resize = A.Resize(height=self.image_size[0], width=self.image_size[1])
            self.augmentation = A.Compose([resize, *self.augmentation.transforms])

    def __len__(self):
        """Number of samples (real files, or 10 in dummy mode)."""
        return self.num_samples

    def __getitem__(self, idx):
        """Return an ``(image, mask)`` pair for sample ``idx``.

        With an augmentation pipeline, types/shapes are whatever the pipeline
        emits (typically CHW float tensor + HW long tensor via ToTensorV2).
        Without one, returns a float32 CHW tensor in [0, 1] and an int64 HW mask.
        """
        if self.use_dummy_data:
            image, mask = self._make_dummy_sample()
        else:
            image, mask = self._load_sample(idx)

        if self.augmentation:
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample["image"], sample["mask"]
        else:
            # No pipeline: resize (if needed) and tensorize manually.
            if self.image_size and image.shape[:2] != tuple(self.image_size):
                # BUG FIX: cv2.resize takes dsize as (width, height) while
                # image_size is (height, width); the original passed it
                # unswapped, transposing non-square targets.
                dsize = (self.image_size[1], self.image_size[0])
                image = cv2.resize(image, dsize)
                # Nearest-neighbor keeps mask values as valid class indices.
                mask = cv2.resize(mask, dsize, interpolation=cv2.INTER_NEAREST)

            image = torch.tensor(image.transpose((2, 0, 1)).astype(np.float32) / 255.0)
            mask = torch.tensor(mask.astype(np.int64))

        return image, mask

    def _make_dummy_sample(self):
        """Generate a random RGB image and class-index mask at ``image_size``."""
        # randint's high bound is exclusive: 256 covers the full 0-255 range
        # (the original's 255 silently excluded the brightest value).
        image = np.random.randint(0, 256, (*self.image_size, 3), dtype=np.uint8)
        mask = np.random.randint(0, self.num_classes, self.image_size, dtype=np.uint8)
        return image, mask

    def _load_sample(self, idx):
        """Load and sanitize the image/mask pair for sample ``idx``.

        Raises:
            ValueError: if the image file cannot be read.
        """
        image_id = self.ids[idx]
        image_path = os.path.join(self.images_dir, image_id)

        # Masks share the image's basename but are always stored as .png.
        # BUG FIX: use suffix slicing instead of str.replace, which replaced
        # the first occurrence of the extension substring anywhere in the name.
        mask_filename = image_id
        for ext in ('.jpg', '.jpeg', '.png'):
            if image_id.endswith(ext):
                mask_filename = image_id[:-len(ext)] + '.png'
                break
        mask_path = os.path.join(self.masks_dir, mask_filename)

        image = cv2.imread(image_path)
        if image is None:
            raise ValueError(f"Failed to load image from {image_path}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        mask = cv2.imread(mask_path, 0)  # grayscale: one class index per pixel
        if mask is None:
            # Best-effort: keep training alive with an all-background mask.
            print(f"Warning: Mask not found at {mask_path}, creating dummy mask")
            mask = np.zeros(image.shape[:2], dtype=np.uint8)

        # Clamp stray label values into the valid class range.
        if self.num_classes is not None:
            mask = np.clip(mask, 0, self.num_classes - 1)
        return image, mask


class ADE20KDataset(SegmentationDataset):
    """ADE20K dataset: 150 classes, .jpg images paired with .png masks.

    Thin convenience subclass that pins ``num_classes=150`` and accepts the
    pipeline under the legacy keyword ``transform``.
    """

    def __init__(self, images_dir, masks_dir, transform=None, image_size=(512, 512)):
        # BUG FIX: the parent takes `augmentation` (not `transform`) and has no
        # image_extensions / mask_extensions / ignore_index parameters — the
        # original call raised TypeError on every instantiation. The parent
        # already maps image extensions to .png masks, so those keywords are
        # redundant; ignore_index has no parent-side support to forward to.
        super().__init__(
            images_dir=images_dir,
            masks_dir=masks_dir,
            augmentation=transform,
            image_size=image_size,
            num_classes=150,
        )

class SegmentationDataLoader:
    """Builds PyTorch DataLoaders for the train/val/test segmentation splits.

    ``config`` must expose ``IMAGE_SIZE`` (height, width), ``BATCH_SIZE``,
    ``NUM_CLASSES`` and a ``DATASET_CONFIG`` object with
    ``{TRAIN,VAL,TEST}_IMAGES_DIR`` / ``{TRAIN,VAL,TEST}_MASKS_DIR`` paths.
    Missing image directories fall back to dummy data with a warning.
    """

    # ImageNet channel statistics used by A.Normalize for every split.
    _MEAN = (0.485, 0.456, 0.406)
    _STD = (0.229, 0.224, 0.225)

    def __init__(self, config):
        self.config = config
        self.dataset_config = config.DATASET_CONFIG

    def _eval_transform(self):
        """Deterministic resize + normalize pipeline for val/test splits."""
        return A.Compose([
            A.Resize(height=self.config.IMAGE_SIZE[0], width=self.config.IMAGE_SIZE[1]),
            A.Normalize(mean=self._MEAN, std=self._STD),
            ToTensorV2(),
        ])

    def _build_loader(self, images_dir, masks_dir, transform, shuffle,
                      warn_label, dummy_label):
        """Create one SegmentationDataset + DataLoader for a split.

        Falls back to dummy data (with a warning) if ``images_dir`` is missing.
        ``warn_label``/``dummy_label`` only affect the warning text.
        """
        use_dummy_data = False
        if not os.path.exists(images_dir):
            print(f"Warning: {warn_label} images directory not found: {images_dir}")
            print(f"Using dummy data for {dummy_label}")
            use_dummy_data = True

        dataset = SegmentationDataset(
            images_dir=images_dir,
            masks_dir=masks_dir,
            augmentation=transform,
            image_size=self.config.IMAGE_SIZE,
            use_dummy_data=use_dummy_data,
            num_classes=self.config.NUM_CLASSES,
        )
        return DataLoader(
            dataset,
            batch_size=self.config.BATCH_SIZE,
            shuffle=shuffle,
            # BUG FIX: os.cpu_count() may return None (platform-dependent),
            # which made min(4, None) raise TypeError; default to one worker.
            num_workers=min(4, os.cpu_count() or 1),
            pin_memory=True,
        )

    def get_data_loaders(self):
        """Return ``(train_loader, val_loader)``."""
        # Training augmentations: geometric + photometric jitter.
        train_transform = A.Compose([
            A.Resize(height=self.config.IMAGE_SIZE[0], width=self.config.IMAGE_SIZE[1]),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            # RandomRotate90 deliberately omitted: it causes dimension
            # mismatches when batching non-square images.
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
            A.RandomBrightnessContrast(p=0.3),
            A.Normalize(mean=self._MEAN, std=self._STD),
            ToTensorV2(),
        ])

        train_loader = self._build_loader(
            self.dataset_config.TRAIN_IMAGES_DIR,
            self.dataset_config.TRAIN_MASKS_DIR,
            train_transform,
            shuffle=True,
            warn_label="Training",
            dummy_label="training",
        )
        val_loader = self._build_loader(
            self.dataset_config.VAL_IMAGES_DIR,
            self.dataset_config.VAL_MASKS_DIR,
            self._eval_transform(),
            shuffle=False,
            warn_label="Validation",
            dummy_label="validation",
        )
        return train_loader, val_loader

    def get_test_loader(self):
        """Return the test DataLoader (no shuffling, eval transforms only)."""
        return self._build_loader(
            self.dataset_config.TEST_IMAGES_DIR,
            self.dataset_config.TEST_MASKS_DIR,
            self._eval_transform(),
            shuffle=False,
            warn_label="Test",
            dummy_label="testing",
        )


class ADE20KDataLoader(SegmentationDataLoader):
    """ADE20K Data Loader (for backward compatibility)"""
    # Pure alias of SegmentationDataLoader: adds and overrides nothing.
    # Kept so older code that imports ADE20KDataLoader keeps working.
    pass