import os
import torch

class DatasetConfig:
    """Base dataset configuration class.

    Subclasses override the class attributes below to describe a concrete
    dataset: its name, class count, directory layout, and loader settings.
    The train/val directories are required; test directories are optional.
    """

    # Dataset basic information
    DATASET_NAME = "base_dataset"
    DATASET_TYPE = "segmentation"  # segmentation, classification, detection, etc.

    # Dataset paths (empty string means "not configured"; subclasses fill in)
    DATA_DIR = "/home/fqyang/workspace/segment"
    TRAIN_IMAGES_DIR = ""
    TRAIN_MASKS_DIR = ""
    VAL_IMAGES_DIR = ""
    VAL_MASKS_DIR = ""
    TEST_IMAGES_DIR = ""
    TEST_MASKS_DIR = ""

    # Dataset properties
    NUM_CLASSES = 1
    IMAGE_SIZE = (256, 256)  # presumably (height, width) — confirm against loaders
    IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.png']
    MASK_EXTENSIONS = ['.png', '.jpg']

    # Data loader configuration
    BATCH_SIZE = 4
    NUM_WORKERS = 4
    PIN_MEMORY = True

    # Data augmentation configuration
    USE_AUGMENTATION = True
    AUGMENTATION_PROBABILITY = 0.5

    # Normalization parameters (ImageNet defaults)
    NORMALIZE_MEAN = (0.485, 0.456, 0.406)
    NORMALIZE_STD = (0.229, 0.224, 0.225)

    @classmethod
    def validate_paths(cls):
        """Validate that the required train/val dataset directories exist.

        Returns:
            True when all four required paths exist on disk.

        Raises:
            FileNotFoundError: if any required path is unconfigured (empty)
                or does not exist. All problems are reported in a single
                message so they can be fixed in one pass, instead of failing
                on the first one with an unlabeled path.
        """
        required_paths = {
            "train_images": cls.TRAIN_IMAGES_DIR,
            "train_masks": cls.TRAIN_MASKS_DIR,
            "val_images": cls.VAL_IMAGES_DIR,
            "val_masks": cls.VAL_MASKS_DIR,
        }

        # An empty string is the base-class placeholder — flag it explicitly
        # rather than letting os.path.exists("") produce a blank message.
        missing = [
            f"{label}: {path!r}"
            for label, path in required_paths.items()
            if not path or not os.path.exists(path)
        ]
        if missing:
            raise FileNotFoundError(
                "Dataset path(s) do not exist: " + "; ".join(missing)
            )

        return True

    @classmethod
    def get_dataset_info(cls):
        """Return a summary dict of this configuration.

        Returns:
            dict with keys "name", "type", "num_classes", "image_size",
            and "paths" (a nested dict of the six split directories).
        """
        return {
            "name": cls.DATASET_NAME,
            "type": cls.DATASET_TYPE,
            "num_classes": cls.NUM_CLASSES,
            "image_size": cls.IMAGE_SIZE,
            "paths": {
                "train_images": cls.TRAIN_IMAGES_DIR,
                "train_masks": cls.TRAIN_MASKS_DIR,
                "val_images": cls.VAL_IMAGES_DIR,
                "val_masks": cls.VAL_MASKS_DIR,
                "test_images": cls.TEST_IMAGES_DIR,
                "test_masks": cls.TEST_MASKS_DIR
            }
        }


class ADE20KConfig(DatasetConfig):
    """Configuration for the ADE20K scene-parsing dataset."""

    DATASET_NAME = "ADE20K"
    NUM_CLASSES = 150

    # ADE20K ships JPEG images with PNG annotation masks
    IMAGE_EXTENSIONS = ['.jpg']
    MASK_EXTENSIONS = ['.png']

    # Directory layout under the dataset root
    DATA_DIR = "data/ade20k"
    TRAIN_IMAGES_DIR = os.path.join(DATA_DIR, "images/training")
    TRAIN_MASKS_DIR = os.path.join(DATA_DIR, "annotations/training")
    VAL_IMAGES_DIR = os.path.join(DATA_DIR, "images/validation")
    VAL_MASKS_DIR = os.path.join(DATA_DIR, "annotations/validation")
    # The validation split doubles as the test split here
    TEST_IMAGES_DIR = VAL_IMAGES_DIR
    TEST_MASKS_DIR = VAL_MASKS_DIR

class CityscapesConfig(DatasetConfig):
    """Configuration for the Cityscapes urban street-scene dataset."""

    DATASET_NAME = "Cityscapes"
    NUM_CLASSES = 19

    # Cityscapes images and fine-annotation masks are both PNG
    IMAGE_EXTENSIONS = ['.png']
    MASK_EXTENSIONS = ['.png']

    # Directory layout under the dataset root. No test split is configured,
    # so the empty TEST_* defaults are inherited from DatasetConfig.
    DATA_DIR = "data/cityscapes"
    TRAIN_IMAGES_DIR = os.path.join(DATA_DIR, "leftImg8bit/train")
    TRAIN_MASKS_DIR = os.path.join(DATA_DIR, "gtFine/train")
    VAL_IMAGES_DIR = os.path.join(DATA_DIR, "leftImg8bit/val")
    VAL_MASKS_DIR = os.path.join(DATA_DIR, "gtFine/val")


class PascalVOCConfig(DatasetConfig):
    """Configuration for the Pascal VOC segmentation dataset."""

    DATASET_NAME = "PascalVOC"
    NUM_CLASSES = 21  # 20 object classes plus background

    # JPEG images, PNG class masks
    IMAGE_EXTENSIONS = ['.jpg']
    MASK_EXTENSIONS = ['.png']

    # VOC keeps every image in a single folder, so train and val point at
    # the same directories; presumably split lists select the subsets
    # elsewhere — verify against the dataset loader.
    DATA_DIR = "data/pascal_voc"
    TRAIN_IMAGES_DIR = os.path.join(DATA_DIR, "JPEGImages")
    TRAIN_MASKS_DIR = os.path.join(DATA_DIR, "SegmentationClass")
    VAL_IMAGES_DIR = TRAIN_IMAGES_DIR
    VAL_MASKS_DIR = TRAIN_MASKS_DIR


class CamVidConfig(DatasetConfig):
    """CamVid configuration — a small real dataset (701 images), handy for
    quick validation runs."""

    DATASET_NAME = "CamVid"
    NUM_CLASSES = 32  # CamVid defines 32 semantic classes

    # Both images and masks are PNG
    IMAGE_EXTENSIONS = ['.png']
    MASK_EXTENSIONS = ['.png']

    # Keep the native CamVid resolution instead of the 256x256 base default
    IMAGE_SIZE = (384, 480)
    # Smaller batch size to match the small dataset
    BATCH_SIZE = 2

    # Directory layout: "<split>" holds images, "<split>annot" the masks
    DATA_DIR = "data/camvid"
    TRAIN_IMAGES_DIR = os.path.join(DATA_DIR, "train")
    TRAIN_MASKS_DIR = os.path.join(DATA_DIR, "trainannot")
    VAL_IMAGES_DIR = os.path.join(DATA_DIR, "val")
    VAL_MASKS_DIR = os.path.join(DATA_DIR, "valannot")
    TEST_IMAGES_DIR = os.path.join(DATA_DIR, "test")
    TEST_MASKS_DIR = os.path.join(DATA_DIR, "testannot")


# Registry mapping lowercase dataset names to their configuration classes.
DATASET_REGISTRY = dict(
    ade20k=ADE20KConfig,
    cityscapes=CityscapesConfig,
    pascal_voc=PascalVOCConfig,
    camvid=CamVidConfig,
)


def get_dataset_config(dataset_name):
    """Look up a dataset configuration class by registry name.

    Args:
        dataset_name: Registry key, e.g. "camvid". Matching is
            case-insensitive, since all registry keys are lowercase —
            "CamVid" and "camvid" resolve to the same entry.

    Returns:
        The DatasetConfig subclass registered under that name.

    Raises:
        ValueError: if the name is not registered, listing the available
            dataset names.
    """
    # Normalize case so callers may pass the dataset's display name;
    # non-string keys fall through unchanged (and fail lookup as before).
    key = dataset_name.lower() if isinstance(dataset_name, str) else dataset_name

    try:
        return DATASET_REGISTRY[key]
    except KeyError:
        available_datasets = list(DATASET_REGISTRY.keys())
        raise ValueError(
            f"Dataset '{dataset_name}' not found. Available datasets: {available_datasets}"
        ) from None


def list_available_datasets():
    """Return the names of every registered dataset configuration."""
    # Iterating a dict yields its keys in insertion order
    return list(DATASET_REGISTRY)