import os
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from PIL import Image
import yaml
import warnings
from dataset.augmentation.classification_preset_train import ClassificationPresetTrain
from dataset.augmentation.classification_preset_eval import ClassificationPresetEval

# Global PIL configuration — silence Pillow warnings for large/odd images.
# NOTE(review): MAX_IMAGE_PIXELS = None disables Pillow's decompression-bomb
# protection entirely; this is only safe if all image sources are trusted —
# confirm that assumption holds for this dataset.
Image.MAX_IMAGE_PIXELS = None  # disables DecompressionBombWarning
warnings.filterwarnings("ignore", category=UserWarning, module="PIL")
warnings.filterwarnings("ignore", category=UserWarning, module="PIL.TiffImagePlugin")
warnings.filterwarnings("ignore", category=UserWarning, module="PIL.Image")

# Fallback transform used when no augmentation preset is supplied:
# resize to 224x224, convert to tensor, normalize with ImageNet statistics.
default_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Sentinel default: lets the module-level ``default_transform`` be resolved
# lazily (only when the caller omits ``transform``), while an explicit
# ``transform=None`` still means "return the raw PIL image".
_UNSET_TRANSFORM = object()


class CustomImageDataset(Dataset):
    """Image-folder dataset laid out as ``img_dir/<integer label>/<image>``.

    Every subdirectory name of ``img_dir`` is parsed as an integer class
    label; each image file directly inside it becomes one sample.
    Unreadable/corrupt images are skipped at access time by falling through
    to the next sample.
    """

    def __init__(self, img_dir, transform=_UNSET_TRANSFORM):
        self.img_dir = img_dir
        # Backward compatible with the old eager `transform=default_transform`
        # default: omitting the argument still selects default_transform.
        self.transform = default_transform if transform is _UNSET_TRANSFORM else transform
        self.img_labels = self._load_labels()

    def _load_labels(self):
        """Scan ``img_dir`` and return a list of ``(image_path, int_label)``.

        Only files with a known image extension are kept, so stray files
        (e.g. README, checksums) in the label directories are ignored.
        """
        image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp')
        img_labels = []
        for label in os.listdir(self.img_dir):
            label_dir = os.path.join(self.img_dir, label)
            if not os.path.isdir(label_dir):
                continue
            for img_file in os.listdir(label_dir):
                if img_file.lower().endswith(image_extensions):
                    img_labels.append((os.path.join(label_dir, img_file), int(label)))
        return img_labels

    def _load_image(self, img_path):
        """Open one image file and return it as an RGB PIL image."""
        with Image.open(img_path) as img:
            if img.mode == 'RGB':
                # Already RGB: copy so the data survives closing the file.
                return img.copy()
            if img.mode == 'P':
                # Palette images go through RGBA first so transparency is
                # resolved cleanly (direct P->RGB triggers a Pillow warning).
                return img.convert('RGBA').convert('RGB')
            # Grayscale ('L'), 'RGBA' and every other mode convert directly;
            # the original's separate branches all performed this same call.
            return img.convert('RGB')

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, idx):
        # Skip corrupt files by scanning forward (with wrap-around) instead of
        # recursing: the old self.__getitem__((idx + 1) % len(self)) retry
        # risked RecursionError and looped forever if every file was bad.
        # After one full pass with no readable image we fail loudly.
        total = len(self.img_labels)
        for offset in range(total):
            img_path, label = self.img_labels[(idx + offset) % total]
            try:
                image = self._load_image(img_path)
            except Exception:
                # Corrupt/unreadable image: silently move to the next sample
                # (preserves the original best-effort behavior).
                continue
            if self.transform:
                image = self.transform(image)
            return image, label
        raise RuntimeError(f"No readable images found under {self.img_dir!r}")

def _build_augmentations(config_path, rank):
    """Load the YAML augmentation config and instantiate train/val presets.

    Robust against every observed failure mode: unreadable file, empty YAML
    (``safe_load`` returns ``None``), non-mapping YAML, a missing ``train``
    or ``val`` key (previously a later ``KeyError``), and a preset expression
    that fails to evaluate. Each failure falls back to the default preset.
    """
    try:
        with open(config_path, 'r', encoding='utf-8') as file:
            config = yaml.safe_load(file)
    except Exception as e:
        if rank == 0:
            print(f"加载增强配置失败: {e}，使用默认配置")
        # Same fallback expressions the original used on load failure.
        config = {
            'train': 'ClassificationPresetTrain(crop_size=224, auto_augment_policy="ra")',
            'val': 'ClassificationPresetEval(crop_size=224)'
        }
    if not isinstance(config, dict):
        # Empty or scalar YAML: safe_load returned None/str — previously this
        # crashed on `'train' in config`; treat it as "no config".
        config = {}

    defaults = {
        'train': lambda: ClassificationPresetTrain(crop_size=224),
        'val': lambda: ClassificationPresetEval(crop_size=224),
    }
    error_labels = {'train': "训练增强配置错误", 'val': "验证增强配置错误"}

    augmentation = {}
    for split in ('train', 'val'):
        expr = config.get(split)
        if expr is None:
            # Missing key: use the default preset instead of raising KeyError
            # when the dataset is built.
            augmentation[split] = defaults[split]()
            continue
        try:
            # SECURITY: eval() executes arbitrary code from the config file.
            # Acceptable only for trusted, locally-authored configs — never
            # point this at user-supplied input.
            augmentation[split] = eval(expr)
        except Exception as e:
            if rank == 0:
                print(f"{error_labels[split]}: {e}，使用默认配置")
            augmentation[split] = defaults[split]()
    return augmentation


def create_dataloader(args, batch_size=None, augmentation_config_path=None):
    """Build train/val DataLoaders from ``args.data_dir``.

    Expects ``data_dir/train`` and ``data_dir/val`` in the folder layout
    understood by ``CustomImageDataset``. ``batch_size`` and
    ``augmentation_config_path`` override the corresponding ``args``
    attributes when given. In a distributed run, samplers are attached and
    log messages are printed on rank 0 only.

    Returns:
        tuple: ``(train_loader, val_loader)``.
    """
    data_dir = args.data_dir
    batch_size = batch_size or args.batch_size
    augmentation_config_path = augmentation_config_path or args.augmentation_config

    # is_available() guards torch builds compiled without distributed support,
    # where is_initialized() may not even exist.
    import torch.distributed as dist
    is_distributed = dist.is_available() and dist.is_initialized()
    rank = int(os.environ.get('RANK', 0)) if is_distributed else 0

    augmentation = _build_augmentations(augmentation_config_path, rank)

    if rank == 0:
        print("创建训练数据集...")
    train_dataset = CustomImageDataset(
        img_dir=os.path.join(data_dir, 'train'),
        transform=augmentation['train']
    )

    if rank == 0:
        print("创建验证数据集...")
    val_dataset = CustomImageDataset(
        img_dir=os.path.join(data_dir, 'val'),
        transform=augmentation['val']
    )

    # Distributed samplers: training shuffles per-epoch, validation does not.
    train_sampler = None
    val_sampler = None
    if is_distributed:
        train_sampler = DistributedSampler(train_dataset, shuffle=True)
        val_sampler = DistributedSampler(val_dataset, shuffle=False)

    import multiprocessing as mp
    cpu_count = mp.cpu_count()

    # Conservative worker counts. val_workers is clamped to >= 1 because
    # cpu_count // 2 is 0 on a single-core host, and num_workers=0 makes
    # persistent_workers=True / prefetch_factor raise ValueError.
    train_workers = min(8, cpu_count)
    val_workers = max(1, min(4, cpu_count // 2))

    if rank == 0:  # rank-guarded for consistency with every other log line
        print(f"🔧 CPU核心数: {cpu_count}")
        print(f"🔧 训练workers: {train_workers}, 验证workers: {val_workers}")

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=(train_sampler is None),  # the distributed sampler shuffles
        sampler=train_sampler,
        num_workers=train_workers,
        pin_memory=True,
        persistent_workers=True,
        prefetch_factor=2,
        drop_last=True,
    )

    # NOTE(review): drop_last=True here silently excludes the final partial
    # batch from validation, so metrics ignore up to batch_size - 1 samples;
    # confirm this is intentional (e.g. for fixed-size batch requirements).
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        sampler=val_sampler,
        num_workers=val_workers,
        pin_memory=True,
        persistent_workers=True,
        prefetch_factor=2,
        drop_last=True,
    )

    if rank == 0:
        print(f"训练集: {len(train_dataset)} 张图片")
        print(f"验证集: {len(val_dataset)} 张图片")
        print(f"训练DataLoader: {train_workers} workers, batch_size={batch_size}")
        print(f"验证DataLoader: {val_workers} workers, batch_size={batch_size}")
        if is_distributed:
            print(f"分布式训练: {dist.get_world_size()} GPUs")

    return train_loader, val_loader

# Example usage
# if __name__ == "__main__":
#     train_loader, val_loader = create_dataloader()
