# datasets/dataset.py
import os
import random
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torchvision.transforms as transforms
from config import args

class SuperResolutionDataset(Dataset):
    """Super-resolution dataset.

    Recursively collects images under ``data_dir`` and yields ``(lr, hr)``
    tensor pairs: an HR patch cropped from the image and its bicubic
    downscale by ``scale_factor``. Both tensors are normalized to [-1, 1].
    """
    def __init__(self, data_dir, scale_factor=4, patch_size=128, train=True, augment=True, cache_in_memory=False):
        """
        Initialize the dataset.

        Args:
            data_dir:           directory to search (recursively) for images
            scale_factor:       super-resolution upscaling factor
            patch_size:         HR patch size (LR patch is patch_size // scale_factor)
            train:              training mode (random crop + augmentation) vs validation (center crop)
            augment:            whether to apply flip/rotation augmentation (training only)
            cache_in_memory:    cache decoded images in RAM to skip repeated disk reads

        Raises:
            ValueError: if no supported image files are found under data_dir.
        """
        self.data_dir = data_dir
        self.scale_factor = scale_factor
        self.patch_size = patch_size
        self.train = train
        self.augment = augment
        self.cache_in_memory = cache_in_memory

        # Collect image file paths (sorted for deterministic indexing)
        self.image_files = self._get_image_files(data_dir)

        if len(self.image_files) == 0:
            raise ValueError(f"在目录 {data_dir} 中没有找到图像文件")

        # In-memory cache of DECODED IMAGES, not final tensors: caching the
        # output tensors would freeze the random crop/augmentation after the
        # first epoch and silently disable data augmentation.
        self.cache = {} if cache_in_memory else None

        # Image-to-tensor conversion
        self.to_tensor = transforms.ToTensor()  # -> float tensor in [0, 1]
        self.normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # -> [-1, 1]

        print(f"初始化数据集：{len(self.image_files)} 张图像，模式：{'训练' if train else '验证'}")

    def _get_image_files(self, data_dir):
        """Return a sorted list of all supported image file paths under data_dir."""
        supported_formats = ('.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.tif')
        image_files = []

        for root, dirs, files in os.walk(data_dir):
            for file in files:
                if file.lower().endswith(supported_formats):
                    image_files.append(os.path.join(root, file))

        return sorted(image_files)

    def _ensure_min_size(self, img, hr_patch):
        """Bicubic-upscale img so that both sides are at least hr_patch pixels.

        No-op for images that are already large enough.
        """
        w, h = img.size
        if w >= hr_patch and h >= hr_patch:
            return img
        scale = hr_patch / min(w, h)
        # max() guards against float truncation leaving a side one pixel
        # short of hr_patch, which would make the crop offset range negative.
        new_w = max(hr_patch, int(w * scale))
        new_h = max(hr_patch, int(h * scale))
        return img.resize((new_w, new_h), Image.BICUBIC)

    def _random_crop(self, img, hr_patch_size):
        """Randomly crop an HR patch and derive its bicubic LR counterpart."""
        hr_patch = hr_patch_size
        lr_patch = hr_patch // self.scale_factor

        # Upscale first if the image is too small to crop from
        img = self._ensure_min_size(img, hr_patch)
        w, h = img.size

        # Random crop position
        x = random.randint(0, w - hr_patch)
        y = random.randint(0, h - hr_patch)

        hr_img = img.crop((x, y, x + hr_patch, y + hr_patch))
        lr_img = hr_img.resize((lr_patch, lr_patch), Image.BICUBIC)

        return lr_img, hr_img

    def _center_crop(self, img, hr_patch_size):
        """Center-crop an HR patch (deterministic; used for validation)."""
        hr_patch = hr_patch_size
        lr_patch = hr_patch // self.scale_factor

        # Upscale undersized images; otherwise the center offsets below go
        # negative and PIL pads the crop with black borders.
        img = self._ensure_min_size(img, hr_patch)
        w, h = img.size

        x = (w - hr_patch) // 2
        y = (h - hr_patch) // 2

        hr_img = img.crop((x, y, x + hr_patch, y + hr_patch))
        lr_img = hr_img.resize((lr_patch, lr_patch), Image.BICUBIC)

        return lr_img, hr_img

    def _augment_images(self, lr_img, hr_img):
        """Apply the same random flip/rotation to the LR/HR pair (if enabled)."""
        if not self.augment:
            return lr_img, hr_img

        # Random horizontal flip
        if random.random() > 0.5:
            lr_img = lr_img.transpose(Image.FLIP_LEFT_RIGHT)
            hr_img = hr_img.transpose(Image.FLIP_LEFT_RIGHT)

        # Random vertical flip
        if random.random() > 0.5:
            lr_img = lr_img.transpose(Image.FLIP_TOP_BOTTOM)
            hr_img = hr_img.transpose(Image.FLIP_TOP_BOTTOM)

        # Random rotation (0/90/180/270 degrees; patches are square so the
        # size is unchanged)
        if random.random() > 0.5:
            angle = random.choice([0, 90, 180, 270])
            lr_img = lr_img.rotate(angle)
            hr_img = hr_img.rotate(angle)

        return lr_img, hr_img

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        """Return a normalized (lr_tensor, hr_tensor) pair for index idx."""
        # Load the image, preferring the in-memory cache when enabled
        if self.cache is not None and idx in self.cache:
            img = self.cache[idx]
        else:
            img_path = self.image_files[idx]
            try:
                img = Image.open(img_path).convert('RGB')
            except Exception as e:
                print(f"无法加载图像 {img_path}：{e}")
                # Substitute random noise so a corrupt file doesn't kill training
                return self._get_random_fallback()
            if self.cache is not None:
                self.cache[idx] = img

        # Crop (random + augmented for training, deterministic center for val)
        if self.train:
            lr_img, hr_img = self._random_crop(img, self.patch_size)
            lr_img, hr_img = self._augment_images(lr_img, hr_img)
        else:
            lr_img, hr_img = self._center_crop(img, self.patch_size)

        # Convert to tensors and normalize to [-1, 1]
        lr_tensor = self.normalize(self.to_tensor(lr_img))
        hr_tensor = self.normalize(self.to_tensor(hr_img))

        return lr_tensor, hr_tensor

    def _get_random_fallback(self):
        """Return a random-noise (lr, hr) pair in [-1, 1] as a load-failure fallback."""
        lr_tensor = torch.rand(3, self.patch_size // self.scale_factor, self.patch_size // self.scale_factor) * 2 - 1
        hr_tensor = torch.rand(3, self.patch_size, self.patch_size) * 2 - 1
        return lr_tensor, hr_tensor

class CombinedDataset(Dataset):
    """Wrapper that concatenates several SuperResolutionDataset instances.

    Missing paths are skipped with a warning; at least one path must exist.
    """
    def __init__(self, dataset_paths, scale_factor=4, patch_size=128, train=True,
                 augment=True, cache_in_memory=False):
        loaded = []
        for root in dataset_paths:
            # Skip (with a warning) any path that does not exist on disk
            if not os.path.exists(root):
                print(f"警告: 数据集路径不存在: {root}")
                continue
            loaded.append(
                SuperResolutionDataset(
                    root,
                    scale_factor=scale_factor,
                    patch_size=patch_size,
                    train=train,
                    augment=augment,
                    cache_in_memory=cache_in_memory,
                )
            )

        if not loaded:
            raise ValueError("没有有效的数据集路径")

        self.datasets = loaded
        self.combined = ConcatDataset(loaded)

    def __len__(self):
        return len(self.combined)

    def __getitem__(self, idx):
        return self.combined[idx]
    
# 优化的数据加载器
def create_optimized_dataloaders(batch_size, num_workers=8, pin_memory=True):
    """Build training and validation DataLoaders.

    Training combines DIV2K and Flickr2K; validation uses DIV2K only
    (center crops, no augmentation). Dataset roots and crop settings come
    from the project-level ``args`` config.

    Args:
        batch_size:  samples per batch for both loaders
        num_workers: worker processes per loader (0 = load in main process)
        pin_memory:  pin host memory for faster host-to-device transfer

    Returns:
        (train_loader, val_loader) tuple of DataLoader instances.
    """
    # Combine multiple large datasets for training
    dataset_paths = [
        os.path.join(args.dataset_path, "DIV2K"),
        os.path.join(args.dataset_path, "Flickr2K"),
    ]

    train_dataset = CombinedDataset(
        dataset_paths,
        scale_factor=args.scale_factor,
        patch_size=args.patch_size,
        train=True,
    )

    val_dataset = SuperResolutionDataset(
        os.path.join(args.dataset_path, "DIV2K"),
        train=False,
        scale_factor=args.scale_factor,
        patch_size=args.patch_size,
    )

    # prefetch_factor / persistent_workers are only legal with num_workers > 0;
    # passing them when num_workers == 0 makes DataLoader raise ValueError.
    worker_opts = {}
    if num_workers > 0:
        worker_opts = {
            "prefetch_factor": 2,       # prefetch the next batches per worker
            "persistent_workers": True,  # keep worker processes alive across epochs
        }

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=pin_memory,
        **worker_opts,
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    return train_loader, val_loader