import os
import random

import torch
from PIL import Image
from torch.utils.data import Dataset, random_split
from torchvision import transforms

# Absolute path to the project root (the parent of this file's directory)
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Square side length (pixels) every image is resized to before the model.
imgsz = 224
# Preprocessing pipelines keyed by split name.  "train" adds light
# augmentation (flip, rotation, color jitter, translation, erasing);
# both splits resize to imgsz x imgsz and normalize with the standard
# ImageNet mean/std statistics.
data_transforms = {
    "train": transforms.Compose([
        transforms.Resize((imgsz, imgsz)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        # RandomErasing operates on tensors, so it must come after ToTensor.
        transforms.RandomErasing(p=0.2)
    ]),
    "test": transforms.Compose([
        transforms.Resize((imgsz, imgsz)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}


class TestDataset(Dataset):
    """Unlabeled test-set loader.

    Yields ``(image, image_id)`` pairs, where ``image_id`` is the file
    name without its extension so predictions can be matched back to
    their source files.
    """

    # Same extensions AIDataset accepts, kept consistent across loaders.
    IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg')

    def __init__(self, test_dir, transform=None):
        """
        Args:
            test_dir: directory containing the test images (flat layout).
            transform: optional callable applied to each PIL image.
        """
        self.test_dir = test_dir
        self.transform = transform
        # sorted() gives a deterministic sample order regardless of the
        # filesystem's listdir order; the extension filter skips
        # subdirectories and stray non-image files that would otherwise
        # crash Image.open() in __getitem__.
        self.image_paths = [
            os.path.join(test_dir, img_name)
            for img_name in sorted(os.listdir(test_dir))
            if img_name.lower().endswith(self.IMAGE_EXTENSIONS)
        ]

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Load image ``idx`` as RGB and return (image, image_id)."""
        img_path = self.image_paths[idx]
        image = Image.open(img_path).convert("RGB")
        if self.transform:
            image = self.transform(image)
        # File stem identifies the sample (renamed from ``id`` to avoid
        # shadowing the builtin).
        image_id = os.path.splitext(os.path.basename(img_path))[0]
        return image, image_id


class AIDataset(Dataset):
    """Binary real-vs-fake image dataset.

    ``root_dir`` must contain two subdirectories, ``real`` and ``fake``;
    every image inside them yields an ``(image, label)`` pair with
    label 0 for real and 1 for fake.
    """

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir: dataset root containing ``real/`` and ``fake/``.
            transform: optional callable applied to each PIL image.

        Raises:
            FileNotFoundError: if the root or a required subdirectory is missing.
            ValueError: if a subdirectory contains no image files.
        """
        self.root_dir = os.path.abspath(root_dir)  # normalize to an absolute path
        self.transform = transform
        self.labels = {"fake": 1, "real": 0}
        self.image_paths = []
        self.labels_list = []

        # Fail fast if the dataset root does not exist.
        if not os.path.exists(self.root_dir):
            raise FileNotFoundError(f"数据集根目录不存在: {self.root_dir}")

        # Scan the real/ and fake/ subdirectories.
        for label in ["real", "fake"]:
            folder_path = os.path.join(self.root_dir, label)
            if not os.path.exists(folder_path):
                raise FileNotFoundError(f"数据集子目录不存在: {folder_path}")

            # sorted() makes the sample order deterministic across
            # filesystems, so index-based train/val splits with a fixed
            # seed are actually reproducible.
            image_files = sorted(
                f for f in os.listdir(folder_path)
                if f.lower().endswith(('.png', '.jpg', '.jpeg'))
            )
            if not image_files:
                raise ValueError(f"目录中没有找到图片文件: {folder_path}")

            for img_name in image_files:
                self.image_paths.append(os.path.join(folder_path, img_name))
                self.labels_list.append(self.labels[label])

        if not self.image_paths:
            raise ValueError(f"没有找到任何图片文件在目录: {self.root_dir}")

        print(f"成功加载数据集: {len(self.image_paths)} 张图片")

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Load image ``idx`` as RGB, apply the transform, return (image, label)."""
        img_path = self.image_paths[idx]
        image = Image.open(img_path).convert("RGB")
        if self.transform:
            image = self.transform(image)
        label = self.labels_list[idx]
        return image, label

def get_train_val_datasets(root_dir, train_ratio=0.8, seed=42):
    """
    Split the dataset into training and validation sets.

    Args:
        root_dir: dataset root directory (must contain real/ and fake/).
        train_ratio: fraction of samples assigned to the training set.
        seed: random seed controlling the split (now actually effective).

    Returns:
        (train_dataset, val_dataset): torch ``Subset`` views of the
        full dataset.
    """
    # Resolve to an absolute path for unambiguous logging and errors.
    root_dir = os.path.abspath(root_dir)
    print(f"使用数据集目录: {root_dir}")

    # NOTE(review): both subsets share this one dataset object, so the
    # validation split also receives the train-time augmentations;
    # consider a separate eval-transform dataset if that matters.
    full_dataset = AIDataset(root_dir=root_dir, transform=data_transforms["train"])

    # Compute split sizes; the validation set takes the remainder so the
    # two sizes always sum to the dataset length.
    total_size = len(full_dataset)
    train_size = int(train_ratio * total_size)
    val_size = total_size - train_size

    print(f"数据集大小: {total_size}")
    print(f"训练集大小: {train_size}")
    print(f"验证集大小: {val_size}")

    # BUG FIX: random_split draws from torch's RNG, not Python's `random`
    # module, so the previous `random.seed(seed)` call had no effect on
    # the split.  Seed an explicit torch.Generator instead to make the
    # split reproducible.
    generator = torch.Generator().manual_seed(seed)
    train_dataset, val_dataset = random_split(
        full_dataset, [train_size, val_size], generator=generator
    )

    return train_dataset, val_dataset
