import os
import csv
import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from PIL import Image
from torchvision.transforms import v2
import pickle

def compute_mean_std(loader):
    """Compute per-channel mean and std over every sample yielded by *loader*.

    Accumulation is weighted by batch size, so the result is exact even when
    the final batch is smaller than the others (averaging per-batch means with
    equal weight — the naive approach — over-weights a short last batch).

    Args:
        loader: iterable of ``(data, target)`` pairs where ``data`` is a float
            tensor of shape (B, C, H, W).

    Returns:
        tuple[list[float], list[float]]: per-channel mean and population
        (N-denominator) standard deviation, as plain Python lists.
    """
    channels_sum, channels_sqrd_sum, num_samples = 0.0, 0.0, 0
    for data, _ in loader:
        batch_size = data.size(0)
        # Per-batch channel means scaled by batch size == per-batch channel
        # sums divided by the (constant) spatial element count.
        channels_sum += torch.mean(data, dim=[0, 2, 3]) * batch_size
        channels_sqrd_sum += torch.mean(data**2, dim=[0, 2, 3]) * batch_size
        num_samples += batch_size

    mean = channels_sum / num_samples
    # Var = E[x^2] - (E[x])^2; sqrt gives the population std.
    std = (channels_sqrd_sum / num_samples - mean**2) ** 0.5
    return mean.tolist(), std.tolist()

class TinyImageNetValDataset(Dataset):
    """Tiny-ImageNet-200 validation split.

    The validation images live in ``<root>/images`` and their labels come
    from ``<root>/val_annotations.txt`` (tab-separated: filename, wnid,
    bounding box). Labels are mapped to integer class indices through
    *wnid_to_idx*, which must match the training set's ``class_to_idx``.
    """

    def __init__(self, root, wnid_to_idx, transform=None):
        super().__init__()
        self.root = root
        self.transform = transform
        self.wnid_to_idx = wnid_to_idx

        self.annotations_file = os.path.join(root, "val_annotations.txt")
        self.images_dir = os.path.join(root, "images")

        # Each row: <filename>\t<wnid>\t<bbox columns...>; only the first two
        # columns are needed here.
        with open(self.annotations_file, "r") as fh:
            rows = list(csv.reader(fh, delimiter="\t"))
        self.samples = [
            (os.path.join(self.images_dir, row[0]), self.wnid_to_idx[row[1]])
            for row in rows
        ]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, target = self.samples[idx]
        # Open inside a context manager so the file handle is released even
        # if decoding fails; convert to RGB for a consistent 3-channel input.
        with open(path, 'rb') as fh:
            image = Image.open(fh).convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image, target

def prepare_data(
    data_set="cifar10",
    data_root="./data",
    train_batch_size=128,
    test_batch_size=100,
    num_workers=16,
    pin_memory=True
):
    """Build train/test DataLoaders for CIFAR-10/100 or Tiny-ImageNet-200.

    On the first call for a dataset, the per-channel mean/std of the training
    split is computed on the fly, normalized datasets are built, and both are
    pickled to ``<data_root>/cache/<data_set>/``. Subsequent calls load the
    pickled datasets directly and skip the statistics pass.

    Args:
        data_set: one of "cifar10", "cifar100", "tiny-imagenet".
        data_root: root directory holding the raw data and the cache.
        train_batch_size: batch size for the training loader.
        test_batch_size: batch size for the test loader.
        num_workers: worker count forwarded to both DataLoaders.
        pin_memory: pin_memory flag forwarded to both DataLoaders.

    Returns:
        tuple[DataLoader, DataLoader]: ``(trainloader, testloader)``.

    Raises:
        ValueError: if *data_set* is not one of the supported names.
    """
    print(f"==> Preparing {data_set} data..")
    # CIFAR images are 32x32; Tiny-ImageNet images are 64x64.
    size = 32 if "cifar" in data_set else 64

    cache_dir = os.path.join(data_root, "cache", data_set)
    os.makedirs(cache_dir, exist_ok=True)
    trainset_cache_path = os.path.join(cache_dir, "trainset.pkl")
    testset_cache_path = os.path.join(cache_dir, "testset.pkl")

    trainset = None
    testset = None

    # Try to reuse previously processed (already-normalized) datasets.
    cache_exists = (os.path.exists(trainset_cache_path) and
                    os.path.exists(testset_cache_path)
                    )

    if cache_exists:
        try:
            print("==> Found cache files, trying to load them...")
            # NOTE(security): pickle.load executes arbitrary code on load;
            # only use cache files written by this script on a trusted host.
            with open(trainset_cache_path, "rb") as f:
                trainset = pickle.load(f)
            with open(testset_cache_path, "rb") as f:
                testset = pickle.load(f)
            print("==> Loaded datasets from cache.")
        except Exception as e:
            print(f"==> Failed to load cache due to {e}. Reprocessing data...")
            cache_exists = False  # fall through to a full rebuild

    if not cache_exists:
        # PIL -> float tensor in [0, 1]; shared by all pipelines below.
        to_float_tensor = v2.Compose(
            [v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])

        # Transform WITHOUT Normalize, used only for the mean/std pass.
        transform_train_no_norm = v2.Compose([
            v2.RandomResizedCrop(size),
            v2.RandomHorizontalFlip(),
            to_float_tensor
        ])

        if data_set == "cifar10":
            trainset_no_norm = torchvision.datasets.CIFAR10(
                root=data_root,
                train=True,
                download=True,
                transform=transform_train_no_norm
            )
        elif data_set == "cifar100":
            trainset_no_norm = torchvision.datasets.CIFAR100(
                root=data_root,
                train=True,
                download=True,
                transform=transform_train_no_norm
            )
        elif data_set == "tiny-imagenet":
            trainset_no_norm = torchvision.datasets.ImageFolder(
                root=os.path.join(data_root, "tiny-imagenet-200", "train"),
                transform=transform_train_no_norm
            )
        else:
            raise ValueError(f"Unsupported dataset: {data_set}")

        # shuffle=False: sample order is irrelevant for channel statistics,
        # and a fixed order keeps the computed mean/std deterministic.
        trainloader_no_norm = DataLoader(
            trainset_no_norm,
            batch_size=train_batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=pin_memory
        )
        mean, std = compute_mean_std(trainloader_no_norm)
        print(f"Calculated mean: {mean}, std: {std}")

        # Transforms WITH Normalize, using the freshly computed statistics.
        transform_train = v2.Compose([
            v2.RandomResizedCrop(size),
            v2.RandomHorizontalFlip(),
            to_float_tensor,
            v2.Normalize(mean, std)
        ])
        transform_test = v2.Compose([
            to_float_tensor,
            v2.Normalize(mean, std)
        ])

        # Re-instantiate the training set and build the test/validation set,
        # this time with the normalizing transforms attached.
        if data_set == "cifar10":
            trainset = torchvision.datasets.CIFAR10(
                root=data_root, train=True, download=False, transform=transform_train)
            testset = torchvision.datasets.CIFAR10(
                root=data_root, train=False, download=True, transform=transform_test)

        elif data_set == "cifar100":
            trainset = torchvision.datasets.CIFAR100(
                root=data_root, train=True, download=False, transform=transform_train)
            testset = torchvision.datasets.CIFAR100(
                root=data_root, train=False, download=True, transform=transform_test)

        else:  # tiny-imagenet-200
            train_dir = os.path.join(data_root, "tiny-imagenet-200", "train")
            val_dir   = os.path.join(data_root, "tiny-imagenet-200", "val")

            trainset = torchvision.datasets.ImageFolder(
                root=train_dir,
                transform=transform_train
            )
            # The validation set must share the training set's class mapping.
            wnid_to_idx = trainset.class_to_idx

            testset  = TinyImageNetValDataset(
                root=val_dir,
                wnid_to_idx=wnid_to_idx,
                transform=transform_test
            )

        # Persist the processed datasets; a failed write is non-fatal
        # (the datasets are already in memory), so only warn.
        try:
            with open(trainset_cache_path, "wb") as f:
                pickle.dump(trainset, f)
            with open(testset_cache_path, "wb") as f:
                pickle.dump(testset, f)
            print("==> Saved datasets to cache.")
        except Exception as e:
            print(f"==> Failed to save cache due to {e}.")

    # Final DataLoaders over the normalized datasets.
    trainloader = DataLoader(
        trainset,
        batch_size=train_batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    testloader = DataLoader(
        testset,
        batch_size=test_batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    return trainloader, testloader