import os
import time
import random
import numpy as np
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets

def _build_datasets(args):
    """Create (train_set, val_set) for the dataset named by ``args.data``.

    CIFAR-10/100 get the standard pad-and-crop + horizontal-flip augmentation
    on the training split; any other value of ``args.data`` is treated as an
    ImageNet-style ``ImageFolder`` layout with ``train/`` and ``val/``
    subdirectories under ``args.data_root``.
    """
    if args.data == 'cifar10':
        normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
                                         std=[0.2471, 0.2435, 0.2616])
        train_set = datasets.CIFAR10(args.data_root, train=True, download=True,
                                     transform=transforms.Compose([
                                         transforms.RandomCrop(32, padding=4),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.ToTensor(),
                                         normalize,
                                     ]))
        val_set = datasets.CIFAR10(args.data_root, train=False, download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    elif args.data == 'cifar100':
        normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
                                         std=[0.2675, 0.2565, 0.2761])
        train_set = datasets.CIFAR100(args.data_root, train=True, download=True,
                                      transform=transforms.Compose([
                                          transforms.RandomCrop(32, padding=4),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          normalize,
                                      ]))
        val_set = datasets.CIFAR100(args.data_root, train=False, download=True,
                                    transform=transforms.Compose([
                                        transforms.ToTensor(),
                                        normalize,
                                    ]))
    else:
        # ImageNet-style directory layout.
        traindir = os.path.join(args.data_root, 'train')
        valdir = os.path.join(args.data_root, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        train_set = datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
        val_set = datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    return train_set, val_set


def _fresh_index(train_set, index_path):
    """Generate, persist, and return a new shuffled permutation of the dataset indices."""
    indices = list(range(len(train_set)))
    random.shuffle(indices)
    torch.save(indices, index_path)
    return indices


def _load_or_create_index(train_set, save_dir):
    """Return a shuffled permutation of ``range(len(train_set))``.

    The permutation is persisted at ``save_dir/index.pth`` so repeated runs
    reuse the same train/val split. A missing, stale, or corrupt index file
    is regenerated (and re-saved) rather than raising.
    """
    index_path = os.path.join(save_dir, 'index.pth')
    os.makedirs(save_dir, exist_ok=True)

    if not os.path.exists(index_path):
        print('Generating new indices...')
        train_set_index = _fresh_index(train_set, index_path)
    else:
        print('Loading existing indices...')
        train_set_index = torch.load(index_path)
        # Older runs may have stored a tensor; normalize to a Python list.
        if isinstance(train_set_index, torch.Tensor):
            train_set_index = train_set_index.tolist()

    print(f"Dataset size: {len(train_set)}")
    print(f"Index length: {len(train_set_index)}")
    print(f"Index type: {type(train_set_index)}")
    print(f"First few indices: {train_set_index[:5]}")

    # Check the length BEFORE calling min()/max(): both raise ValueError on an
    # empty list, which would crash here instead of triggering regeneration.
    valid = (len(train_set_index) == len(train_set)
             and min(train_set_index) >= 0
             and max(train_set_index) < len(train_set))
    if not valid:
        print("Warning: Index validation failed. Regenerating index...")
        print(f"Index length: {len(train_set_index)}, Dataset size: {len(train_set)}")
        train_set_index = _fresh_index(train_set, index_path)

    # Explicit raise instead of assert: asserts are stripped under `python -O`,
    # which would silently disable this final sanity check.
    if (len(train_set_index) != len(train_set)
            or min(train_set_index) < 0
            or max(train_set_index) >= len(train_set)):
        raise RuntimeError("Index validation failed after regeneration")

    return train_set_index


def get_dataloaders(args):
    """Build train/val/test DataLoaders for CIFAR-10/100 or an ImageFolder dataset.

    When ``args.use_valid`` is true, a persistent shuffled permutation of the
    training set is split into train/val subsets (the held-out tail becomes
    the validation split, sampled from the *augmented* train_set), and the
    official test split backs the test loader. Otherwise the official splits
    are used directly and the val loader doubles as the test loader.

    Args:
        args: namespace with at least ``data``, ``data_root``, ``batch_size``,
            ``use_valid``, ``splits``, and ``save`` attributes.

    Returns:
        (train_loader, val_loader, test_loader); entries not requested via
        ``args.splits`` are ``None``.
    """
    train_loader, val_loader, test_loader = None, None, None

    train_set, val_set = _build_datasets(args)

    if args.use_valid:
        train_set_index = _load_or_create_index(train_set, args.save)

        # Size of the held-out validation split.
        if args.data.startswith('cifar') or args.data.startswith('MiNi'):
            num_sample_valid = 5000
        else:
            num_sample_valid = 50000

        # The tail of the permutation becomes the validation subset.
        train_indices = train_set_index[:-num_sample_valid]
        val_indices = train_set_index[-num_sample_valid:]

        print(f"Training samples: {len(train_indices)}")
        print(f"Validation samples: {len(val_indices)}")
        print(f"Train indices range: {min(train_indices)} - {max(train_indices)}")
        print(f"Val indices range: {min(val_indices)} - {max(val_indices)}")

        # num_workers=0 / pin_memory=False: single-process loading, kept to
        # avoid multiprocessing issues (original authors' deliberate choice).
        if 'train' in args.splits:
            train_loader = torch.utils.data.DataLoader(
                train_set, batch_size=args.batch_size,
                sampler=torch.utils.data.sampler.SubsetRandomSampler(train_indices),
                num_workers=0,
                pin_memory=False,
                drop_last=True)
            print(f"Train loader created with {len(train_indices)} samples")

        if 'val' in args.splits:
            # NOTE: validation samples come from train_set, so they receive
            # the training-time augmentations.
            val_loader = torch.utils.data.DataLoader(
                train_set, batch_size=args.batch_size,
                sampler=torch.utils.data.sampler.SubsetRandomSampler(val_indices),
                num_workers=0,
                pin_memory=False,
                drop_last=True)
            print(f"Val loader created with {len(val_indices)} samples")

        if 'test' in args.splits:
            test_loader = torch.utils.data.DataLoader(
                val_set, batch_size=128, shuffle=False,
                num_workers=0,
                pin_memory=False)
            print(f"Test loader created with {len(val_set)} samples")
    else:
        # No held-out validation split: use the official splits directly.
        if 'train' in args.splits:
            train_loader = torch.utils.data.DataLoader(
                train_set, batch_size=args.batch_size, shuffle=True,
                num_workers=0, pin_memory=False)
            print(f"Train loader created with {len(train_set)} samples")

        if 'val' in args.splits or 'test' in args.splits:
            val_loader = torch.utils.data.DataLoader(
                val_set, batch_size=args.batch_size, shuffle=False,
                num_workers=0, pin_memory=False)
            test_loader = val_loader
            print(f"Val/Test loader created with {len(val_set)} samples")

    # Final summary of what was actually built.
    print(f"Final - Train batches: {len(train_loader) if train_loader else 0}, "
          f"Val batches: {len(val_loader) if val_loader else 0}, "
          f"Test batches: {len(test_loader) if test_loader else 0}")

    return train_loader, val_loader, test_loader