import multiprocessing
from torch.utils.data import DataLoader

from .simple_dataset import SimpleDataset
from lib.utils import dual_print

# Dataset-name substrings served by SimpleDataset; the loader factories below
# match them against opt["dataset_name"] via substring containment.
simple_image_segmentation_datasets = ["ISIC", "BUSI", "COMMON"]


def _get_optimal_num_workers():
    """自动计算最优worker数量"""
    cpu_count = multiprocessing.cpu_count()
    optimal_workers = min(cpu_count * 2, 16)
    return optimal_workers


def _optimize_dataloader_config(opt):
    """优化DataLoader配置"""
    # 自动优化worker数量
    num_workers = opt.get("num_workers", 8)
    if num_workers <= 0:
        opt["num_workers"] = _get_optimal_num_workers()
        dual_print(f"🔧 自动设置 num_workers = {opt['num_workers']}")
    
    # 设置优化参数
    prefetch_factor = opt.get("prefetch_factor", 4) if opt["num_workers"] > 0 else None
    persistent_workers = opt.get("persistent_workers", True) if opt["num_workers"] > 0 else False
    
    return prefetch_factor, persistent_workers


def get_data_loader(opt):
    """
    Build optimized train/valid DataLoaders for the dataset named in opt.

    Args:
        opt: params dict; ``opt["steps_per_epoch"]`` is written back, and
            ``opt["num_workers"]`` may be rewritten by the config step.
    Returns:
        (train_loader, valid_loader)
    Raises:
        RuntimeError: when opt["dataset_name"] matches no supported dataset.
    """
    # Normalize worker settings and derive prefetch/persistence options first.
    prefetch_factor, persistent_workers = _optimize_dataloader_config(opt)

    # Guard clause: only the simple segmentation datasets are supported here.
    supported = any(
        name in opt["dataset_name"] for name in simple_image_segmentation_datasets
    )
    if not supported:
        raise RuntimeError(f"No {opt['dataset_name']} dataloader available")

    train_set = SimpleDataset(opt, mode="train")
    valid_set = SimpleDataset(opt, mode="valid")

    num_workers = opt["num_workers"]
    valid_workers = min(num_workers, 4)  # validation needs less throughput

    train_loader = DataLoader(
        train_set,
        batch_size=opt["batch_size"],
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=opt.get("drop_last", False),
        prefetch_factor=prefetch_factor,
        persistent_workers=persistent_workers,
    )

    # Validation: batch size 1, capped workers, no persistent workers.
    valid_loader = DataLoader(
        valid_set,
        batch_size=1,
        shuffle=False,
        num_workers=valid_workers,
        pin_memory=True,
        prefetch_factor=2 if num_workers > 0 else None,
        persistent_workers=False,
    )

    opt["steps_per_epoch"] = len(train_loader)

    # Log the effective configuration.
    dual_print("⚡ DataLoader优化配置:")
    dual_print(f"   - 训练 num_workers: {num_workers}")
    dual_print(f"   - 验证 num_workers: {valid_workers}")
    dual_print(f"   - prefetch_factor: {prefetch_factor}")
    dual_print(f"   - persistent_workers: {persistent_workers}")

    return train_loader, valid_loader


def get_test_data_loader(opt):
    """
    Build the test DataLoader for the dataset named in opt.
    :param opt: params dict
    :return: test DataLoader
    :raises RuntimeError: when opt["dataset_name"] matches no supported dataset
    """
    # Guard clause: only the simple segmentation datasets are supported here.
    supported = any(
        name in opt["dataset_name"] for name in simple_image_segmentation_datasets
    )
    if not supported:
        raise RuntimeError(f"No {opt['dataset_name']} dataloader available")

    test_set = SimpleDataset(
        opt,
        mode="test",
        auto_append=not opt.get("forbid_auto_append", False),
        need_metrics=not opt.get("forbid_metrics", False),
    )

    # Testing needs little throughput, so cap worker processes at 2.
    test_workers = min(opt.get("num_workers", 4), 2)

    test_loader = DataLoader(
        test_set,
        batch_size=1,
        shuffle=False,
        num_workers=test_workers,
        pin_memory=True,
        prefetch_factor=2 if test_workers > 0 else None,
    )

    dual_print(f"📊 测试DataLoader: num_workers={test_workers}")
    return test_loader
