from torch.optim import Adam, SGD
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR


def create_optimizer(model, config):
    """Create an optimizer and LR scheduler for *model* from a config dict.

    Parameters are split into three mutually exclusive groups:
      * names containing 'conv' -> explicit weight_decay from config
      * names containing 'fc'   -> 10x base learning rate
      * everything else         -> optimizer defaults

    Args:
        model: Model whose parameters will be optimized.
        config: Configuration dict, e.g.::

            {
                'type': 'adam',
                'lr': 0.001,
                'weight_decay': 1e-5,
                'scheduler': {
                    'type': 'cosine',
                    'T_max': 10
                }
            }

    Returns:
        Tuple of ``(optimizer, scheduler)``.
    """
    # Partition parameters exhaustively. BUG FIX: previously only names
    # containing 'conv' or 'fc' were collected, so every other parameter
    # (biases of other modules, BatchNorm, embeddings, ...) was silently
    # excluded from the optimizer and never updated. The elif also prevents
    # a name matching both patterns from appearing in two groups, which
    # PyTorch rejects with a ValueError.
    conv_params, fc_params, other_params = [], [], []
    for name, param in model.named_parameters():
        if 'conv' in name:
            conv_params.append(param)
        elif 'fc' in name:
            fc_params.append(param)
        else:
            other_params.append(param)

    param_groups = [
        {'params': conv_params,
         'weight_decay': config.get('weight_decay', 1e-5)},
        {'params': fc_params,
         'lr': config['lr'] * 10},  # larger LR for fully-connected layers
        {'params': other_params},   # catch-all group, optimizer defaults
    ]

    # Optimizer selection; anything other than 'adam' falls back to SGD.
    if config['type'] == 'adam':
        optimizer = Adam(
            param_groups,
            lr=config['lr'],
            betas=config.get('betas', (0.9, 0.999)),
            eps=config.get('eps', 1e-8),
            weight_decay=config.get('weight_decay', 0)
        )
    else:
        optimizer = SGD(
            param_groups,
            lr=config['lr'],
            momentum=config.get('momentum', 0.9),
            weight_decay=config.get('weight_decay', 0)  # explicit weight_decay
        )

    # LR scheduler; anything other than 'cosine' falls back to StepLR.
    scheduler_config = config.get('scheduler', {})
    if scheduler_config.get('type') == 'cosine':
        scheduler = CosineAnnealingLR(
            optimizer,
            T_max=scheduler_config['T_max'],
            eta_min=scheduler_config.get('eta_min', 1e-6)
        )
    else:
        scheduler = StepLR(
            optimizer,
            step_size=scheduler_config.get('step_size', 5),
            gamma=scheduler_config.get('gamma', 0.1)
        )

    return optimizer, scheduler