import torch
import math

class WarmupScheduler:
    """Scheduler wrapper that adds a learning-rate warmup phase.

    For the first `warmup_epochs` calls to step(), the lr of every parameter
    group is ramped from `warmup_start_lr` toward that group's base lr
    (`warmup_type` selects a linear, cosine, or constant ramp). Afterwards,
    stepping is delegated to the wrapped scheduler.
    """
    def __init__(self, scheduler, warmup_epochs, warmup_start_lr=1e-7, warmup_type='linear'):
        self.scheduler = scheduler
        self.warmup_epochs = warmup_epochs
        self.warmup_start_lr = warmup_start_lr
        self.warmup_type = warmup_type
        # Track the base lr of EVERY param group: warming all groups toward a
        # single shared lr would silently break configs that use different
        # lrs per group (e.g. backbone vs. head).
        self.base_lrs = [group['lr'] for group in scheduler.optimizer.param_groups]
        # Kept for backward compatibility with existing checkpoints/consumers.
        self.base_lr = self.base_lrs[0]
        self.current_epoch = 0

    def step(self, metric=None):
        """Advance one epoch: apply warmup lr or delegate to the wrapped scheduler.

        Args:
            metric: validation metric forwarded to ReduceLROnPlateau;
                ignored by all other schedulers.
        """
        if self.current_epoch < self.warmup_epochs:
            # Warmup phase: compute an interpolation factor in [0, 1).
            progress = self.current_epoch / self.warmup_epochs
            if self.warmup_type == 'linear':
                factor = progress
            elif self.warmup_type == 'cosine':
                factor = 0.5 * (1 - math.cos(math.pi * progress))
            else:  # constant
                factor = 0.0

            for group, base_lr in zip(self.scheduler.optimizer.param_groups, self.base_lrs):
                group['lr'] = self.warmup_start_lr + (base_lr - self.warmup_start_lr) * factor
        else:
            # First step after warmup: restore the base lrs. The warmup ramp
            # never quite reaches base_lr, and schedulers such as StepLR and
            # ReduceLROnPlateau derive the next lr from the *current* group
            # lr — without this restore the lr would stay stuck at the last
            # (sub-base) warmup value.
            if self.warmup_epochs > 0 and self.current_epoch == self.warmup_epochs:
                for group, base_lr in zip(self.scheduler.optimizer.param_groups, self.base_lrs):
                    group['lr'] = base_lr
            # Normal scheduling phase.
            if isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                # Plateau scheduling needs a metric; silently skip when absent.
                if metric is not None:
                    self.scheduler.step(metric)
            else:
                self.scheduler.step()

        self.current_epoch += 1

    def get_last_lr(self):
        """Return the current learning rate of every parameter group."""
        return [group['lr'] for group in self.scheduler.optimizer.param_groups]

    def state_dict(self):
        """Serialize wrapper state together with the wrapped scheduler's state."""
        return {
            'scheduler_state': self.scheduler.state_dict(),
            'warmup_epochs': self.warmup_epochs,
            'warmup_start_lr': self.warmup_start_lr,
            'warmup_type': self.warmup_type,
            'base_lr': self.base_lr,
            'base_lrs': self.base_lrs,
            'current_epoch': self.current_epoch
        }

    def load_state_dict(self, state_dict):
        """Restore wrapper state; tolerates older checkpoints without 'base_lrs'."""
        self.scheduler.load_state_dict(state_dict['scheduler_state'])
        self.warmup_epochs = state_dict['warmup_epochs']
        self.warmup_start_lr = state_dict['warmup_start_lr']
        self.warmup_type = state_dict['warmup_type']
        self.base_lr = state_dict['base_lr']
        # Older checkpoints stored only a single base_lr; broadcast it.
        self.base_lrs = state_dict.get(
            'base_lrs',
            [self.base_lr] * len(self.scheduler.optimizer.param_groups))
        self.current_epoch = state_dict['current_epoch']

# Factory for learning-rate schedulers.
# Builds a scheduler instance from the provided configuration.
# Input: the optimizer, and a config carrying the scheduler type and hyperparameters.
# Output: a scheduler instance (optionally wrapped with warmup).
def get_scheduler(optimizer, config):
    """Build the learning-rate scheduler requested by ``config``.

    Unknown scheduler names fall back to StepLR(step_size=30, gamma=0.1).
    When warmup is enabled in the config, the base scheduler is wrapped in
    a WarmupScheduler.
    """
    lrs = torch.optim.lr_scheduler

    # Lazy builders: only the selected entry is evaluated, so configs need
    # not carry hyperparameters for the schedulers they don't use.
    builders = {
        'CosineAnnealingLR': lambda: lrs.CosineAnnealingLR(optimizer, config.T_max, config.eta_min),
        'StepLR': lambda: lrs.StepLR(optimizer, step_size=config.step_size, gamma=config.gamma),
        'ExponentialLR': lambda: lrs.ExponentialLR(optimizer, gamma=config.gamma),
        'ReduceLROnPlateau': lambda: lrs.ReduceLROnPlateau(optimizer, mode='min', factor=config.factor, patience=config.patience),
    }
    default = lambda: lrs.StepLR(optimizer, step_size=30, gamma=0.1)
    base_scheduler = builders.get(config.scheduler, default)()

    # Warmup applies only when explicitly enabled with a positive epoch count.
    warmup_requested = (
        getattr(config, 'enable_warmup', False)
        and getattr(config, 'warmup_epochs', 0) > 0
    )
    if not warmup_requested:
        return base_scheduler

    return WarmupScheduler(
        base_scheduler,
        config.warmup_epochs,
        getattr(config, 'warmup_start_lr', 1e-7),
        getattr(config, 'warmup_type', 'linear'),
    )
