import torch

# Trains one epoch
# AlphaScheduler class, added at the top of the file
class AlphaScheduler:
    """Dynamic scheduler for the loss-mixing coefficient ``alpha``.

    ``alpha`` balances two training losses (a contrastive loss vs. a
    feature-matching loss). ``update`` recomputes it once per epoch
    according to the configured strategy, and the result is always
    clamped to the range [0.1, 0.9].
    """

    def __init__(self, strategy='cyclic', initial_alpha=0.5, cycle_length=10, **kwargs):
        """
        Initialize the dynamic alpha scheduler.

        Parameters:
        - strategy: scheduling strategy; one of 'cyclic' (periodic switching),
          'linear' (linear transition), 'adaptive' (loss-driven), 'stochastic'
          (random walk), or 'constant'.
        - initial_alpha: starting alpha value.
        - cycle_length: half-cycle length in epochs (used by 'cyclic').
        - **kwargs: strategy-specific parameters: 'total_epochs' and
          'phase_length' (linear), 'history_window' (adaptive),
          'alpha_range' (stochastic), 'constant_alpha' (constant).
        """
        self.strategy = strategy
        self.current_alpha = initial_alpha
        self.cycle_length = cycle_length
        self.epoch = 0
        self.kwargs = kwargs

        # State for the 'adaptive' strategy: sliding windows of recent loss
        # values used to estimate which loss is improving faster.
        self.contrastive_loss_history = []
        self.feature_matching_loss_history = []
        self.history_window = kwargs.get('history_window', 3)

    def update(self, epoch, contrastive_loss=None, feature_matching_loss=None):
        """Recompute and return alpha for the given epoch.

        Parameters:
        - epoch: current epoch index (0-based).
        - contrastive_loss / feature_matching_loss: most recent loss values;
          only consumed by the 'adaptive' strategy (both must be provided
          for it to adjust alpha).

        Returns the new alpha, clamped to [0.1, 0.9].
        Raises ValueError for an unknown strategy.
        """
        self.epoch = epoch

        if self.strategy == 'cyclic':
            # Periodic switching: alternate focus between the two losses.
            # cycle_position sweeps [0, 2) over one full (2 * cycle_length) period.
            cycle_position = (epoch % (2 * self.cycle_length)) / self.cycle_length
            if cycle_position < 1:
                # First half: ramp the contrastive-loss weight up (0.1 -> 0.9).
                self.current_alpha = 0.1 + 0.8 * cycle_position
            else:
                # Second half: ramp the feature-matching weight up (alpha 0.9 -> 0.1).
                self.current_alpha = 0.9 - 0.8 * (cycle_position - 1)

        elif self.strategy == 'linear':
            # Linear transition: focus on one loss, transition, focus on the
            # other, then transition back.
            total_epochs = self.kwargs.get('total_epochs', 100)
            phase_length = self.kwargs.get('phase_length', total_epochs // 4)

            if epoch < phase_length:
                self.current_alpha = 0.9  # phase 1: focus on the global contrastive loss
            elif epoch < 2 * phase_length:
                self.current_alpha = 0.9 - 0.8 * (epoch - phase_length) / phase_length  # transition 1
            elif epoch < 3 * phase_length:
                self.current_alpha = 0.1  # middle phase: focus on the local feature-matching loss
            else:
                # Transition 2. Guard the denominator: a caller-supplied
                # phase_length may satisfy total_epochs == 3 * phase_length,
                # which would otherwise raise ZeroDivisionError.
                remaining = max(1, total_epochs - 3 * phase_length)
                self.current_alpha = 0.1 + 0.8 * (epoch - 3 * phase_length) / remaining

        elif self.strategy == 'adaptive':
            # Adaptive: adjust alpha based on recent loss trends.
            if contrastive_loss is not None and feature_matching_loss is not None:
                self.contrastive_loss_history.append(contrastive_loss)
                self.feature_matching_loss_history.append(feature_matching_loss)

                # Keep only the most recent history_window values.
                if len(self.contrastive_loss_history) > self.history_window:
                    self.contrastive_loss_history.pop(0)
                    self.feature_matching_loss_history.pop(0)

                if len(self.contrastive_loss_history) >= self.history_window:
                    base_contrastive = self.contrastive_loss_history[0]
                    base_feature_matching = self.feature_matching_loss_history[0]
                    # Guard against a zero baseline (e.g. a loss that reached
                    # exactly 0), which would raise ZeroDivisionError.
                    if base_contrastive != 0 and base_feature_matching != 0:
                        # Ratio < 1 means the loss decreased over the window.
                        contrastive_trend = self.contrastive_loss_history[-1] / base_contrastive
                        feature_matching_trend = self.feature_matching_loss_history[-1] / base_feature_matching

                        # Shrink the weight of whichever loss is falling
                        # faster, and grow the other.
                        if contrastive_trend < feature_matching_trend:
                            self.current_alpha = max(0.1, min(0.9, self.current_alpha * 0.95))
                        else:
                            self.current_alpha = max(0.1, min(0.9, self.current_alpha * 1.05))

        elif self.strategy == 'stochastic':
            # Random walk: draw alpha uniformly within +/- alpha_range of the
            # current value, pre-clipped to [0.1, 0.9].
            alpha_range = self.kwargs.get('alpha_range', 0.3)
            min_alpha = max(0.1, self.current_alpha - alpha_range)
            max_alpha = min(0.9, self.current_alpha + alpha_range)
            self.current_alpha = torch.rand(1).item() * (max_alpha - min_alpha) + min_alpha

        elif self.strategy == 'constant':
            # Fixed alpha (still subject to the final clamp below).
            self.current_alpha = self.kwargs.get('constant_alpha', 0.5)
        else:
            raise ValueError(f"未知的alpha调度策略: {self.strategy}")
        # Ensure alpha stays within the valid range.
        self.current_alpha = max(0.1, min(0.9, self.current_alpha))

        return self.current_alpha
