"""
GradNorm Weighting Strategy
GradNorm权重策略实现

Reference: GradNorm: Gradient Normalization for Adaptive Loss Balancing in Deep Multitask Networks
"""

from typing import Dict, List, Optional
import numpy as np
from ml_lib.core import Tensor
from ml_lib.nn.module import Parameter
from .base import WeightingStrategy


class GradNorm(WeightingStrategy):
    """
    GradNorm weighting strategy.

    Adaptively adjusts per-task loss weights by balancing gradient norms,
    so that tasks whose losses decay slowly (relative to their initial
    loss) receive larger weights and all tasks train at a similar pace.

    Reference: "GradNorm: Gradient Normalization for Adaptive Loss
    Balancing in Deep Multitask Networks" (Chen et al., 2018).
    """

    def __init__(self, task_names: List[str], alpha: float = 0.12, **kwargs):
        """
        Initialize the GradNorm strategy.

        Args:
            task_names: List of task names.
            alpha: Restoring-force hyperparameter controlling how strongly
                weights are pushed toward balancing the training rates.
        """
        super().__init__(task_names, **kwargs)
        self.alpha = alpha

        # Learnable per-task weight parameters, all initialized to 1.0.
        self.task_weights_param = {
            name: Parameter(Tensor([1.0], requires_grad=True))
            for name in task_names
        }

        # Initial losses L_i(0), captured on the first compute_weights()
        # call; used to form the inverse training rate L_i(t) / L_i(0).
        self.initial_losses: Optional[Dict[str, float]] = None
        self.loss_history: Dict[str, List[float]] = {name: [] for name in task_names}

    @staticmethod
    def _loss_to_float(loss: Tensor) -> float:
        """Extract a Python float from a loss Tensor's underlying data."""
        data = loss.data
        # .data may be a plain scalar/ndarray (float-convertible) or an
        # object exposing .item() — handle both, as the original did.
        return float(data) if hasattr(data, '__float__') else float(data.item())

    def compute_weights(self, losses: Dict[str, Tensor],
                       gradients: Optional[Dict[str, List[np.ndarray]]] = None,
                       shared_params: Optional[List[Parameter]] = None,
                       **kwargs) -> Dict[str, float]:
        """
        Compute task weights.

        Args:
            losses: Mapping of task name -> loss Tensor.
            gradients: Optional mapping of task name -> list of gradient
                arrays w.r.t. the shared parameters.
            shared_params: Shared parameter list. If None, gradient norms
                cannot be measured and the current weights are returned.
            **kwargs: Unused extra arguments.

        Returns:
            Mapping of task name -> weight, normalized so the weights sum
            to the number of tasks.
        """
        if shared_params is None:
            # Without shared parameters we cannot balance gradient norms;
            # return the current weights unchanged.
            return {name: float(self.task_weights_param[name].data.data)
                   for name in self.task_names}

        # Record loss history.
        for name, loss in losses.items():
            self.loss_history[name].append(self._loss_to_float(loss))

        # First call: capture L_i(0) and return uniform weights.
        if self.initial_losses is None:
            self.initial_losses = {
                # Clamp to avoid division by zero later.
                name: max(self._loss_to_float(losses[name]), 1e-8)
                for name in self.task_names
            }
            return {name: 1.0 for name in self.task_names}

        # Inverse training rate r_i(t) = L_i(t) / L_i(0).
        # (The original guarded on the *previous* loss being positive, but
        # the divisor is the clamped initial loss, so that check was dead.)
        relative_loss_rates = {}
        for name in self.task_names:
            if self.loss_history[name]:
                current_loss = self.loss_history[name][-1]
                initial_loss = max(self.initial_losses[name], 1e-8)
                relative_loss_rates[name] = current_loss / initial_loss
            else:
                relative_loss_rates[name] = 1.0

        # Average rate, clamped so the ratio below is always defined even
        # if every current loss has hit zero.
        avg_relative_rate = max(float(np.mean(list(relative_loss_rates.values()))), 1e-8)

        # Per-task gradient norms ||G_i|| over the shared parameters.
        grad_norms = {}
        for name in self.task_names:
            if gradients and name in gradients:
                # Flat L2 norm across all provided gradient arrays.
                task_grads = gradients[name]
                grad_norms[name] = float(np.sqrt(sum(np.sum(g ** 2) for g in task_grads)))
            else:
                # TODO: compute the gradient of the weighted task loss
                # w.r.t. shared_params here; the placeholder 1.0 keeps all
                # tasks' measured norms equal.
                grad_norms[name] = 1.0

        # Target norms: G_bar * (r_i / r_bar) ** alpha.
        avg_grad_norm = float(np.mean(list(grad_norms.values())))
        target_grad_norms = {
            name: avg_grad_norm * (relative_loss_rates[name] / avg_relative_rate) ** self.alpha
            for name in self.task_names
        }

        # Move each weight toward the target/actual gradient-norm ratio.
        new_weights = {}
        for name in self.task_names:
            current_weight = float(self.task_weights_param[name].data.data)
            if grad_norms[name] > 0:
                ratio = target_grad_norms[name] / grad_norms[name]
                new_weight = current_weight * ratio
                # Persist the update into the learnable parameter.
                self.task_weights_param[name].data.data = np.array([new_weight])
                new_weights[name] = new_weight
            else:
                new_weights[name] = current_weight

        # Renormalize so the weights sum to num_tasks (mean weight = 1).
        total_weight = sum(new_weights.values())
        if total_weight > 0:
            new_weights = {name: w * self.num_tasks / total_weight
                          for name, w in new_weights.items()}

        return new_weights

    def get_learnable_params(self) -> List[Parameter]:
        """Return the learnable per-task weight parameters."""
        return list(self.task_weights_param.values())

    def reset(self):
        """Reset the strategy state: history, initial losses, and weights."""
        super().reset()
        self.initial_losses = None
        self.loss_history = {name: [] for name in self.task_names}
        for name in self.task_names:
            self.task_weights_param[name].data.data = np.array([1.0])

    def __repr__(self):
        return f"GradNorm(num_tasks={self.num_tasks}, alpha={self.alpha})"