"""
Multi-Task Learning Trainer
多任务学习训练器
"""

import time
from typing import Dict, List, Any, Optional, Tuple
import numpy as np
from ml_lib.core import Tensor
from ml_lib.nn.loss import MSELoss, CrossEntropyLoss, BCELoss
from ml_lib.optim import SGD, Adam
from .config import MTLConfig, TaskConfig
from .architectures import MTLArchitecture, HardParameterSharing
from .weighting import WeightingStrategy, EqualWeighting, GradNorm
from .utils import MTLDataset, compute_task_metrics


class MTLTrainer:
    """Multi-task learning trainer.

    Orchestrates model construction, per-task loss computation, task
    weighting, optimization, evaluation and the training loop for a
    multi-task learning (MTL) setup described by an ``MTLConfig``.
    """

    def __init__(self, config: MTLConfig, model: Optional[MTLArchitecture] = None):
        """
        Initialize the MTL trainer.

        Args:
            config: MTL configuration object (validated on construction).
            model: User-supplied MTL model. Optional — when omitted, a
                model is built automatically from the configuration.
        """
        self.config = config
        config.validate()

        # Model architecture: prefer the user-supplied model, otherwise
        # build one from the configuration.
        self.model = model if model is not None else self._build_model()

        # Per-task loss functions, keyed by task name.
        self.loss_functions = self._build_loss_functions()

        # Task-weighting strategy (e.g. equal weighting, GradNorm).
        self.weighting_strategy = self._build_weighting_strategy()

        # Optimizer over model parameters (plus any learnable weighting
        # parameters — see _build_optimizer).
        self.optimizer = self._build_optimizer()

        # Training state and per-epoch history.
        self.current_epoch = 0
        task_names = config.get_task_names()
        self.training_history = {
            'losses': {name: [] for name in task_names},
            'weights': {name: [] for name in task_names},
            'metrics': {name: {} for name in task_names},
        }

    def _build_model(self) -> MTLArchitecture:
        """Build the model architecture from the configuration.

        Raises:
            ValueError: If the configured architecture name is unsupported.
        """
        task_configs = {name: task.__dict__ for name, task in self.config.tasks.items()}

        if self.config.architecture.name == 'HPS':
            return HardParameterSharing(
                input_dim=self.config.input_dim,
                task_configs=task_configs,
                shared_layers=self.config.architecture.shared_layers,
                task_specific_layers=self.config.architecture.task_specific_layers
            )
        raise ValueError(f"不支持的架构: {self.config.architecture.name}")

    def _build_loss_functions(self) -> Dict[str, Any]:
        """Build the per-task loss-function mapping from the configuration.

        Returns:
            Dict mapping task name to an instantiated loss object.

        Raises:
            ValueError: If a task's loss name is unsupported.
        """
        # Dispatch table: config loss name -> loss class.
        loss_registry = {
            'mse': MSELoss,
            'cross_entropy': CrossEntropyLoss,
            'bce': BCELoss,
        }

        loss_functions = {}
        for task_name, task_config in self.config.tasks.items():
            loss_cls = loss_registry.get(task_config.loss_fn)
            if loss_cls is None:
                raise ValueError(f"不支持的损失函数: {task_config.loss_fn}")
            loss_functions[task_name] = loss_cls()
        return loss_functions

    def _build_weighting_strategy(self) -> WeightingStrategy:
        """Build the task-weighting strategy from the configuration.

        Raises:
            ValueError: If the configured weighting name is unsupported.
        """
        task_names = self.config.get_task_names()

        if self.config.weighting.name == 'EW':
            return EqualWeighting(task_names)
        if self.config.weighting.name == 'GradNorm':
            return GradNorm(task_names, alpha=self.config.weighting.alpha)
        raise ValueError(f"不支持的权重策略: {self.config.weighting.name}")

    def _build_optimizer(self) -> Any:
        """Build the optimizer over all trainable parameters.

        Raises:
            ValueError: If the configured optimizer name is unsupported.
        """
        # Collect model parameters.
        params = list(self.model.parameters())

        # Some weighting strategies (e.g. uncertainty weighting) carry
        # learnable parameters of their own — optimize them jointly.
        if hasattr(self.weighting_strategy, 'get_learnable_params'):
            params.extend(self.weighting_strategy.get_learnable_params())

        if self.config.training.optimizer == 'sgd':
            return SGD(
                params,
                lr=self.config.training.learning_rate,
                momentum=self.config.training.momentum,
                weight_decay=self.config.training.weight_decay
            )
        if self.config.training.optimizer == 'adam':
            return Adam(
                params,
                lr=self.config.training.learning_rate,
                weight_decay=self.config.training.weight_decay
            )
        raise ValueError(f"不支持的优化器: {self.config.training.optimizer}")

    @staticmethod
    def _loss_to_float(loss: Any) -> float:
        """Convert a loss tensor's payload to a Python float.

        Handles both scalar-convertible payloads and array-like payloads
        exposing ``.item()``.
        """
        data = loss.data
        return float(data) if hasattr(data, '__float__') else float(data.item())

    def train_step(self, batch_data: Dict[str, Tensor],
                   batch_targets: Dict[str, Tensor]) -> Dict[str, float]:
        """
        Execute one training step (forward, weighting, backward, update).

        Args:
            batch_data: Batch input data (currently single-input only).
            batch_targets: Batch targets keyed by task name.

        Returns:
            Dict mapping task name to its (unweighted) loss value. Empty
            when no task produced a loss, in which case no optimizer step
            is taken.

        Raises:
            NotImplementedError: For multi-input batches.
        """
        # Forward pass.
        if len(batch_data) == 1:
            # Single-input case.
            input_data = next(iter(batch_data.values()))
            outputs = self.model(input_data)
        else:
            # Multi-input case (not yet supported).
            raise NotImplementedError("多输入情况暂未实现")

        # Per-task losses — only for tasks present in both outputs and targets.
        task_losses = {}
        for task_name in self.config.get_task_names():
            if task_name in outputs and task_name in batch_targets:
                pred = outputs[task_name]
                target = batch_targets[task_name]
                task_losses[task_name] = self.loss_functions[task_name](pred, target)

        # Update task weights (strategies like GradNorm need the shared
        # parameters to measure gradient magnitudes).
        task_weights = self.weighting_strategy.update_weights(
            task_losses,
            shared_params=self.model.get_shared_params()
        )

        # Weighted total loss; missing weights default to 1.0.
        total_loss = None
        for task_name, loss in task_losses.items():
            weighted_loss = loss * task_weights.get(task_name, 1.0)
            total_loss = weighted_loss if total_loss is None else total_loss + weighted_loss

        if total_loss is None:
            # No task produced a loss (output/target key mismatch): skip
            # the optimization step instead of calling backward() on None.
            return {}

        # Backward pass.
        self.optimizer.zero_grad()
        total_loss.backward()

        # Optional gradient clipping.
        if self.config.training.grad_clip is not None:
            self._clip_gradients(self.config.training.grad_clip)

        # Parameter update.
        self.optimizer.step()

        # Return plain-float loss values per task.
        return {name: self._loss_to_float(loss) for name, loss in task_losses.items()}

    def _clip_gradients(self, max_norm: float):
        """Clip model gradients to a maximum global L2 norm.

        NOTE(review): only model parameters are clipped; learnable
        weighting-strategy parameters in the optimizer are not — confirm
        whether they should be included.
        """

        def _get_grad(param: Any):
            # Parameter objects may expose the gradient either on the
            # wrapped tensor (param.data.grad) or directly (param.grad).
            if hasattr(param, 'data') and getattr(param.data, 'grad', None) is not None:
                return param.data.grad
            if getattr(param, 'grad', None) is not None:
                return param.grad
            return None

        # Global norm = sqrt of the sum of squared per-parameter norms.
        total_norm = 0.0
        for param in self.model.parameters():
            grad = _get_grad(param)
            if grad is not None:
                total_norm += float(np.linalg.norm(grad)) ** 2
        total_norm = np.sqrt(total_norm)

        if total_norm > max_norm:
            # Epsilon avoids division issues at the clipping boundary.
            clip_coef = max_norm / (total_norm + 1e-6)
            for param in self.model.parameters():
                if hasattr(param, 'data') and getattr(param.data, 'grad', None) is not None:
                    param.data.grad *= clip_coef
                elif getattr(param, 'grad', None) is not None:
                    param.grad *= clip_coef

    def evaluate(self, dataset: MTLDataset) -> Dict[str, Dict[str, float]]:
        """
        Evaluate the model on a dataset.

        Args:
            dataset: Evaluation dataset yielding (batch_data, batch_targets).

        Returns:
            Dict mapping task name to its metric dict (includes 'loss').
            Empty when the dataset yields no batches.

        Raises:
            NotImplementedError: For multi-input batches.
        """
        self.model.eval()

        task_names = self.config.get_task_names()
        all_predictions = {name: [] for name in task_names}
        all_targets = {name: [] for name in task_names}
        total_losses = {name: 0.0 for name in task_names}
        num_batches = 0

        for batch_data, batch_targets in dataset:
            # Forward pass.
            if len(batch_data) == 1:
                input_data = next(iter(batch_data.values()))
                outputs = self.model(input_data)
            else:
                raise NotImplementedError("多输入情况暂未实现")

            # Collect predictions, targets and accumulated losses per task.
            for task_name in task_names:
                if task_name in outputs and task_name in batch_targets:
                    pred = outputs[task_name]
                    target = batch_targets[task_name]

                    all_predictions[task_name].append(pred.data)
                    all_targets[task_name].append(target.data)

                    loss = self.loss_functions[task_name](pred, target)
                    total_losses[task_name] += self._loss_to_float(loss)

            num_batches += 1

        if num_batches == 0:
            # Empty dataset: nothing to average — avoid ZeroDivisionError.
            self.model.train()
            return {}

        # Average loss per task over all batches.
        avg_losses = {name: total_losses[name] / num_batches for name in task_names}

        # Compute the remaining task metrics on the concatenated batches.
        metrics = {}
        for task_name in task_names:
            if all_predictions[task_name] and all_targets[task_name]:
                pred_all = np.concatenate(all_predictions[task_name], axis=0)
                target_all = np.concatenate(all_targets[task_name], axis=0)

                task_config = self.config.tasks[task_name]
                task_metrics = compute_task_metrics(
                    pred_all, target_all,
                    task_config.task_type,
                    task_config.metrics
                )
                task_metrics['loss'] = avg_losses[task_name]
                metrics[task_name] = task_metrics

        # Restore training mode (assumes the model was in train mode on entry).
        self.model.train()
        return metrics

    def train(self, train_dataset: MTLDataset,
              val_dataset: Optional[MTLDataset] = None,
              verbose: bool = True) -> Dict[str, Any]:
        """
        Train the model for the configured number of epochs.

        Args:
            train_dataset: Training dataset.
            val_dataset: Validation dataset (optional).
            verbose: Whether to print progress information.

        Returns:
            The training-history dict (per-task losses, weights, metrics).
        """
        if verbose:
            print(f"开始训练，共 {self.config.training.num_epochs} 个epoch")
            print(f"模型架构: {self.config.architecture.name}")
            print(f"权重策略: {self.config.weighting.name}")
            print(f"任务数量: {self.config.get_num_tasks()}")
            print(f"任务列表: {self.config.get_task_names()}")

        # TODO: early stopping (config.training.early_stopping / patience)
        # is not implemented yet.
        task_names = self.config.get_task_names()

        for epoch in range(self.config.training.num_epochs):
            self.current_epoch = epoch
            epoch_start_time = time.time()

            # Training phase.
            self.model.train()
            epoch_losses = {name: [] for name in task_names}
            epoch_weights = {name: [] for name in task_names}

            for batch_data, batch_targets in train_dataset:
                batch_losses = self.train_step(batch_data, batch_targets)
                current_weights = self.weighting_strategy.get_weights()

                for task_name in task_names:
                    if task_name in batch_losses:
                        epoch_losses[task_name].append(batch_losses[task_name])
                        epoch_weights[task_name].append(current_weights.get(task_name, 1.0))

            # Average loss and weight per task for this epoch.
            avg_losses = {name: np.mean(losses) if losses else 0.0
                          for name, losses in epoch_losses.items()}
            avg_weights = {name: np.mean(weights) if weights else 1.0
                           for name, weights in epoch_weights.items()}

            # Record training history.
            for task_name in task_names:
                self.training_history['losses'][task_name].append(avg_losses.get(task_name, 0.0))
                self.training_history['weights'][task_name].append(avg_weights.get(task_name, 1.0))

            # Training-set metrics: computed only every 5 epochs to save time.
            train_metrics = None
            if verbose and (epoch + 1) % 5 == 0:
                train_metrics = self.evaluate(train_dataset)

            # Validation phase.
            val_metrics = None
            if val_dataset is not None:
                val_metrics = self.evaluate(val_dataset)

            # Progress report (every 5 epochs).
            if verbose and (epoch + 1) % 5 == 0:
                epoch_time = time.time() - epoch_start_time
                print(f"\nEpoch {epoch+1}/{self.config.training.num_epochs} ({epoch_time:.2f}s)")
                print(f"  训练损失: {avg_losses}")
                print(f"  任务权重: {avg_weights}")

                if train_metrics:
                    print("  训练指标:")
                    for task_name, metrics in train_metrics.items():
                        metric_str = ", ".join([f"{k}: {v:.4f}" for k, v in metrics.items() if k != 'loss'])
                        print(f"    {task_name}: {metric_str}")

                if val_metrics:
                    print("  验证指标:")
                    for task_name, metrics in val_metrics.items():
                        metric_str = ", ".join([f"{k}: {v:.4f}" for k, v in metrics.items() if k != 'loss'])
                        print(f"    {task_name}: {metric_str}")

        if verbose:
            print("训练完成!")

        return self.training_history

    def predict(self, data: Tensor) -> Dict[str, Tensor]:
        """
        Run inference.

        Args:
            data: Input data tensor.

        Returns:
            Dict mapping task name to its prediction tensor.
        """
        # Assumes the model was in train mode on entry; mode is restored
        # unconditionally after the forward pass.
        self.model.eval()
        outputs = self.model(data)
        self.model.train()
        return outputs

    def save_model(self, filepath: str):
        """Save the model (simplified placeholder implementation)."""
        # A real implementation would serialize model parameters here.
        print(f"模型保存功能待实现: {filepath}")

    def load_model(self, filepath: str):
        """Load the model (simplified placeholder implementation)."""
        # A real implementation would deserialize model parameters here.
        print(f"模型加载功能待实现: {filepath}")

    def get_task_weights(self) -> Dict[str, float]:
        """Return the current per-task weights from the weighting strategy."""
        return self.weighting_strategy.get_weights()

    def get_training_history(self) -> Dict[str, Any]:
        """Return the recorded training history."""
        return self.training_history