"""
Multi-Task Learning Utilities
多任务学习工具函数
"""

from typing import Dict, List, Any, Tuple, Iterator, Optional
import numpy as np
from ml_lib.core import Tensor
from ml_lib.nn.module import Module
from ml_lib.mtl.architectures import HardParameterSharing


class MTLDataset:
    """Dataset wrapper for multi-task learning with batched iteration.

    Holds one or more named input arrays and one or more named target
    arrays (all with the same sample count) and yields batches as
    dictionaries of Tensors.
    """

    def __init__(self, data: Dict[str, np.ndarray], targets: Dict[str, np.ndarray],
                 batch_size: int = 32, shuffle: bool = True):
        """
        Initialize the MTL dataset.

        Args:
            data: Mapping from data-source name to input array.
            targets: Mapping from task name to target array.
            batch_size: Number of samples per batch.
            shuffle: Whether to shuffle sample order (at construction and
                at the start of every epoch).
        """
        self.data = data
        self.targets = targets
        self.batch_size = batch_size
        self.shuffle = shuffle

        # Ensure all arrays describe the same number of samples.
        self._validate_data()

        # Sample count is taken from the first data source (validated equal).
        self.length = len(next(iter(data.values())))
        self.num_batches = (self.length + batch_size - 1) // batch_size

        # Index permutation used to draw batches.
        self.indices = np.arange(self.length)
        if shuffle:
            np.random.shuffle(self.indices)

        self.current_batch = 0

    def _validate_data(self):
        """Check that data/targets are non-empty and length-consistent."""
        if not self.data:
            raise ValueError("数据不能为空")
        if not self.targets:
            raise ValueError("目标不能为空")

        # Every data source and every target must share a single length.
        lengths = {len(arr) for arr in self.data.values()}
        lengths |= {len(arr) for arr in self.targets.values()}
        if len(lengths) > 1:
            raise ValueError("所有数据和目标的长度必须一致")

    def __iter__(self) -> Iterator[Tuple[Dict[str, Tensor], Dict[str, Tensor]]]:
        """Reset the batch cursor (reshuffling if enabled) and return self."""
        self.current_batch = 0
        if self.shuffle:
            np.random.shuffle(self.indices)
        return self

    def __next__(self) -> Tuple[Dict[str, Tensor], Dict[str, Tensor]]:
        """Return the next (batch_data, batch_targets) pair of Tensor dicts."""
        if self.current_batch >= self.num_batches:
            raise StopIteration

        lo = self.current_batch * self.batch_size
        hi = min(lo + self.batch_size, self.length)
        chosen = self.indices[lo:hi]

        batch_data = {name: Tensor(arr[chosen]) for name, arr in self.data.items()}
        batch_targets = {name: Tensor(arr[chosen]) for name, arr in self.targets.items()}

        self.current_batch += 1
        return batch_data, batch_targets

    def __len__(self) -> int:
        """Number of batches per epoch."""
        return self.num_batches

    def get_sample_count(self) -> int:
        """Total number of samples in the dataset."""
        return self.length


def compute_task_metrics(predictions: np.ndarray, targets: np.ndarray, 
                        task_type: str, metrics: List[str]) -> Dict[str, float]:
    """
    Compute evaluation metrics for a single task.

    Args:
        predictions: Model outputs. Multi-class classification expects
            (n, num_classes) scores; other cases accept (n,) or (n, 1).
        targets: Ground-truth values: class indices, one-hot rows, or
            regression values.
        task_type: Task kind ('classification', 'regression', 'segmentation').
        metrics: Names of metrics to compute ('accuracy', 'mse', 'mae').
            Unknown names are silently ignored.

    Returns:
        Mapping from metric name to its value as a Python float.
    """
    result: Dict[str, float] = {}

    if task_type == 'classification':
        if predictions.ndim > 1 and predictions.shape[1] > 1:
            # Multi-class: predicted class is the argmax score per sample.
            pred_classes = np.argmax(predictions, axis=1)
            if targets.ndim > 1 and targets.shape[1] > 1:
                # One-hot encoded targets.
                target_classes = np.argmax(targets, axis=1)
            else:
                # Class-index targets; ravel guards against an (n, 1) column
                # broadcasting against the (n,) prediction vector.
                target_classes = np.ravel(targets).astype(int)
        else:
            # Binary: threshold at 0.5. Ravel both sides so an (n, 1)
            # column never broadcasts to an (n, n) comparison matrix.
            pred_classes = (np.ravel(predictions) > 0.5).astype(int)
            target_classes = np.ravel(targets).astype(int)

        for metric in metrics:
            if metric == 'accuracy':
                result['accuracy'] = float(np.mean(pred_classes == target_classes))

    elif task_type == 'regression':
        # Flatten both arrays first: without this, (n, 1) predictions minus
        # (n,) targets broadcast to an (n, n) matrix and silently yield a
        # wrong error value.
        pred_flat = np.ravel(predictions)
        target_flat = np.ravel(targets)
        for metric in metrics:
            if metric == 'mse':
                result['mse'] = float(np.mean((pred_flat - target_flat) ** 2))
            elif metric == 'mae':
                result['mae'] = float(np.mean(np.abs(pred_flat - target_flat)))

    return result


def create_synthetic_mtl_data(num_samples: int = 1000, input_dim: int = 20, 
                             task_configs: Dict[str, Dict[str, Any]] = None,
                             noise_level: float = 0.1,
                             seed: Optional[int] = 42) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
    """
    Create synthetic multi-task learning data.

    All tasks are derived from a shared latent feature space so they are
    genuinely related — the setting MTL architectures are designed for.

    Args:
        num_samples: Number of samples to generate.
        input_dim: Dimensionality of the input features.
        task_configs: Mapping task name -> config dict with key 'task_type'
            ('classification' or 'regression') and, for classification, an
            optional 'num_classes' (default 2). Defaults to one 3-class
            classification task plus one regression task.
        noise_level: Scale of the additive Gaussian noise.
        seed: Seed for NumPy's global RNG; pass None to leave the current
            random state untouched. The default of 42 preserves the
            previous, fully reproducible output.

    Returns:
        Tuple of (data dict, targets dict). `data` holds a single 'input'
        key with a float32 array of shape (num_samples, input_dim).
    """
    if task_configs is None:
        task_configs = {
            'classification': {'task_type': 'classification', 'num_classes': 3},
            'regression': {'task_type': 'regression'}
        }
    
    # NOTE: seeds the *global* NumPy RNG — a side effect visible to callers.
    if seed is not None:
        np.random.seed(seed)
    X = np.random.randn(num_samples, input_dim).astype(np.float32)
    
    # Shared latent features make the tasks correlated.
    shared_weights = np.random.randn(input_dim, 10)
    shared_features = X @ shared_weights
    
    data = {'input': X}
    targets = {}
    
    for task_name, config in task_configs.items():
        if config['task_type'] == 'classification':
            num_classes = config.get('num_classes', 2)
            task_weights = np.random.randn(10, num_classes)
            logits = shared_features @ task_weights
            
            # Noise before argmax so labels can flip near decision borders.
            logits += np.random.randn(*logits.shape) * noise_level
            
            # Integer class labels in [0, num_classes).
            targets[task_name] = np.argmax(logits, axis=1)
            
        elif config['task_type'] == 'regression':
            task_weights = np.random.randn(10, 1)
            values = shared_features @ task_weights
            
            values += np.random.randn(*values.shape) * noise_level
            
            # Flatten (n, 1) -> (n,) target vector.
            targets[task_name] = values.flatten()
    
    return data, targets


def create_hard_parameter_sharing_model(input_dim: int, task_configs: Dict[str, Any],
                                       shared_encoder: Module, 
                                       task_heads: Dict[str, Module]) -> HardParameterSharing:
    """
    Convenience factory for a hard-parameter-sharing MTL model.

    Args:
        input_dim: Dimensionality of the model input.
        task_configs: Mapping of task name to task configuration.
        shared_encoder: Encoder module shared across all tasks.
        task_heads: Mapping of task name to its task-specific head module.

    Returns:
        A configured HardParameterSharing instance.
    """
    model_kwargs = dict(
        input_dim=input_dim,
        task_configs=task_configs,
        shared_encoder=shared_encoder,
        task_heads=task_heads,
    )
    return HardParameterSharing(**model_kwargs)