import numpy as np
from typing import Union, Optional, Tuple, List
from ml_lib.core import Tensor

def softmax(x: Tensor, dim: int = -1) -> Tensor:
    """Apply the softmax function along the given dimension.

    Args:
        x: Input tensor.
        dim: Dimension along which softmax is computed.

    Returns:
        A tensor of the same shape as ``x`` whose entries along ``dim``
        are non-negative and sum to one.
    """
    # Subtract the per-slice maximum for numerical stability; the shift
    # cancels out in the final ratio, so the result is unchanged.
    stabilizer = Tensor(x.data.max(axis=dim, keepdims=True))
    # Use graph-building Tensor ops (not raw data) so gradients flow.
    exponentials = (x - stabilizer).exp()
    normalizer = exponentials.sum(dim=dim, keepdims=True)
    # Element-wise division also participates in the computation graph.
    return exponentials / normalizer

def log_softmax(x: Tensor, dim: int = -1) -> Tensor:
    """Compute log(softmax(x)) along ``dim`` in a numerically stable way.

    Args:
        x: Input tensor.
        dim: Dimension along which log-softmax is computed.

    Returns:
        log(softmax(x)), same shape as ``x``.
    """
    # Stable identity: log_softmax(x) = (x - m) - log(sum(exp(x - m)))
    # with m = max(x) taken along `dim`.
    peak = Tensor(x.data.max(axis=dim, keepdims=True))
    # Graph-building Tensor ops keep the result differentiable.
    centered = x - peak
    total = centered.exp().sum(dim=dim, keepdims=True)
    return centered - total.log()

def cross_entropy(logits: Tensor, target: Tensor, reduction: str = 'mean') -> Tensor:
    """Cross-entropy loss between raw logits and targets.

    Args:
        logits: Unnormalized log-probabilities, shape (batch_size, num_classes),
            or (num_classes,) for a single sample.
        target: Either class indices of shape (batch_size,) / (batch_size, 1),
            or a one-hot encoding with the same shape as ``logits``.
        reduction: 'none' | 'mean' | 'sum' — how to reduce over the batch.

    Returns:
        The loss: per-sample losses for 'none', a scalar for 'mean'/'sum'.

    Raises:
        ValueError: If ``reduction`` is not one of the supported modes.
    """
    # log_softmax performs the numerically stable max-shift internally.
    log_probs = log_softmax(logits, dim=-1)

    # Support both a single (num_classes,) sample and a batched 2-D input.
    is_1d = len(logits.shape) == 1
    batch_size = 1 if is_1d else logits.shape[0]
    num_classes = logits.shape[0] if is_1d else logits.shape[1]

    # Convert class-index targets to one-hot; already-one-hot targets pass through.
    if len(target.shape) == 1 or (len(target.shape) > 1 and target.shape[1] == 1):
        # Flatten so (batch_size, 1) targets yield scalar indices, not
        # 1-element arrays, and 0-d data still becomes a length-1 vector.
        target_indices = np.atleast_1d(target.data).astype(int).reshape(-1)

        if is_1d:
            one_hot = np.zeros(num_classes)
            if len(target_indices) == 1:
                idx = target_indices[0]
                # Guard BOTH bounds: a negative index would otherwise wrap
                # to the end of the class axis under NumPy indexing.
                if 0 <= idx < num_classes:
                    one_hot[idx] = 1
        else:
            one_hot = np.zeros((batch_size, num_classes))
            rows = np.arange(len(target_indices))
            valid = (target_indices >= 0) & (target_indices < num_classes)
            # Vectorized one-hot scatter instead of a per-sample Python loop.
            # Invalid indices leave their row all-zero, matching the original
            # skip behavior (minus the accidental negative-index wrap).
            one_hot[rows[valid], target_indices[valid]] = 1
        target_one_hot = Tensor(one_hot)
    else:
        # Target is already one-hot encoded.
        target_one_hot = target

    # Cross entropy: -sum(target * log_softmax(logits)) over the class axis.
    losses = -(target_one_hot * log_probs).sum(dim=-1)

    # Reduce over the batch dimension as requested.
    if reduction == 'none':
        return losses
    elif reduction == 'sum':
        return losses.sum()
    elif reduction == 'mean':
        return losses.mean()
    else:
        raise ValueError(f"不支持的reduction模式: {reduction}")

def nll_loss(log_probs: Tensor, target: Tensor, reduction: str = 'mean') -> Tensor:
    """Negative log-likelihood loss.

    Args:
        log_probs: Log-probabilities, shape (batch_size, num_classes),
            or (num_classes,) for a single sample.
        target: Target class indices, shape (batch_size,).
        reduction: 'none' | 'mean' | 'sum' — how to reduce over the batch.

    Returns:
        The loss: per-sample losses for 'none', a scalar for 'mean'/'sum'.

    Raises:
        ValueError: If ``reduction`` is not one of the supported modes.
    """
    # NOTE(review): this reads `.data` directly and re-wraps the result, so
    # no gradient flows back through `log_probs` — unlike cross_entropy,
    # which builds the graph. Confirm whether that is intentional.
    target_indices = target.data.astype(int)

    if len(log_probs.shape) > 1:
        batch_size = log_probs.shape[0]
        # Vectorized gather of each sample's target-class log-probability,
        # replacing the per-sample Python loop. A batch/target length
        # mismatch now raises instead of silently leaving zero losses.
        losses = -log_probs.data[np.arange(batch_size), target_indices]
    else:
        losses = -log_probs.data[target_indices]

    losses = Tensor(losses)

    # Reduce over the batch dimension as requested.
    if reduction == 'none':
        return losses
    elif reduction == 'sum':
        return losses.sum()
    elif reduction == 'mean':
        return losses.mean()
    else:
        raise ValueError(f"不支持的reduction模式: {reduction}")