import numpy as np
from ml_lib.core import Tensor
from ml_lib.nn.module import Module

def mse_loss(pred, target):
    """Mean squared error loss.

    Args:
        pred: predicted tensor (array-likes are coerced to Tensor).
        target: target tensor (array-likes are coerced to Tensor).

    Returns:
        Scalar Tensor holding the mean of the squared differences.
    """
    pred = pred if isinstance(pred, Tensor) else Tensor(pred)
    target = target if isinstance(target, Tensor) else Tensor(target)

    error = pred - target
    return (error * error).mean()

def binary_cross_entropy(pred, target):
    """
    Binary cross-entropy loss.

    Args:
        pred: predicted tensor with values expected in [0, 1]
        target: target tensor with values 0 or 1

    Returns:
        Scalar Tensor holding the mean binary cross-entropy.
    """
    if not isinstance(pred, Tensor):
        pred = Tensor(pred)
    if not isinstance(target, Tensor):
        target = Tensor(target)

    # Numerical stability: clip predictions away from 0 and 1 to avoid log(0).
    eps = 1e-12
    pred_clipped = np.clip(pred.data, eps, 1.0 - eps)

    # Loss: -[target * log(pred) + (1 - target) * log(1 - pred)]
    point_loss = -(target.data * np.log(pred_clipped) + (1 - target.data) * np.log(1 - pred_clipped))
    loss = Tensor(np.mean(point_loss))

    # Manually wire the backward pass for the clipped loss.
    if pred.requires_grad:
        loss.requires_grad = True

        def _backward():
            if pred.grad is None:
                pred.grad = np.zeros_like(pred.data)

            # dL/dpred = (pred - target) / (pred * (1 - pred)),
            # then divide by the element count because of the mean().
            grad = (pred_clipped - target.data) / (pred_clipped * (1 - pred_clipped))
            grad = grad / pred.data.size  # average the gradient
            # Bug fix: loss.grad may still be None when backward() starts at
            # this loss and no upstream seed was written; previously this
            # raised a TypeError. Default the seed to 1.0 in that case.
            upstream = loss.grad if loss.grad is not None else 1.0
            pred.grad += grad * upstream

        loss._backward = _backward

    return loss

def softmax(x, dim=-1):
    """
    Softmax function.

    Args:
        x: input tensor
        dim: dimension along which softmax is computed

    Returns:
        Tensor with softmax applied along `dim`.
    """
    # Numerical stability: subtract the per-slice maximum before exp().
    x_max = np.max(x.data, axis=dim, keepdims=True)
    exp_x = np.exp(x.data - x_max)
    softmax_x = exp_x / np.sum(exp_x, axis=dim, keepdims=True)

    result = Tensor(softmax_x)

    if x.requires_grad:
        result.requires_grad = True

        def _backward():
            if x.grad is None:
                x.grad = np.zeros_like(x.data)

            # NOTE(review): as in the original code, the backward pass only
            # handles softmax over the last dimension regardless of `dim` —
            # confirm callers rely solely on dim=-1.
            #
            # For a softmax row S with upstream gradient g, the Jacobian
            # product is J @ g = S * (g - S . g). This replaces the previous
            # O(n_classes^2) Python double loop that materialized the full
            # Jacobian per batch row, and gives identical results.
            upstream = result.grad if result.grad is not None else np.ones_like(softmax_x)
            inner = np.sum(upstream * softmax_x, axis=-1, keepdims=True)
            x.grad += softmax_x * (upstream - inner)

        result._backward = _backward

    return result

def cross_entropy(logits, target, dim=-1):
    """
    Cross-entropy loss combining softmax and negative log-likelihood.

    Args:
        logits: unnormalized prediction scores, shape (batch, num_classes)
        target: ground-truth labels, either integer class indices or an
            already one-hot / probability matrix matching `logits`
        dim: dimension along which softmax is applied
            (NOTE(review): the body reduces over dim=1 regardless of this
            argument — confirm callers only pass 2-D logits)

    Returns:
        Scalar Tensor with the mean cross-entropy over the batch.
    """
    if not isinstance(logits, Tensor):
        logits = Tensor(logits)
    if not isinstance(target, Tensor):
        target = Tensor(target)

    # Convert integer class indices into a one-hot matrix.
    if len(target.shape) == 1 or (len(target.shape) > 1 and target.shape[1] == 1):
        batch_size = logits.shape[0]
        num_classes = logits.shape[1]
        target_indices = target.data.astype(int).reshape(-1)

        # Vectorized one-hot construction; rows whose index is out of range
        # (idx >= num_classes) are left all-zero, matching the old guard.
        one_hot = np.zeros((batch_size, num_classes))
        valid = target_indices < num_classes
        one_hot[np.arange(batch_size)[valid], target_indices[valid]] = 1
        target = Tensor(one_hot)

    # Compute log-softmax directly (log-sum-exp trick) instead of
    # softmax().log(): when a class probability underflows to exactly 0,
    # the old formulation produced log(0) = -inf. log_softmax is
    # shifted_x - log(sum(exp(shifted_x))), which stays finite.
    x_max = logits.max(dim=1, keepdims=True)
    shifted_x = logits - x_max
    log_sum_exp = shifted_x.exp().sum(dim=1, keepdims=True).log()
    log_softmax = shifted_x - log_sum_exp

    # Cross-entropy: mean over the batch of -sum(target * log_softmax).
    loss = -(target * log_softmax).sum(dim=1).mean()
    return loss
class MSELoss(Module):
    """Module wrapper around the functional mse_loss."""

    def forward(self, pred, target):
        """Return the mean squared error between pred and target."""
        return mse_loss(pred, target)

class BCELoss(Module):
    """Module wrapper around the functional binary_cross_entropy."""

    def forward(self, pred, target):
        """Return the binary cross-entropy between pred and target."""
        return binary_cross_entropy(pred, target)

class CrossEntropyLoss(Module):
    """Module wrapper around the functional cross_entropy."""

    def forward(self, logits, target):
        """Return the cross-entropy between logits and target labels."""
        return cross_entropy(logits, target)