import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from orbitP.script import config
import joblib
from joblib import load

class WeightedMSELoss(nn.Module):
    """MSE loss with a small MAE auxiliary term.

    Total loss = alpha * MSE(pred, true) + beta * MAE(pred, true).

    Args:
        alpha: weight of the MSE (main) term.
        beta: weight of the MAE (auxiliary) term.
    """

    def __init__(self, alpha=1.0, beta=0.1):
        super(WeightedMSELoss, self).__init__()
        self.mse = nn.MSELoss()
        self.mae = nn.L1Loss()
        self.alpha = alpha  # main-term weight
        self.beta = beta    # auxiliary-term weight

    def forward(self, y_pred, y_true):
        mse_term = self.mse(y_pred, y_true)
        mae_term = self.mae(y_pred, y_true)
        return self.alpha * mse_term + self.beta * mae_term

class MultiStepWeightedLoss(nn.Module):
    def __init__(self, loss_type="mse", weight_strategy="linear", tail_ratio=0.5, device="cpu"):
        """
        Multi-step weighted loss: per-timestep MSE/MAE with timestep weights.

        Args:
            loss_type (str): "mse" or "mae"
            weight_strategy (str): how weights are assigned across timesteps
                - "linear": weights increase linearly over time
                - "exp": weights increase exponentially over time
                - "uniform": all timesteps weighted equally
            tail_ratio (float): weight gain toward the later timesteps
                (used by "linear" and "exp" strategies)
            device (str): default device for the weight tensor; kept for
                backward compatibility — forward() now follows pred.device.

        Raises:
            ValueError: if loss_type is not "mse" or "mae".
        """
        super(MultiStepWeightedLoss, self).__init__()
        self.loss_type = loss_type
        self.weight_strategy = weight_strategy
        self.tail_ratio = tail_ratio
        self.device = device

        if loss_type == "mse":
            self.base_loss = nn.MSELoss(reduction='none')
        elif loss_type == "mae":
            self.base_loss = nn.L1Loss(reduction='none')
        else:
            raise ValueError("loss_type must be 'mse' or 'mae'")

    def _get_weights(self, T, device=None):
        """Build a [1, T, 1] weight tensor for T timesteps per the strategy.

        Raises:
            ValueError: if the configured weight_strategy is unknown.
        """
        if device is None:
            device = self.device
        if self.weight_strategy == "linear":
            weights = torch.linspace(1.0, 1.0 + self.tail_ratio, T, device=device)
        elif self.weight_strategy == "exp":
            weights = torch.exp(torch.linspace(0, self.tail_ratio, T, device=device))
        elif self.weight_strategy == "uniform":
            weights = torch.ones(T, device=device)
        else:
            raise ValueError("Unknown weight_strategy")

        # Normalize so the weights sum to T; keeps the overall loss scale
        # comparable to the unweighted mean regardless of strategy.
        weights = weights / weights.sum() * T
        return weights.view(1, T, 1)  # shape [1,T,1] for broadcasting

    def forward(self, pred, target):
        """
        pred: [B, T, C]
        target: [B, T, C]
        """
        B, T, C = pred.shape
        loss_per_step = self.base_loss(pred, target)  # [B,T,C]

        # Fix: build weights on the input's device instead of self.device,
        # otherwise a CUDA input with the default device="cpu" would fail.
        weights = self._get_weights(T, device=pred.device)  # [1,T,1]
        weighted_loss = (loss_per_step * weights).mean()
        return weighted_loss

class PMLLoss(nn.Module):
    def __init__(self, beta=0.5, alpha=0.4, eps=1e-12, threshold=0.2, weight=1.25, use_smooth=True, use_weight=True, use_invScar=True):
        super(PMLLoss, self).__init__()
        self.eps = eps
        self.alpha = alpha
        self.threshold = threshold
        self.weight = weight
        self.smoothL1 = nn.SmoothL1Loss(beta=beta,reduction='none')
        self.use_smooth = use_smooth
        self.use_weight = use_weight
        self.use_invScar = use_invScar
        self.prdScaler = joblib.load(config.prdScalerPath)
        if config.outputSize == 1:
            self.mu = self.prdScaler.mean_[config.outputIdx:config.outputIdx+1]
            self.std = self.prdScaler.scale_[config.outputIdx:config.outputIdx+1]
        else:
            self.mu = self.prdScaler.mean_[:config.outputSize]
            self.std = self.prdScaler.scale_[:config.outputSize]
        self.mu = torch.tensor(self.mu, dtype=torch.float32).to(config.device)
        self.std = torch.tensor(self.std, dtype=torch.float32).to(config.device)

    def forward(self, pred, target):
        # pred, target: [B, T, N]
        # ---- PML 部分 ----
        if self.use_invScar == True:
            pred_invScar = pred * self.std + self.mu
            target_invScar = target * self.std + self.mu
            abs_err = torch.abs(pred_invScar - target_invScar)   # [B,T,N]
            num = abs_err.sum(dim=(1, 2))        # [B]
            den = target_invScar.abs().sum(dim=(1, 2)) + self.eps  # [B]
            pml_list = num / den         # [B]
        else:
            abs_err = torch.abs(pred - target)   # [B,T,N]
            num = abs_err.sum(dim=(1, 2))        # [B]
            den = target.abs().sum(dim=(1, 2)) + self.eps  # [B]
            pml_list = num / den         # [B]
        if self.use_weight == True:
            weights = torch.ones_like(pml_list)
            weights[pml_list > self.threshold] = self.weight
            pml_loss = (pml_list * weights).mean()     # 标量
        else:
            pml_loss = pml_list.mean()

        if self.use_smooth:
            sl1_loss = self.smoothL1(pred, target).mean()
            loss = self.alpha * sl1_loss + (1 - self.alpha) * pml_loss
        else:
            loss = pml_loss

        return loss

class HuberLoss(nn.Module):
    def __init__(self, delta=1.0, reduction='mean', apply_weights=False, weights_len=0.5):
        """
        Huber (smooth L1) loss with optional late-timestep weighting.

        Args:
            delta: transition point — quadratic for |error| <= delta,
                linear beyond it.
            reduction: 'mean' | 'sum' | 'none'
            apply_weights: if True, linearly up-weight the tail of the
                prediction horizon from 1.15 to 1.3.
            weights_len: fraction of the horizon kept at weight 1.0; the
                remaining tail receives the linear up-weighting.
        """
        super(HuberLoss, self).__init__()
        self.delta = delta
        self.reduction = reduction
        self.apply_weights = apply_weights
        self.weights_len = weights_len

    def forward(self, pred, target):
        # pred/target shape: (batch, time, features)
        abs_err = (pred - target).abs()
        # Piecewise Huber, written explicitly instead of via clamp:
        #   |e| <= delta : 0.5 * e^2
        #   |e| >  delta : 0.5 * delta^2 + delta * (|e| - delta)
        loss = torch.where(
            abs_err <= self.delta,
            0.5 * abs_err ** 2,
            0.5 * self.delta ** 2 + self.delta * (abs_err - self.delta),
        )

        # Optional timestep weighting: head stays at 1.0, tail ramps 1.15 -> 1.3.
        if self.apply_weights:
            steps = pred.size(1)
            head = int(steps * self.weights_len)
            tail = torch.linspace(1.15, 1.3, steps - head, device=pred.device)
            step_weights = torch.cat([torch.ones(head, device=pred.device), tail])
            loss = loss * step_weights.view(1, steps, 1)  # broadcast over batch/features

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss

class nllLoss(nn.Module):
    """Gaussian negative log-likelihood loss for probabilistic forecasts."""

    def __init__(self):
        super().__init__()
        self.criterion = nn.GaussianNLLLoss(full=True, eps=1e-6)

    def forward(self, pred, target):
        # pred: [B, L, 2*outputSize] — first half holds mu, second half log(sigma^2).
        mu = pred[:, :, :config.outputSize]        # [B, L, outputSize]
        log_var = pred[:, :, config.outputSize:]   # [B, L, outputSize]
        variance = log_var.exp()  # GaussianNLLLoss expects variance, not log-variance
        return self.criterion(mu, target, variance)

if __name__ == "__main__":
    # Smoke test: compare a hand-computed MSE against WeightedMSELoss.
    criterion = WeightedMSELoss()
    pred = torch.zeros((3, 4, 1))
    target = torch.zeros((3, 4, 8))
    # pred varies along the batch axis, target (channel 0) along the time axis.
    for t in range(4):
        for b in range(3):
            pred[b][t][0] = b
            target[b][t][0] = t
    # Reference MSE computed by hand (fix: do not shadow the builtin `sum`).
    total = 0
    for t in range(4):
        for b in range(3):
            diff = pred[b][t][0] - target[b][t][0]
            total += diff * diff
    total /= 12
    print(total)
    # WeightedMSELoss adds a 0.1-weighted MAE term, so this differs from `total`.
    loss = criterion(pred.squeeze(-1), target[:, :, 0])
    print(loss)