"""自定义loss
"""
import torch
import plugin
import logging
from torch.nn import BCEWithLogitsLoss


def listmle_loss(
    logits: torch.Tensor,
    rankorder: torch.Tensor,
    lengths: list[int],
    redunction: str = "mean"
):
    """ListMLE loss that only ranks candidates classified as positive.

    Args:
        logits: model output, [batch, seq_len, 2]; channel 0 is the POI
            classification score, the last channel is the ranking score.
        rankorder: rank position per candidate, -1 means "ignore";
            shape [total_ocr_num], the whole batch concatenated.
        lengths: number of candidate boxes per sample, used to unflatten
            rankorder into [batch, seq_len].
        redunction: "mean" for the batch-mean loss, "sum" for the batch sum.
            NOTE: the parameter name keeps the historical typo of
            "reduction" for backward compatibility with existing callers.

    Returns:
        Scalar loss tensor.
    """
    assert redunction in ["mean", "sum"]
    assert logits.dim() == 3
    assert rankorder.dim() == 1

    batch_size, seq_len = logits.size(0), logits.size(1)

    # Unflatten rankorder into [batch, seq_len]; padded positions stay -1.
    seq_order = torch.full(
        (batch_size, seq_len), -1.0, dtype=logits.dtype, device=logits.device
    )
    offset = 0
    for i, n in enumerate(lengths):
        seq_order[i, :n] = rankorder[offset: offset + n]
        offset += n

    # Shuffle candidate positions (kept from the original recipe; the final
    # loss is invariant because scores are re-sorted by true rank below).
    perm = torch.randperm(seq_len)
    seq_order = seq_order[:, perm]
    logits = logits[:, perm, :]

    # Ignore positions the classification head predicts as non-POI (<= 0).
    # masked_fill is out-of-place, avoiding in-place writes to tensors that
    # participate in the autograd graph.
    seq_order = seq_order.masked_fill(logits[:, :, 0] <= 0, -1.0)
    # Number of positions per row that actually take part in the ranking.
    valid_len = torch.sum(seq_order >= 0, dim=1)

    # Ranking scores; invalid positions get -inf so they contribute nothing
    # to the log-sum-exp terms below.
    rankscore = logits[:, :, -1].masked_fill(seq_order < 0, -float("inf"))
    # Push ignored positions behind every real rank, then sort by rank so
    # rankscore is laid out in true ranking order, unused slots last.
    seq_order = seq_order.masked_fill(seq_order < 0, float(seq_len + 1))
    _, true_order = torch.sort(seq_order)
    rankscore = rankscore.gather(1, index=true_order)

    # mask[i, k] is True for the first valid_len[i] (real) positions.
    positions = torch.arange(seq_len, device=rankscore.device)
    mask = positions.unsqueeze(0) < valid_len.unsqueeze(1)

    # ListMLE: loss_k = log(sum_{j>=k} exp(s_j)) - s_k.  logcumsumexp on the
    # reversed scores computes the suffix log-sum-exp in a numerically
    # stable way (no exp overflow, no epsilon needed).
    log_cum = torch.logcumsumexp(rankscore.flip(1), dim=1).flip(1)
    batch_loss = (log_cum - rankscore).masked_fill(~mask, 0.0)
    batch_loss = torch.sum(batch_loss, dim=1)

    if redunction == "mean":
        return torch.mean(batch_loss)
    return torch.sum(batch_loss)


def pairwise_ranking_loss(
    logits: torch.Tensor,
    rankorder: torch.Tensor,
    lengths: list[int],
    redunction: str = "mean",
    eta: float = 0.01,
    for_all: bool = False
):
    """Pairwise hinge ranking loss over adjacent rank pairs.

    Args:
        logits: model output, [batch, seq_len, 2]; channel 0 is the POI
            classification score, channel 1 is the ranking score.
        rankorder: rank position per candidate, -1 means "ignore";
            shape [total_ocr_num], the whole batch concatenated.
        lengths: number of candidate boxes per sample.
        redunction: "mean" or "sum" (name keeps the historical typo of
            "reduction" for backward compatibility).
        eta: hinge margin added to each pairwise score difference.
        for_all: when True, rank every labelled candidate even if the
            classification head predicts it as non-POI.

    Returns:
        Scalar loss tensor.
    """
    assert redunction in ["mean", "sum"]
    assert logits.dim() == 3
    assert rankorder.dim() == 1

    batch_size, seq_len = logits.size(0), logits.size(1)

    # Unflatten rankorder into [batch, seq_len]; padded positions stay -1.
    seq_order = torch.full(
        (batch_size, seq_len), -1.0, dtype=logits.dtype, device=logits.device
    )
    offset = 0
    for i, n in enumerate(lengths):
        seq_order[i, :n] = rankorder[offset: offset + n]
        offset += n

    # Ignore positions predicted as non-POI unless for_all is requested.
    if not for_all:
        seq_order = seq_order.masked_fill(logits[:, :, 0] <= 0, -1.0)
    valid_len = torch.sum(seq_order >= 0, dim=1)

    rankscore = logits[:, :, 1]
    seq_order = seq_order.masked_fill(seq_order < 0, float(seq_len + 1))
    # Group valid positions in front and invalid behind by sorting on a
    # binary key (equivalent to the original 0 / seq_len+1 grouping key).
    _, true_order = torch.sort((seq_order == seq_len + 1).to(seq_order.dtype))
    rankscore = rankscore.gather(1, index=true_order)
    seq_order = seq_order.gather(1, index=true_order)

    # Adjacent-pair mask: row i has valid_len[i] - 1 real pairs.  This
    # comparison handles valid_len == 0 correctly (no pair selected),
    # which the old generate_mask(valid_len - 1, ...) call got wrong:
    # a -1 length sliced as [:-1] and marked invalid positions as valid.
    pair_pos = torch.arange(seq_len - 1, device=rankscore.device)
    pair_mask = pair_pos.unsqueeze(0) < (valid_len - 1).unsqueeze(1)

    score_diff = torch.diff(rankscore)
    # +1 when the right neighbour is ranked worse, -1 when better, 0 if tied.
    rank_sign = torch.sign(torch.diff(seq_order))
    # Hinge: max(0, sign * (s_next - s_prev) + eta), invalid pairs zeroed.
    batch_loss = score_diff * rank_sign + eta
    batch_loss = batch_loss.masked_fill(~pair_mask, 0.0)
    batch_loss = torch.clamp(batch_loss, min=0.0)
    batch_loss = torch.sum(batch_loss, dim=1)

    if redunction == "mean":
        return torch.mean(batch_loss)
    return torch.sum(batch_loss)


def generate_mask(valid_len: torch.Tensor, max_len: int):
    """Build a [batch, max_len] float mask: ones in the first valid_len[i]
    positions of row i, zeros elsewhere.

    A non-positive length yields an all-zero row.  The previous loop-based
    version sliced with a negative length (e.g. mask[i, :-1] = 1 when
    valid_len[i] == -1), wrongly marking positions as valid when callers
    pass valid_len - 1.

    Args:
        valid_len: [batch] tensor of valid-position counts.
        max_len: mask width (sequence length).

    Returns:
        Float tensor of shape [batch, max_len] on valid_len's device.
    """
    positions = torch.arange(max_len, device=valid_len.device)
    # Broadcast compare: position k is valid iff k < valid_len[i];
    # never true for negative lengths, so those rows stay all-zero.
    return (positions.unsqueeze(0) < valid_len.unsqueeze(1)).float()


@plugin.register_plugin("loss", "BCELogitLossWithListMLELoss")
class BCELogitLossWithListMLELoss:
    """适用于本比赛的Loss包装类，自动计算BCEloss与rankloss的加权和
    """

    def __init__(self, alpha=1, device="cuda", warmup=5) -> None:
        self.alpha = alpha
        self.bce_loss = BCEWithLogitsLoss()
        self.device = device
    
    def __call__(self, logits: torch.Tensor, rankorder: torch.Tensor, lengths: list[int], only_bce=False) -> torch.Tensor:
        poi_logits = torch.cat([logits[i, :lengths[i], 0].squeeze(-1) for i in range(len(lengths))])
        labels = (rankorder >= 0).float()
        bce_loss = self.bce_loss(poi_logits, labels)
        # if torch.isnan(bce_loss) or torch.isinf(bce_loss):
        #     logging.error(bce_loss)
        #     logging.error("logits: ")
        #     logging.error(poi_logits)
        #     logging.error(labels)

        if only_bce:
            return bce_loss + torch.Tensor([10]).to(logits.device), bce_loss, torch.Tensor([0])
        rankloss = listmle_loss(logits, rankorder, lengths)
        return bce_loss + self.alpha * rankloss, bce_loss, rankloss


@plugin.register_plugin("loss", "BCELogitWithPairwiseLoss")
class BCELogitWithPairwiseLoss:
    """适用于本比赛的Loss包装类，自动计算BCEloss与pairwise loss的加权和
    """

    def __init__(self, alpha=1, device="cuda", warmup=5, eta=0.001, for_all=False) -> None:
        self.alpha = alpha
        self.bce_loss = BCEWithLogitsLoss()
        self.device = device
        self.eta = eta
        self.for_all = for_all

    
    def __call__(self, logits: torch.Tensor, rankorder: torch.Tensor, lengths: list[int], only_bce=False) -> torch.Tensor:
        poi_logits = torch.cat([logits[i, :lengths[i], 0].squeeze(-1) for i in range(len(lengths))])
        labels = (rankorder >= 0).float()
        bce_loss = self.bce_loss(poi_logits, labels)
        # if torch.isnan(bce_loss) or torch.isinf(bce_loss):
        #     logging.error(bce_loss)
        #     logging.error("logits: ")
        #     logging.error(poi_logits)
        #     logging.error(labels)

        if only_bce:
            return bce_loss + torch.Tensor([2]).to(logits.device), bce_loss, torch.Tensor([0])
        rankloss = pairwise_ranking_loss(logits, rankorder, lengths, eta=self.eta, for_all=self.for_all)
        return bce_loss + self.alpha * rankloss, bce_loss, rankloss