import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class EfficientSiameseLoss(nn.Module):
    """Pairwise hinge (margin-ranking) loss for Siamese IQA training.

    Ported from the Caffe implementation used by RankIQA
    (``netloss_tid2013.py``). The incoming batch is assumed to be laid out
    in groups of ``batch * level`` scores ordered from best to worst
    quality, so within each group every earlier sample should score higher
    than every later one. Each ordered cross-level pair ``(i, j)``
    contributes ``relu(margin - (x[i] - x[j]))`` to the loss.

    Args:
        margin: hinge margin enforced between ranked pairs.
        batch: images per distortion level (default 1, the original
            TID2013 setup).
        level: distortion levels per rank group (default 5).
        max_groups: upper bound on the number of groups processed per
            batch (default 9, matching the original hard-coded ``dis``).
    """

    def __init__(self, margin=10, batch=1, level=5, max_groups=9):
        super(EfficientSiameseLoss, self).__init__()
        self.margin = margin
        self.batch = batch
        self.level = level
        self.max_groups = max_groups

    def forward(self, x, labels=None):
        """Compute the pairwise ranking hinge loss.

        Args:
            x: model output scores, shape ``[batch_size, 1]``.
            labels: unused; kept so the signature is compatible with
                training loops that pass targets.

        Returns:
            Scalar loss tensor: sum of per-pair hinges divided by
            ``batch_size`` (zero tensor if no valid pairs exist).
        """
        batch_size = x.size(0)
        batch, level = self.batch, self.level
        group_size = batch * level
        # Number of rank groups present in this batch, capped as in the
        # original Caffe layer; clamping also guards small batches.
        n_groups = min(self.max_groups, batch_size // group_size)

        diffs = []
        for g in range(n_groups):
            start = group_size * g
            end = min(group_size * (g + 1), batch_size)  # stay in range
            for i in range(start, end - batch):
                # j starts at the first sample of the NEXT distortion
                # level, so only cross-level (ranked) pairs are formed.
                j_start = start + ((i - start) // batch + 1) * batch
                for j in range(j_start, end):
                    diffs.append(x[i] - x[j])

        if not diffs:
            # No valid pairs (e.g. batch smaller than one group): return
            # a zero that still participates in autograd.
            return torch.tensor(0.0, device=x.device, requires_grad=True)

        # Hinge: penalize pairs whose score gap falls below the margin.
        losses = F.relu(self.margin - torch.cat(diffs))
        # NOTE(review): the original normalizes by batch_size rather than
        # by the number of pairs — preserved for backward compatibility.
        return torch.sum(losses) / batch_size
