import torch



def calculate_kurtosis(tensor: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:
    """Return the mean (non-excess) kurtosis of ``tensor`` along ``dim``.

    Args:
        tensor: activations; a leading batch dimension of size 1 is dropped,
            e.g. ``(1, seq_len, hidden)`` -> ``(seq_len, hidden)``.
            # assumes a 2-D layout after the squeeze — TODO confirm with callers
        dim: dimension along which mean/std/kurtosis are computed
            (default 1, matching the original hard-coded behavior).
        eps: small constant added to the std to avoid division by zero
            on constant rows (default 1e-8, the original value).

    Returns:
        Scalar tensor: kurtosis per row, averaged over rows. A Gaussian
        distribution has kurtosis 3.0.
    """
    # Drop a leading singleton batch dimension.
    tensor = tensor.squeeze(0)

    # Per-row mean and std along `dim`. torch.std defaults to the
    # Bessel-corrected (unbiased) estimator.
    mean = torch.mean(tensor, dim=dim, keepdim=True)
    std = torch.std(tensor, dim=dim, keepdim=True) + eps

    # Standardize, then take E[z^4] as the kurtosis estimate.
    standardized_tensor = (tensor - mean) / std
    kurtosis = torch.mean(standardized_tensor ** 4, dim=dim)

    # Average kurtosis across rows. NOTE: this is NOT an MSE against a
    # target value (the original trailing comment was misleading) — the
    # caller is responsible for comparing against the target kurtosis.
    return torch.mean(kurtosis)


def compute_loss_l1_with_kurtosis(R, X, l1_factor=1e-2, kurtosis_factor=1e-2, *, verbose=True):
    """Combined loss: L1 sparsity plus kurtosis regularization of ``X @ R``.

    Args:
        R: rotation matrix applied on the right, shape ``(d, d)``.
            # presumably orthogonal — verify against how R is produced
        X: activations, shape ``(..., d)``.
        l1_factor: weight of the L1 term.
        kurtosis_factor: weight of the kurtosis term.
        verbose: when True (default, preserving the original behavior),
            print the individual loss terms for debugging.

    Returns:
        Scalar tensor ``l1_factor * L1 + kurtosis_factor * (kurtosis - 3)^2``.
    """
    rotated_X = torch.matmul(X, R)

    # Sparsity-encouraging L1 term over all rotated activations.
    l1_loss = torch.sum(torch.abs(rotated_X))

    # Penalize deviation from Gaussian kurtosis (target value 3.0).
    kurtosis = calculate_kurtosis(rotated_X)
    kurtosis_loss = (kurtosis - 3.0) ** 2

    # Debug output was unconditional in the original; now gated but on by
    # default so existing callers see identical behavior.
    if verbose:
        print("L1Loss", l1_loss.item())
        print("KurtosisLoss", kurtosis_loss.item())

    # Weighted combination of the two regularizers.
    total_loss = l1_factor * l1_loss + kurtosis_factor * kurtosis_loss

    return total_loss

if __name__ == '__main__':
    # NOTE(review): hard-coded local Windows paths. The original first path
    # literal contained invalid escape sequences (\l, \s) — a SyntaxWarning
    # on Python 3.12+. Raw strings below produce byte-identical paths.
    tensor = torch.load(
        r'E:\model\models\llama2-7b\ffn\ffn\second_line_input\1tensor0.pth',
        weights_only=True,
    ).to(torch.float32).to("cuda:0")

    # Candidate rotation matrix.
    rt = torch.load(
        r'E:\model\models\llama2-7b\1-10-1 (1).pth',
        weights_only=True,
    ).to(torch.float32).to("cuda:0")

    # Random activations with the same shape as the loaded tensor.
    # Fix: the original `torch.randn(..., requires_grad=True).to(...)` made
    # a NON-leaf tensor, so gradients would never accumulate on tensor1.
    # Creating it directly on the target device/dtype keeps it a leaf.
    tensor1 = torch.randn(
        tensor.shape, dtype=torch.float32, device="cuda:0", requires_grad=True
    )

    compute_loss_l1_with_kurtosis(rt, tensor1)