import torch
from eval import show_without_plt
from eval import check_orthogonality

def cayley_update(R, grad, alpha=1e-4):
    """Take one Cayley-transform step on the rotation matrix ``R``.

    The gradient is projected onto the skew-symmetric matrices and the
    orthogonality-preserving Cayley map is applied, so an orthogonal ``R``
    stays orthogonal up to numerical error.

    Args:
        R: square matrix of shape (n, n) to update.
        grad: gradient w.r.t. ``R``, shape (n, n).
        alpha: step size of the update.

    Returns:
        Updated (n, n) tensor; orthogonal whenever ``R`` is.
    """
    # Skew-symmetric projection of the gradient: A = (G - G^T) / 2.
    skew_symmetric = 0.5 * (grad - grad.T)

    # Match dtype as well as device — the original comment promised dtype
    # consistency but only set the device, so non-float32 inputs promoted.
    identity = torch.eye(R.size(0), device=R.device, dtype=R.dtype)

    # Cayley map: (I - aA)^{-1} (I + aA) is orthogonal for skew-symmetric A.
    # Solve the linear system instead of forming an explicit inverse —
    # cheaper and numerically more stable than torch.linalg.inv.
    return torch.linalg.solve(identity - alpha * skew_symmetric,
                              (identity + alpha * skew_symmetric) @ R)

def project_to_orthogonal(R):
    """Quickly project ``R`` onto an orthogonal matrix via QR decomposition.

    The raw ``Q`` from QR is only unique up to per-column sign flips: an
    already-orthogonal input could come back with flipped columns. Fixing
    the signs so that the R-factor diagonal is non-negative makes the
    projection deterministic and idempotent.

    Args:
        R: square matrix of shape (n, n).

    Returns:
        Orthogonal (n, n) tensor Q with Q @ Q.T == I.
    """
    Q, upper = torch.linalg.qr(R)
    # Transfer the signs of diag(upper) onto Q's columns; map exact zeros
    # to +1 so the result is always a valid orthogonal matrix.
    signs = torch.sign(torch.diagonal(upper))
    signs = torch.where(signs == 0, torch.ones_like(signs), signs)
    return Q * signs  # broadcasts: column j scaled by signs[j]

def compute_loss(R, X, l1_factor=1e-2):
    """Return the scaled L1 norm of the rotated features ``X @ R``.

    Args:
        R: rotation matrix applied on the right of ``X``.
        X: feature tensor to rotate.
        l1_factor: scale applied to the L1 penalty.

    Returns:
        Scalar tensor ``l1_factor * sum(|X @ R|)``.
    """
    # Rotate the features, then penalize their absolute magnitude (sparsity).
    return l1_factor * (X @ R).abs().sum()

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the pre-dumped FFN feature maps, one tensor per file, and stack
    # them into a single batch on the target device.
    # Assumes ffn_fm/ffn_feature_map_0.pth .. _31.pth exist — TODO confirm.
    range_limit = 32
    load_fms = torch.stack([torch.load(f'ffn_fm/ffn_feature_map_{i}.pth', weights_only=False).to(torch.float32)
                            for i in range(0, range_limit)]).to(device)

    # Load the rotation-matrix checkpoint and enable gradient tracking on it.
    rm = torch.load('train_log/1213-x.pth', weights_only=True).to(device)  # checkpoint file must exist
    rm.requires_grad_()  # enable gradient computation

    fm = load_fms[0]  # only the first feature map is used below

    # Report how far rm currently is from being orthogonal
    # (check_orthogonality is a project helper from eval — semantics assumed).
    print(f"初始正交性误差: {check_orthogonality(rm)}")

    # Forward pass: L1 sparsity loss of the rotated features, then visualize.
    loss = compute_loss(rm, fm, l1_factor=1.0)  # factor raised from the 1e-2 default
    show_without_plt(torch.matmul(fm, rm))
    # Zero any stale gradient so it does not accumulate into this step.
    if rm.grad is not None:
        rm.grad.zero_()

    # Backward pass: populate rm.grad.
    loss.backward()
    print(f"Gradient norm: {torch.norm(rm.grad).item()}")

    # One Cayley-transform step on the rotation matrix, done outside
    # autograd since we overwrite rm.data in place.
    alpha = 1e-4  # step size for the update
    with torch.no_grad():
        rm.data = cayley_update(rm, rm.grad, alpha)  # Cayley update
        rm.data = project_to_orthogonal(rm)  # explicit re-orthogonalization via QR
        rm.grad.zero_()
    show_without_plt(torch.matmul(fm, rm))
    # Verify orthogonality is still maintained after the update.
    print(f"更新后正交性误差: {check_orthogonality(rm)}")
