import torch
import torch.nn as nn

# Polar decomposition: factor a matrix into an orthogonal part and a
# symmetric positive semi-definite part via the thin SVD.
def polar_decomposition(weight):
    """Return (U, P) such that weight = U @ P.

    U = U_svd @ Vh is the orthogonal polar factor (orthonormal columns when
    the matrix is tall) and P = Vh.T @ diag(S) @ Vh is the symmetric
    positive semi-definite factor.
    """
    left, singulars, right = torch.linalg.svd(weight, full_matrices=False)
    orthogonal = left @ right
    # (right.T * singulars) scales column j by s_j — same as right.T @ diag(s),
    # without materializing the diagonal matrix.
    symmetric = (right.T * singulars) @ right
    return orthogonal, symmetric

# Projection onto the tangent space of the Stiefel manifold
def project_to_tangent(U, grad):
    """Project `grad` onto the tangent space of the Stiefel manifold at U.

    For U with orthonormal columns the tangent space is
    {D : U.T @ D + D.T @ U = 0}, and the orthogonal projection is
    grad - U @ sym(U.T @ grad), with sym(A) = (A + A.T) / 2.

    The previous form, grad - U @ (U.T @ grad), removed the entire
    in-span component (the Grassmann projection) and therefore discarded
    the valid skew-symmetric part of the gradient.

    Args:
        U (torch.Tensor): point on the manifold, orthonormal columns.
        grad (torch.Tensor): ambient gradient, same shape as U.
    Returns:
        torch.Tensor: tangent component of `grad` at U.
    """
    UtG = U.T @ grad
    return grad - U @ ((UtG + UtG.T) / 2)

# Retract a matrix back onto the Stiefel manifold
def retraction(U):
    """Retract U onto the Stiefel manifold via a sign-fixed QR decomposition.

    QR is unique only up to the signs of R's diagonal; without fixing them
    the returned Q may flip column signs, so the retraction would not be
    continuous and would not fix points already on the manifold. Forcing
    diag(R) >= 0 gives the canonical QR retraction.

    Args:
        U (torch.Tensor): matrix to retract (e.g. U after a tangent step).
    Returns:
        torch.Tensor: matrix with orthonormal columns, same shape as U.
    """
    Q, R = torch.linalg.qr(U)
    diag = torch.diagonal(R)
    # Treat zeros as +1 so rank-deficient diagonals do not zero out columns.
    signs = torch.where(diag < 0, -torch.ones_like(diag), torch.ones_like(diag))
    return Q * signs

# PolarLinearStiefel layer
class PolarLinearStiefel(nn.Module):
    """Linear layer whose weight is factored as W = U @ P.

    Only the orthogonal factor U is trainable (intended to be updated on
    the Stiefel manifold by `custom_step`); the symmetric factor P is
    frozen at initialization.
    """

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        # The dense weight is only the source for the decomposition; freeze
        # it so a generic optimizer does not update this unused tensor.
        self.linear.weight.requires_grad_(False)
        with torch.no_grad():
            U, P = polar_decomposition(self.linear.weight.data)
        self.U = nn.Parameter(U)  # orthogonal factor (trainable)
        # Buffer (not plain attribute) so P follows .to(device)/.state_dict().
        self.register_buffer("P", P)  # non-orthogonal factor (frozen)

    def forward(self, x):
        weight = self.U @ self.P
        return nn.functional.linear(x, weight, self.linear.bias)

# Multi-layer model
class StiefelMLP(nn.Module):
    """MLP built from PolarLinearStiefel layers.

    Args:
        input_size (int): number of input features.
        hidden_sizes (list of int): units in each hidden layer.
        output_size (int): number of output features.
        activation_fn (class): activation class (defaults to ReLU).
    """

    def __init__(self, input_size, hidden_sizes, output_size, activation_fn=nn.ReLU):
        super().__init__()
        dims = [input_size] + list(hidden_sizes) + [output_size]
        modules = []
        last = len(dims) - 2
        for idx in range(len(dims) - 1):
            modules.append(PolarLinearStiefel(dims[idx], dims[idx + 1]))
            # Activation after every layer except the output layer.
            if idx < last:
                modules.append(activation_fn())
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        return self.model(x)

# Custom optimizer step
def custom_step(model, loss, learning_rate):
    """Riemannian SGD step for the U matrix of every PolarLinearStiefel layer.

    Backpropagates `loss`, projects each U-gradient onto the tangent space
    of the Stiefel manifold, takes a gradient-descent step, and retracts the
    result back onto the manifold. All gradients are cleared afterwards.

    Args:
        model (nn.Module): model containing PolarLinearStiefel layers.
        loss (torch.Tensor): scalar loss to backpropagate.
        learning_rate (float): step size.
    """
    loss.backward()  # compute gradients
    with torch.no_grad():
        for module in model.modules():
            if isinstance(module, PolarLinearStiefel):
                grad_U = module.U.grad
                if grad_U is None:
                    # Layer did not participate in this forward pass.
                    continue
                tangent_grad = project_to_tangent(module.U.data, grad_U)
                module.U.data.add_(tangent_grad, alpha=-learning_rate)  # descent step
                module.U.data = retraction(module.U.data)  # back onto the manifold
    model.zero_grad()  # clear gradients

# Smoke test: train a small StiefelMLP on random regression data.
def main():
    """Run a short training loop exercising the custom Stiefel update.

    Previously this ran at module top level, so merely importing the file
    started training; it is now guarded by __main__.
    """
    input_size = 5
    hidden_sizes = [10, 8]
    output_size = 3
    batch_size = 16
    num_epochs = 100
    learning_rate = 0.01

    # Build model and loss
    model = StiefelMLP(input_size, hidden_sizes, output_size)
    loss_fn = nn.MSELoss()

    # Random regression data
    x = torch.randn(batch_size, input_size)
    target = torch.randn(batch_size, output_size)

    # Training loop
    for epoch in range(num_epochs):
        model.train()

        # Forward pass
        output = model(x)
        loss = loss_fn(output, target)

        # Custom optimization step
        custom_step(model, loss, learning_rate)

        # Report progress every 10 epochs
        if (epoch + 1) % 10 == 0:
            print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")

    print("训练完成！")


if __name__ == "__main__":
    main()
