import torch
import torch.nn as nn

# Define a simple fully-connected layer
class SimpleLinearLayer(nn.Module):
    """Minimal wrapper around a single fully-connected (linear) layer."""

    def __init__(self, in_features, out_features):
        super().__init__()
        # One affine transform: y = x @ W.T + b
        self.linear = nn.Linear(in_features, out_features)

    def forward(self, x):
        # Delegate straight to the wrapped nn.Linear module.
        return self.linear(x)

# Set the input and output feature dimensions
in_features = 100
out_features = 50

# Create a random weight matrix to be low-rank factorized
original_weights = torch.randn(out_features, in_features)

# Rank of the truncated decomposition (dimension of the bottleneck)
rank = 20

# SVD: original_weights = U @ diag(S) @ Vt.
# torch.linalg.svd returns V already transposed (Vh), unlike the
# deprecated torch.svd, which returned V and made row-slicing it wrong.
U, S, Vt = torch.linalg.svd(original_weights, full_matrices=False)

# Keep only the top-`rank` singular values and their singular vectors
U_r = U[:, :rank]            # (out_features, rank)
S_r = torch.diag(S[:rank])   # (rank, rank)
Vt_r = Vt[:rank, :]          # (rank, in_features)

# Reconstruct the rank-`rank` approximation of the weight matrix
approximated_weights = torch.matmul(torch.matmul(U_r, S_r), Vt_r)

# Print the shapes of the original and approximated weights
print(f"Original weights shape: {original_weights.shape}")
print(f"Approximated weights shape: {approximated_weights.shape}")

# Build a new linear layer initialized with the approximated weights.
# nn.Linear stores its weight as (out_features, in_features) — exactly
# the shape of `approximated_weights` — so NO transpose is needed.
approx_layer = nn.Linear(in_features, out_features)
with torch.no_grad():
    # copy_ is shape-checked, unlike a raw `.data =` assignment.
    approx_layer.weight.copy_(approximated_weights)

# Sanity-check the forward pass of the new layer
input_tensor = torch.randn(1, in_features)
output_tensor = approx_layer(input_tensor)
print(f"Output tensor shape: {output_tensor.shape}")
