import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import os


# Model definition
class MatrixFeatureNet(nn.Module):
    """MLP mapping a flattened 10x10 matrix (100 features) to 10 eigenvalues.

    Attribute names (fc1..fc5, dropout) are kept stable so that previously
    saved state dicts remain loadable.
    """

    def __init__(self):
        super().__init__()
        # Hidden stack: 100 -> 256 -> 128 -> 64 -> 32, then a 10-wide head.
        self.fc1 = nn.Linear(100, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 32)
        self.fc5 = nn.Linear(32, 10)
        # Dropout regularization applied after the first three hidden layers.
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        # ReLU after every hidden layer; dropout after the first three only.
        x = self.dropout(torch.relu(self.fc1(x)))
        x = self.dropout(torch.relu(self.fc2(x)))
        x = self.dropout(torch.relu(self.fc3(x)))
        x = torch.relu(self.fc4(x))
        return self.fc5(x)


# Generate symmetric-matrix training data
def generate_symmetric_data(batch_size):
    """Generate random 10x10 symmetric matrices with their eigenvalue labels.

    Args:
        batch_size: number of (matrix, eigenvalues) samples to generate.

    Returns:
        Tuple of float32 tensors: matrices of shape (batch_size, 10, 10)
        and labels of shape (batch_size, 10) with eigenvalues sorted
        in ascending order.
    """
    matrices = []
    labels = []
    for _ in range(batch_size):
        A = np.random.randn(10, 10).astype(np.float32)
        sym_matrix = (A + A.T) / 2  # symmetrize
        # eigvalsh exploits symmetry and returns real eigenvalues in
        # ascending order. The previous np.linalg.eig returned them in
        # arbitrary order, making regression targets inconsistent across
        # samples and the mapping effectively unlearnable.
        eigenvalues = np.linalg.eigvalsh(sym_matrix).astype(np.float32)
        matrices.append(sym_matrix)
        labels.append(eigenvalues)
    return torch.tensor(np.stack(matrices)), torch.tensor(np.stack(labels))


# Model saving and loading helpers
def save_model(model, filename="model.pth"):
    """Persist the model's learned parameters (state dict) to *filename*."""
    state = model.state_dict()
    torch.save(state, filename)


def load_model(model, filename="model.pth"):
    """Load weights into *model* from *filename* if the file exists.

    Puts the model in eval mode on success.

    Args:
        model: module whose state dict layout matches the saved file.
        filename: path to a state dict saved with torch.save.

    Returns:
        True if weights were loaded, False if the file does not exist.
    """
    if not os.path.exists(filename):
        return False
    # weights_only=True avoids unpickling arbitrary objects from the file.
    model.load_state_dict(torch.load(filename, weights_only=True))
    model.eval()
    # Bug fix: the message previously printed the literal text "(unknown)"
    # instead of interpolating the actual filename.
    print(f"Model loaded from {filename}")
    return True


# Initialize the model, loss function, and optimizer
model = MatrixFeatureNet()
criterion = nn.MSELoss()  # regression loss over the 10 predicted eigenvalues
optimizer = optim.Adam(params=model.parameters(), lr=1e-4)


# Train the model
def train(model, criterion, optimizer, num_epochs=20, batch_size=64):
    """Train *model* on freshly sampled symmetric matrices, one batch per epoch.

    Args:
        model: network mapping 100 flattened inputs to 10 outputs.
        criterion: loss comparing predictions against eigenvalue targets.
        optimizer: optimizer wrapping the model's parameters.
        num_epochs: number of gradient steps (one fresh batch per step).
        batch_size: samples generated for each step.
    """
    model.train()
    for epoch in range(num_epochs):
        inputs, targets = generate_symmetric_data(batch_size)
        flat_inputs = inputs.view(batch_size, -1)  # flatten 10x10 -> 100
        optimizer.zero_grad()
        loss = criterion(model(flat_inputs), targets)
        loss.backward()
        optimizer.step()
        print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")


# Run the model on a single matrix
def test(model, matrix):
    model.eval()
    with torch.no_grad():
        matrix_tensor = torch.tensor(matrix).view(1, -1).float()
        output = model(matrix_tensor)
        return output[0].numpy()


# Compute the ground-truth eigenvalues
def compute_actual_eigen(matrix):
    """Return the real eigenvalues of a symmetric *matrix*, sorted ascending.

    Uses np.linalg.eigvalsh, which exploits symmetry and guarantees real,
    deterministically ordered output. The previous np.linalg.eig returned
    eigenvalues in arbitrary order, making comparisons against other
    eigenvalue lists order-dependent.
    """
    return np.linalg.eigvalsh(matrix)


# Main program
model_filename = "model.pth"
# Load cached weights if available; otherwise train from scratch and save.
if not load_model(model, model_filename):
    print("Training the model...")
    train(model, criterion, optimizer, num_epochs=1000, batch_size=64)
    save_model(model, model_filename)

# Evaluate on a fresh symmetric matrix.
test_matrix = np.random.randn(10, 10).astype(np.float32)
test_matrix = (test_matrix + test_matrix.T) / 2  # symmetrize
predicted_eigenvalues = test(model, test_matrix)
actual_eigenvalues = compute_actual_eigen(test_matrix)

# Print results
print("Actual Eigenvalues:")
print(actual_eigenvalues)
print("Predicted Eigenvalues:")
print(predicted_eigenvalues)

# Bug fix: sort both spectra before differencing. Eigenvalue order is
# otherwise arbitrary, so an unsorted L2 distance conflates ordering
# mismatch with prediction error.
eigenvalue_error = np.linalg.norm(
    np.sort(actual_eigenvalues) - np.sort(predicted_eigenvalues)
)
print(f"Eigenvalue Prediction Error: {eigenvalue_error:.4f}")
