
#pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124 -i https://pypi.tuna.tsinghua.edu.cn/simple

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error

# --------------------- Data loading and preprocessing ---------------------
data = pd.read_csv("E:/GraduateDesign/LinearUse.csv")
X = data.drop(columns=['rrr']).values  # feature matrix as a numpy array
y = data['rrr'].values.reshape(-1, 1)  # make y 2-D (n_samples, 1), as StandardScaler requires

# Standardize features and target independently.
# NOTE(review): the scalers are fitted on the FULL dataset before the
# train/test split, which leaks test-set statistics into training —
# consider fitting on the training split only.
scaler_X = StandardScaler()
scaler_y = StandardScaler()
X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y)

# Convert to PyTorch tensors shaped for a batch_first Transformer:
# (batch, seq_len, n_features). The rows are not sequential data, so each
# sample is treated as a sequence of length 1.
X_reshaped = X_scaled.reshape(-1, 1, X_scaled.shape[1])  # (n_samples, 1, n_features)
X_tensor = torch.tensor(X_reshaped, dtype=torch.float32)
y_tensor = torch.tensor(y_scaled, dtype=torch.float32)

# Train/test split: 80/20, shuffled, fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(
    X_tensor, y_tensor, test_size=0.2, random_state=42, shuffle=True
)


# --------------------- Transformer model definition ---------------------
class TransformerRegressor(nn.Module):
    """Transformer-encoder regressor.

    Takes input of shape (batch, seq_len, input_dim), runs it through a
    stack of Transformer encoder layers, and maps the representation at
    the final sequence position to ``output_dim`` values with a linear
    head.

    Note: ``input_dim`` serves as ``d_model`` and must be divisible by
    ``num_heads`` (an ``nn.MultiheadAttention`` requirement).
    """

    def __init__(self, input_dim, output_dim, num_heads=4, dim_feedforward=64, num_layers=2):
        super().__init__()
        layer = nn.TransformerEncoderLayer(
            d_model=input_dim,
            nhead=num_heads,
            dim_feedforward=dim_feedforward,
            batch_first=True,
        )
        self.encoder_layer = layer
        self.transformer_encoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Map (batch, seq_len, input_dim) -> (batch, output_dim)."""
        encoded = self.transformer_encoder(x)  # (batch, seq_len, input_dim)
        last_position = encoded[:, -1, :]      # keep only the final sequence step
        return self.fc(last_position)


# Instantiate the model: one output unit for scalar regression
input_dim = X_train.shape[2]  # number of input features (last axis of (batch, seq, feat))
# NOTE(review): nn.MultiheadAttention requires d_model (= input_dim) to be
# divisible by the default num_heads=4 — verify the feature count satisfies this.
model = TransformerRegressor(input_dim=input_dim, output_dim=1)
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.MSELoss()

# --------------------- Training setup ---------------------
train_losses = []  # per-epoch mean mini-batch training loss
val_losses = []    # per-epoch validation loss
num_epochs = 100
batch_size = 32

# Wrap the training tensors in a shuffling mini-batch loader
train_dataset = torch.utils.data.TensorDataset(X_train, y_train)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# --------------------- Training loop ---------------------
for epoch in range(num_epochs):
    # One pass over the training set in shuffled mini-batches.
    model.train()
    epoch_loss = 0
    for X_batch, y_batch in train_loader:
        optimizer.zero_grad()
        y_pred = model(X_batch)
        loss = criterion(y_pred, y_batch)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()

    # Validation loss on the held-out split. BUGFIX: the original code
    # evaluated X_train here, so the printed "Val Loss" was just the
    # training loss again. In this simple script the test split doubles
    # as the validation set; for rigorous model selection, carve a
    # separate validation set out of the training data instead.
    model.eval()
    with torch.no_grad():
        val_pred = model(X_test)
        val_loss = criterion(val_pred, y_test)

    train_losses.append(epoch_loss / len(train_loader))
    val_losses.append(val_loss.item())

    # Report progress every 10 epochs
    if (epoch + 1) % 10 == 0:
        print(f"Epoch {epoch + 1}/{num_epochs}, Train Loss: {train_losses[-1]:.4f}, Val Loss: {val_losses[-1]:.4f}")

# --------------------- Test-set evaluation ---------------------
model.eval()
with torch.no_grad():
    y_pred_scaled = model(X_test).numpy()

# Undo the target standardization so the errors are in original units
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_test_original = scaler_y.inverse_transform(y_test.numpy())

# Report RMSE and MAE. COMPAT FIX: mean_squared_error's `squared` keyword
# was deprecated in scikit-learn 1.4 and removed in 1.6, so take the
# square root explicitly instead of passing squared=False.
rmse = float(np.sqrt(mean_squared_error(y_test_original, y_pred)))
print(f"\nRMSE: {rmse:.4f}")
print(f"MAE: {mean_absolute_error(y_test_original, y_pred):.4f}")

# --------------------- Optional: plot training curves ---------------------
# import matplotlib.pyplot as plt
#
# plt.plot(train_losses, label='Training Loss')
# plt.plot(val_losses, label='Validation Loss')
# plt.xlabel('Epoch')
# plt.ylabel('MSE Loss')
# plt.legend()
# plt.show()