import torch
import torch.nn as nn
import joblib
import pandas as pd


# 定义LSTM模型
class LSTMModel(nn.Module):
    """LSTM regressor: encodes a sequence and maps the final time step's
    hidden output to a single scalar through a linear head.

    Args:
        input_size: number of features per time step.
        hidden_size: LSTM hidden-state width.
        num_layers: number of stacked LSTM layers.
    """

    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        # batch_first stays at its default (False): input is expected as
        # (seq_len, batch, input_size), or unbatched (seq_len, input_size).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        self.linear = nn.Linear(hidden_size, 1)

    def forward(self, x):
        seq_out, _ = self.lstm(x)
        # Keep only the last time step — it summarizes the whole sequence.
        last_step = seq_out[-1]
        return self.linear(last_step)


# Initialize the model. These hyperparameters must match the checkpoint
# being loaded, or load_state_dict will fail on mismatched shapes.
input_size = 9  # number of input features per time step
hidden_size = 16  # LSTM hidden units
num_layers = 2  # stacked LSTM layers
model = LSTMModel(input_size, hidden_size, num_layers)

# Restore the trained weights on CPU. weights_only=True restricts the
# pickle payload to tensors/primitives, preventing arbitrary code
# execution from a tampered checkpoint file (a state_dict needs nothing more).
model_path = 'model.pth'
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'), weights_only=True))

print("Model loaded from", model_path)

# Load the scaler instances saved at training time so test data is
# normalized with the exact same statistics.
# NOTE(review): joblib.load unpickles — only load these from trusted files.
loaded_feature_scaler = joblib.load('feature_scaler.pkl')
loaded_target_scaler = joblib.load('target_scaler.pkl')

# Read the test (validation) data.
test_data = pd.read_csv('./data-process/data/valid.csv')

# Extract the feature columns: drops the first and last columns.
# Presumably col 0 is an index/id and the last column is the target —
# TODO confirm against the training preprocessing.
test_features = test_data.iloc[:, 1:-1].values


# Normalize with the loaded feature scaler (transform only — never fit here).
normalized_test_features = loaded_feature_scaler.transform(test_features)

# Convert normalized features to a float32 tensor. NOTE(review): the whole
# validation set is fed as ONE unbatched sequence (rows = time steps) —
# confirm this matches how the model was trained.
normalized_test_features_tensor = torch.tensor(normalized_test_features, dtype=torch.float32)

# eval() disables dropout / freezes batch-norm statistics; torch.no_grad()
# skips autograd graph construction, saving memory and time at inference.
model.eval()
with torch.no_grad():
    prediction = model(normalized_test_features_tensor)
print(prediction)

# Map the prediction back to the original target scale. detach() is no
# longer needed: tensors created under no_grad() carry no autograd graph.
denormalized_prediction = loaded_target_scaler.inverse_transform(prediction.numpy().reshape(-1, 1))

print("Denormalized Prediction:", denormalized_prediction)
