import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import joblib
import matplotlib.pyplot as plt
from Config import Config


class LSTMModel(nn.Module):
    """Sequence-to-sequence LSTM regressor.

    Maps an input of shape (batch, seq_len, input_size) to per-timestep
    predictions of shape (batch, seq_len, output_size) — the linear head is
    applied to every timestep, not just the last one.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=1, dropout=0.2):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # nn.LSTM ignores `dropout` when num_layers == 1 and emits a
        # UserWarning; passing 0.0 explicitly in that case keeps behavior
        # identical while silencing the warning.
        self.lstm = nn.LSTM(
            input_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0.0,
        )
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the LSTM and project every timestep's hidden state.

        Args:
            x: float tensor of shape (batch, seq_len, input_size).

        Returns:
            Tensor of shape (batch, seq_len, output_size).
        """
        lstm_out, _ = self.lstm(x)  # (batch, seq_len, hidden_size)
        out = self.fc(lstm_out)
        return out


# Model hyper-parameters and weight loading.
input_size = 14  # must match the number of feature columns used at training time
hidden_size = Config.hidden_size
output_size = 1
num_layers = Config.num_layers
dropout = Config.dropout
print("in:", input_size, "\nout:", output_size)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device selection

model = LSTMModel(input_size, hidden_size, output_size, num_layers, dropout)
model_path = 'lstm_model.pth'
# map_location makes a GPU-saved checkpoint loadable on a CPU-only machine;
# without it, torch.load fails when CUDA is unavailable.
model.load_state_dict(torch.load(model_path, map_location=device))
model = model.to(device)  # move the model to the selected device

model.eval()  # evaluation mode (disables dropout layers)


# Data preprocessing helper
def preprocess_new_data(file_path, scaler, sequence_length=10, device=None):
    """Load a CSV, normalize its feature columns, and build sliding-window
    sequences for the LSTM.

    Args:
        file_path: path (or file-like object) readable by ``pd.read_csv``;
            must contain a 'Y' column (targets) plus the feature columns the
            scaler was fitted on.
        scaler: fitted scaler exposing ``transform`` (e.g. an sklearn scaler).
        sequence_length: number of consecutive timesteps per sample.
        device: optional torch device for the returned tensors; defaults to
            CUDA when available, else CPU (same policy as the script-level
            ``device``), so existing callers are unaffected.

    Returns:
        Tuple ``(sequences_tensor, true_values_tensor)`` of float32 tensors
        with shapes (num_windows, sequence_length, num_features) and
        (num_windows, sequence_length).

    Raises:
        ValueError: if the file holds fewer than ``sequence_length`` rows.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load new data
    data = pd.read_csv(file_path)

    # Isolate features (excluding 'ID', 'time', 'Y', and 'S').
    # NOTE(review): ``columns.difference`` returns the remaining columns in
    # *sorted* order. That must match the column order the scaler and model
    # were trained with — confirm the training pipeline selects columns the
    # same way.
    feature_columns = data.columns.difference(['ID', 'time', 'Y', 'S'])
    features = data[feature_columns]

    # Normalize features with the provided (already-fitted) scaler
    features_normalized = pd.DataFrame(scaler.transform(features), columns=features.columns)

    # Guard against inputs too short to yield even one window; the original
    # code would silently produce a malformed empty tensor here.
    n_windows = len(features_normalized) - sequence_length + 1
    if n_windows <= 0:
        raise ValueError(
            f"Need at least {sequence_length} rows to build one sequence, "
            f"got {len(features_normalized)}"
        )

    # Sliding windows: each run of `sequence_length` consecutive timesteps
    # becomes one sample, paired with the matching true 'Y' values.
    sequences = []
    true_values = []
    for i in range(n_windows):
        sequences.append(features_normalized.iloc[i:i + sequence_length].values)
        true_values.append(data['Y'].iloc[i:i + sequence_length].values)

    # Convert to tensors and move to the target device
    sequences_tensor = torch.tensor(np.array(sequences), dtype=torch.float32).to(device)
    true_values_tensor = torch.tensor(np.array(true_values), dtype=torch.float32).to(device)
    return sequences_tensor, true_values_tensor


# Load the fitted feature scaler.
scaler = joblib.load('scaler.pkl')

# Run inference on the new data (no gradients needed at eval time).
with torch.no_grad():
    new_data_tensor, true_values_tensor = preprocess_new_data(file_path='./load.csv', scaler=scaler, sequence_length=15)
    predictions = model(new_data_tensor)
    predicted_values = predictions.cpu().numpy()  # bring results back to CPU
    true_values = true_values_tensor.cpu().numpy()  # bring ground truth back to CPU

# Plot predictions against ground truth for at most 10 samples.
num_samples_to_plot = min(10, len(predicted_values))
fig, axes = plt.subplots(num_samples_to_plot, 1, figsize=(10, num_samples_to_plot * 3), squeeze=False)

for idx, ax in enumerate(axes[:, 0]):
    ax.plot(true_values[idx], label="True Value", color='blue')
    ax.plot(predicted_values[idx], label="Predicted Value", linestyle='--', color='orange')
    ax.set_title(f"Sample {idx + 1}")
    ax.set_xlabel("Time Step")
    ax.set_ylabel("Value")
    ax.legend()

plt.tight_layout()
plt.show()
