import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

# Seed both NumPy and PyTorch RNGs so every run is reproducible.
np.random.seed(42)
torch.manual_seed(42)

# Synthetic time series: a slow sine wave corrupted by Gaussian noise.
t = np.linspace(0, 100, 1000)
noise = np.random.normal(0, 0.1, 1000)
data = np.sin(0.1 * t) + noise

# Data prep: turn a 1-D series into supervised (window, next-value) pairs.
def create_sequences(data, seq_length):
    """Slide a window of length seq_length over data.

    Returns (X, y) where X[i] is data[i:i + seq_length] and y[i] is the
    value immediately following that window.
    """
    n_windows = len(data) - seq_length
    windows = [data[start:start + seq_length] for start in range(n_windows)]
    targets = [data[start + seq_length] for start in range(n_windows)]
    return np.array(windows), np.array(targets)

seq_length = 10  # length of each input window
X, y = create_sequences(data, seq_length)

# Float32 tensors; the LSTM expects inputs shaped [batch, seq_len, features].
X = torch.tensor(X, dtype=torch.float32).unsqueeze(-1)  # [n_samples, seq_length, 1]
y = torch.tensor(y, dtype=torch.float32).unsqueeze(-1)  # [n_samples, 1]

# Chronological 80/20 split — no shuffling for time-series data.
train_size = int(0.8 * len(X))
X_train, y_train = X[:train_size], y[:train_size]
X_test, y_test = X[train_size:], y[train_size:]

# LSTM followed by a linear head for one-step-ahead regression.
class LSTMRegressor(nn.Module):
    """Map a [batch, seq_len, input_size] sequence to one scalar per sample."""

    def __init__(self, input_size=1, hidden_size=50, num_layers=1):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # Explicit zero initial hidden/cell states (matches the LSTM default).
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        hidden = torch.zeros(state_shape, device=x.device)
        cell = torch.zeros(state_shape, device=x.device)
        seq_out, _ = self.lstm(x, (hidden, cell))
        # Only the final time step feeds the regression head.
        return self.fc(seq_out[:, -1, :])

# Model plus its training apparatus.
model = LSTMRegressor()
criterion = nn.MSELoss()  # mean-squared-error loss
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Full-batch training: each epoch is one forward/backward pass over the
# entire training set.
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    predictions = model(X_train)
    loss = criterion(predictions, y_train)
    loss.backward()
    optimizer.step()
    # Report progress every 20 epochs.
    if (epoch + 1) % 20 == 0:
        print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")

# Inference on both splits without gradient tracking.
model.eval()
with torch.no_grad():
    y_pred_train = model(X_train).numpy()
    y_pred_test = model(X_test).numpy()

# Mean absolute error on the held-out set.
mae = np.abs(y_test.numpy() - y_pred_test).mean()
print(f"Test Set Mean Absolute Error (MAE): {mae:.3f}")

# Crude 95% prediction interval: treat training residuals as i.i.d.
# Gaussian and use their standard deviation as the noise scale.
train_residuals = y_train.numpy() - y_pred_train
z_95 = 1.96  # z-value for a 95% interval under the normality assumption
half_width = z_95 * np.std(train_residuals)
lower_bound = y_pred_test - half_width
upper_bound = y_pred_test + half_width

# Example: the interval around the first test-set prediction.
print(f"Test Point 1 - Predicted Value: {y_pred_test[0].item():.3f}, "
      f"95% Prediction Interval: [{lower_bound[0].item():.3f}, {upper_bound[0].item():.3f}]")

# Visualization: true vs. predicted values, with the 95% band on the test span.
plt.figure(figsize=(12, 6))

# Training split: ground truth and model fit share the same time axis.
train_time = range(seq_length, seq_length + len(y_train))
plt.plot(train_time, y_train.numpy(),
         color='blue', label='Training True Values', alpha=0.5)
plt.plot(train_time, y_pred_train,
         color='red', label='Training Predicted Values')

# Test split: ground truth, predictions, and the uncertainty band.
test_time = range(seq_length + len(y_train), len(data))
plt.plot(test_time, y_test.numpy(), color='green', label='Test True Values', alpha=0.5)
plt.plot(test_time, y_pred_test, color='orange', label='Test Predicted Values')
plt.fill_between(test_time, lower_bound.flatten(), upper_bound.flatten(),
                 color='orange', alpha=0.2, label='95% Prediction Interval')

# Axis labels, title, legend, and grid.
plt.xlabel('Time Step')
plt.ylabel('Value')
plt.title('LSTM Regression: True vs Predicted Values with 95% Prediction Interval')
plt.legend()
plt.grid(True)

# Render the figure.
plt.show()