import torch
import torch.nn as nn
import numpy as np

# 1. Generate example data
# A simple time series: y = sin(x); each sample is a sliding window of the wave.
def generate_data(seq_length, num_samples):
    """Return (inputs, targets) sliding windows over a sine wave.

    inputs has shape (num_samples - seq_length, seq_length): each row holds
    seq_length consecutive sine values, and the matching entry of targets is
    the single value that immediately follows that window.
    """
    xs = np.linspace(0, 100, num_samples)
    wave = np.sin(xs)
    # Each window holds seq_length inputs plus the one value to predict.
    windows = np.array([wave[start:start + seq_length + 1]
                        for start in range(len(wave) - seq_length)])
    return windows[:, :-1], windows[:, -1]

# Window parameters
seq_length = 10   # predict the next value from the previous 10 time steps
num_samples = 1000
X, y = generate_data(seq_length, num_samples)

# PyTorch's batch-first LSTM expects (samples, timesteps, features); there is
# a single feature per time step, so reshape and convert to float32 tensors.
X = torch.tensor(X.reshape(-1, seq_length, 1), dtype=torch.float32)
y = torch.tensor(y.reshape(-1, 1), dtype=torch.float32)

# 2. Define the LSTM model
class LSTMModel(nn.Module):
    """An LSTM whose final hidden state is mapped to a prediction by a linear head."""

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> inputs are (batch, seq, feature)
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)  # maps hidden state to output

    def forward(self, x):
        """Run the sequence through the LSTM and predict from its last time step."""
        # Fresh zero hidden/cell states, shaped (num_layers, batch, hidden_size).
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = torch.zeros(state_shape, device=x.device)
        c0 = torch.zeros(state_shape, device=x.device)

        # out: (batch, seq_length, hidden_size)
        out, _ = self.lstm(x, (h0, c0))

        # Only the last time step feeds the linear layer -> (batch, output_size)
        return self.fc(out[:, -1, :])

# Model hyper-parameters
input_size = 1    # features per time step
hidden_size = 50  # hidden units in the LSTM
output_size = 1   # a single predicted value
num_layers = 1    # number of stacked LSTM layers

model = LSTMModel(input_size, hidden_size, output_size, num_layers=num_layers)

# 3. Loss function and optimizer: mean-squared error trained with Adam.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# 4. Train the model with plain mini-batch slices (no shuffling).
num_epochs = 20
batch_size = 32

for epoch in range(num_epochs):
    for start in range(0, len(X), batch_size):
        xb = X[start:start + batch_size]
        yb = y[start:start + batch_size]

        # Forward pass and loss
        loss = criterion(model(xb), yb)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Report the loss of the final batch every 5 epochs.
    if (epoch + 1) % 5 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# 5. Test the model
# Fix: run inference under torch.no_grad() so no autograd graph is built for
# the forward pass, and switch to eval mode (a no-op for this model since it
# has no dropout/batch-norm, but correct practice before inference).
model.eval()
test_input = X[:5]  # first 5 samples as a quick smoke test
with torch.no_grad():
    predictions = model(test_input)

print("输入形状：", test_input.shape)
print("预测输出：", predictions)
