import torch
import torch.nn as nn
import torch.optim as optim


# 定义 LSTM 模型
class LSTMModel(nn.Module):
    """LSTM followed by a linear head that maps the last time step to the output.

    Input shape:  (batch, seq_len, input_size)  -- batch_first=True.
    Output shape: (batch, output_size).
    """

    def __init__(self, input_size: int, hidden_size: int, output_size: int,
                 num_layers: int = 1):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer; batch_first=True so tensors are (batch, seq, feature).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        # Fully connected layer projecting the final hidden output to the
        # output space.
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the sequence through the LSTM and project the last time step.

        Args:
            x: input of shape (batch, seq_len, input_size).

        Returns:
            Tensor of shape (batch, output_size).
        """
        # Zero initial hidden/cell states, created directly on the input's
        # device (the original allocated on CPU and then copied with .to()).
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device)

        out, _ = self.lstm(x, (h0, c0))

        # Only the last time step's output, out[:, -1, :], feeds the head.
        return self.fc(out[:, -1, :])


# Model hyper-parameters
input_size = 1  # number of input features per time step
hidden_size = 16  # LSTM hidden state size
output_size = 1  # number of output features
num_layers = 1  # number of stacked LSTM layers

# Build the model
model = LSTMModel(input_size, hidden_size, output_size, num_layers)

# Loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Training data: each sequence predicts the value that follows it,
# e.g. [0, 1, 2, 3] -> 4.  Shape: (batch=3, seq_len=4, input_size=1).
X = torch.FloatTensor([[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]).view(3, 4, 1)
# FIX: targets were [[1], [2], [3]], which contradicts the next-step task the
# demo describes (with those labels the model learns "first element + 1").
# The correct next values for the three sequences are 4, 5 and 6.
y = torch.FloatTensor([[4], [5], [6]]).view(3, 1)  # same shape as model output
print(X)
print(y)

# Train the model with full-batch gradient descent.
num_epochs = 100
for epoch in range(1, num_epochs + 1):
    model.train()
    optimizer.zero_grad()
    prediction = model(X)
    loss = criterion(prediction, y)
    loss.backward()
    optimizer.step()
    # Report progress every 10 epochs.
    if epoch % 10 == 0:
        print(f"Epoch [{epoch}/{num_epochs}], Loss: {loss.item():.4f}")

# Evaluate on an unseen input sequence.
model.eval()
with torch.no_grad():
    new_seq = torch.FloatTensor([3, 4, 5, 6]).reshape(1, 4, 1)  # (1, 4, 1)
    pred = model(new_seq)
    print("Predicted value:", pred.item())  # the predicted scalar