import torch
import torch.nn as nn
import torch.optim as optim


# A minimal sequence-to-sequence RNN model.
class SimpleRNN(nn.Module):
    """Vanilla RNN followed by a per-timestep linear projection.

    Maps an input of shape (batch, seq_len, input_size) to an output of
    shape (batch, seq_len, output_size).
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super().__init__()
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the RNN over ``x`` and project every timestep."""
        # All-zero initial hidden state: one slab per layer, per sample.
        # new_zeros inherits dtype/device from x (same as zeros().to(device)).
        h0 = x.new_zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size)

        rnn_out, _ = self.rnn(x, h0)

        # Linear head applied independently at each timestep.
        return self.fc(rnn_out)


# Hyperparameters
input_size = 1    # features per input timestep
hidden_size = 16  # width of the RNN hidden state
output_size = 1   # features per output timestep
num_layers = 2    # stacked RNN layers

# Build the model
model = SimpleRNN(input_size, hidden_size, output_size, num_layers)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Training data: each input row [n, n+1, n+2, n+3] should predict the
# shifted row [n+1, n+2, n+3, n+4].
base = torch.arange(4, dtype=torch.float32)
X = torch.stack([base + i for i in range(3)]).unsqueeze(-1)      # (batch=3, seq=4, feat=1)
y = torch.stack([base + i + 1 for i in range(3)]).unsqueeze(-1)  # same shape as X

# Train the model
num_epochs = 100

for epoch in range(num_epochs):
    model.train()
    # Standard step: clear grads, forward, loss, backward, update.
    optimizer.zero_grad()
    loss = criterion(model(X), y)
    loss.backward()
    optimizer.step()

    # Log progress every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

# Inspect predictions on an unseen sequence.
model.eval()
with torch.no_grad():
    # Ask the model to continue the sequence [3, 4, 5, 6].
    test_input = torch.FloatTensor([3, 4, 5, 6]).reshape(1, 4, 1)
    pred = model(test_input)
    # Flatten to a 1-D array for readable printing.
    print("Predicted sequence:", pred.view(-1).numpy())
    # Last timestep's output is the predicted continuation of the series.
    print("Predicted next number:", pred[0, -1, 0].item())