import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

# Prepare the data: corpus, vocabulary, and model hyperparameters.
text = "hello world"
chars = sorted(set(text))  # deduplicated characters in deterministic order
char_to_idx = {ch: i for i, ch in enumerate(chars)}
idx_to_char = dict(enumerate(chars))
input_size = len(chars)    # width of the one-hot input vector
hidden_size = 8            # number of hidden units in the RNN
output_size = len(chars)   # one logit per vocabulary character
sequence_length = 4        # characters fed to the model per window


def char_tensor(char):
    """Return a one-hot float vector of size `input_size` for a single character."""
    idx = char_to_idx[char]
    one_hot = torch.zeros(input_size)
    one_hot[idx] = 1.0
    return one_hot


def char_seq_tensor(seq):
    """One-hot encode `seq` into a (sequence_length, input_size) tensor.

    If `seq` is shorter than `sequence_length`, the trailing rows remain
    all-zero (implicit padding).
    """
    encoded = torch.zeros(sequence_length, input_size)
    for position, symbol in enumerate(seq):
        encoded[position] = char_tensor(symbol)
    return encoded


# 定义模型
# Model definition
class SimpleRNN(nn.Module):
    """Single-layer vanilla RNN followed by a linear read-out.

    The forward pass returns logits for the final time step only,
    together with the updated hidden state.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, h):
        rnn_out, h = self.rnn(x, h)
        last_step = rnn_out[:, -1, :]  # keep only the final time step
        return self.fc(last_step), h


# Initialize model, loss function, and optimizer.
model = SimpleRNN(input_size, hidden_size, output_size)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Train the model: slide a window over the text and predict the next character.
num_epochs = 500
for epoch in range(num_epochs):
    for i in range(len(text) - sequence_length):
        input_seq = text[i:i + sequence_length]
        # The model emits logits for the final time step only, so the sole
        # supervised target is the character immediately after the window.
        # (The original built a full shifted target sequence and then used
        # only its last element.)
        target_tensor = torch.tensor([char_to_idx[text[i + sequence_length]]])

        input_tensor = char_seq_tensor(input_seq).unsqueeze(0)  # (1, seq, vocab)
        h = torch.zeros(1, 1, hidden_size)  # fresh hidden state per window

        output, h = model(input_tensor, h)
        loss = criterion(output, target_tensor)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')


# 文本生成
# Text generation
def generate_text(model, start_str, length):
    """Greedily extend `start_str` by `length` characters using the model.

    Args:
        model: trained SimpleRNN instance.
        start_str: seed text; its last `sequence_length` characters are fed
            to the model at each step.
        length: number of characters to append.

    Returns:
        `start_str` followed by the generated characters.
    """
    model.eval()
    generated = start_str
    h = torch.zeros(1, 1, hidden_size)

    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        for _ in range(length):
            window = char_seq_tensor(generated[-sequence_length:]).unsqueeze(0)
            output, h = model(window, h)
            # Greedy decoding: softmax is monotonic, so taking argmax of the
            # raw logits selects the same character without the extra pass.
            char_idx = output[0].argmax().item()
            generated += idx_to_char[char_idx]

    return generated


# Generate new text with the trained model.
start_str = "hell"
generated_text = generate_text(model, start_str, 1)
print("Generated Text:", generated_text)
