# encoding=utf-8
import torch
import torch.nn.functional as F
import string
from torch import nn

# Punctuation characters stripped from the corpus during text cleaning.
punctuation = string.punctuation


class RNN(nn.Module):
    """Minimal single-step recurrent cell for character-level modeling.

    Each call consumes one time step: the input vector and the previous
    hidden state are concatenated, then projected through two linear
    layers — one producing the next hidden state, one producing
    log-probabilities over the output classes.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        # Both projections read the concatenated [input, hidden] vector.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input_tensor, hidden_tensor):
        """Run one step; returns (log-probs, next hidden state)."""
        joint = torch.cat((input_tensor, hidden_tensor), 1)
        next_hidden = self.i2h(joint)
        log_probs = self.softmax(self.i2o(joint))
        return log_probs, next_hidden

    def init_hidden(self):
        """Zero hidden state for a batch of one: shape (1, hidden_size)."""
        return torch.zeros(1, self.hidden_size)


def generate_text(model, initial_string, length=100, temperature=0.8):
    """Sample ``length`` characters from ``model``, seeded with ``initial_string``.

    Relies on the module-level ``input_size``, ``char_to_index`` and
    ``index_to_char`` lookup tables. Every character of ``initial_string``
    must appear in ``char_to_index`` (the corpus is lower-cased and stripped
    of punctuation, so the seed should be too).

    Args:
        model: an RNN whose forward step is ``output, hidden = model(x, hidden)``.
        initial_string: non-empty seed text used to prime the hidden state.
        length: number of characters to generate after the seed.
        temperature: softmax temperature; < 1 sharpens the distribution,
            > 1 flattens it toward uniform.

    Returns:
        ``initial_string`` with ``length`` sampled characters appended.

    Raises:
        ValueError: if ``initial_string`` is empty.
        KeyError: if the seed contains a character outside the vocabulary.
    """
    if not initial_string:
        raise ValueError("initial_string must contain at least one character")

    hidden = model.init_hidden()
    generated = initial_string

    with torch.no_grad():  # pure inference — no autograd graph needed
        # Prime the hidden state on EVERY seed character (the original code
        # only fed the first one, silently ignoring the rest of the seed).
        for ch in initial_string:
            input_tensor = torch.zeros(1, input_size)
            input_tensor[:, char_to_index[ch]] = 1
            output, hidden = model(input_tensor, hidden)

        for _ in range(length):
            # model outputs log-probs; exp(log_p / T) reweights by temperature.
            output_dist = output.view(-1).div(temperature).exp()
            # .item(): multinomial returns a tensor, but the lookup tables
            # are keyed by plain ints — a tensor key would raise KeyError.
            top_i = torch.multinomial(output_dist, 1).item()
            next_char = index_to_char[top_i]
            generated += next_char
            input_tensor = torch.zeros(1, input_size)
            input_tensor[:, char_to_index[next_char]] = 1
            output, hidden = model(input_tensor, hidden)

    return generated


# ---- Data preparation ------------------------------------------------------

# Read the training corpus.
with open("text_file.txt", "r", encoding="utf-8") as f:
    text = f.read()

# Normalize: lower-case and strip punctuation to keep the vocabulary small.
text = text.lower()
text = ''.join(c for c in text if c not in punctuation)

# Character <-> index lookup tables. sorted() makes the mapping deterministic
# across runs — list(set(...)) ordering varies with string hash randomization,
# which would make a saved model's vocabulary irreproducible.
chars = sorted(set(text))
char_to_index = {ch: i for i, ch in enumerate(chars)}
index_to_char = {i: ch for i, ch in enumerate(chars)}

# Corpus encoded as a flat list of vocabulary indices.
input_data = [char_to_index[ch] for ch in text]

# Hyperparameters.
input_size = len(chars)
hidden_size = 128
output_size = len(chars)
learning_rate = 0.001

# Model and optimizer.
model = RNN(input_size, hidden_size, output_size)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# ---- Training --------------------------------------------------------------
num_epochs = 1000
batch_size = 100
seq_length = 50
for epoch in range(num_epochs):
    # Batch element j is the same window shifted by j characters, so the
    # slice must cover seq_length + batch_size characters (+1 for targets).
    # The original sliced only seq_length chars, so input_seq[i + j] below
    # raised IndexError; it also used a 1-element tensor as a slice bound.
    max_start = len(input_data) - seq_length - batch_size - 1
    start_index = int(torch.randint(0, max_start, (1,)).item())
    input_seq = input_data[start_index:start_index + seq_length + batch_size]
    target_seq = input_data[start_index + 1:start_index + seq_length + batch_size + 1]

    # One-hot inputs: (seq_length, batch_size, vocab); targets: class indices.
    input_tensor = torch.zeros(seq_length, batch_size, input_size)
    target_tensor = torch.zeros(seq_length, batch_size).long()
    for i in range(seq_length):
        for j in range(batch_size):
            input_tensor[i, j, input_seq[i + j]] = 1
            target_tensor[i, j] = target_seq[i + j]

    # BPTT over the window. The initial hidden state must match the batch
    # dimension: init_hidden() is (1, hidden), but torch.cat inside the model
    # needs (batch_size, hidden) — the original shape-mismatched here.
    hidden = model.init_hidden().repeat(batch_size, 1)
    model.zero_grad()
    loss = 0
    for i in range(seq_length):
        output, hidden = model(input_tensor[i], hidden)
        # output is already (batch_size, vocab); no squeeze needed.
        loss += F.nll_loss(output, target_tensor[i])

    loss.backward()
    optimizer.step()

    # Periodic progress report.
    if (epoch + 1) % 100 == 0:
        print("Epoch [{}/{}], Loss: {:.4f}".format(epoch + 1, num_epochs, loss.item()))

# Generate 100 characters of text. The corpus was lower-cased and stripped of
# punctuation during preprocessing, so the seed must be lower-case as well —
# an uppercase 'T' would not be in char_to_index and would raise KeyError.
generated_text = generate_text(model, "the quick brown fox ", length=100)

print(generated_text)