import torch
import torch.nn as nn
import torch.optim as optim
import re

# Load the raw corpus of five-character Chinese poems (wuyan.txt) from disk.
# NOTE(review): hard-coded absolute Windows path — will only run on this machine.
file_path = "C:\\Users\\LENOVO\\Desktop\\wuyan.txt"
with open(file_path, 'r', encoding='utf-8') as f:
    text = f.read()

# 数据预处理
def preprocess_text(text):
    """Return *text* reduced to CJK characters only.

    Newlines are dropped, then every run of characters outside the
    CJK Unified Ideographs range (U+4E00-U+9FFF) is removed, so
    punctuation, Latin letters, digits and whitespace all disappear.
    """
    stripped = text.replace('\n', '')
    return re.sub(r'[^\u4e00-\u9fff]+', '', stripped)


# Clean the corpus, then build the character vocabulary and both lookup tables.
text = preprocess_text(text)
chars = sorted(set(text))
idx_to_char = dict(enumerate(chars))
char_to_idx = {ch: i for i, ch in idx_to_char.items()}
vocab_size = len(chars)

print(f"Total characters: {len(text)}")
print(f"Vocabulary size: {vocab_size}")


class LSTMModel(nn.Module):
    """Character-level LSTM that predicts the next character.

    Takes a one-hot sequence of shape (batch, seq_len, input_size) and an
    (h, c) hidden-state pair; returns log-probabilities over the vocabulary
    for the character following the sequence, plus the updated hidden state.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=2):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x, hidden):
        """Run the sequence through the LSTM and classify the final step."""
        seq_features, hidden = self.lstm(x, hidden)
        # Only the last time step feeds the classifier: next-char prediction.
        last_step = seq_features[:, -1, :]
        log_probs = self.softmax(self.fc(last_step))
        return log_probs, hidden

    def init_hidden(self, batch_size):
        """Return a zeroed (h, c) pair matching the parameters' dtype/device."""
        ref = next(self.parameters())
        shape = (self.num_layers, batch_size, self.hidden_size)
        return (torch.zeros(shape, dtype=ref.dtype, device=ref.device),
                torch.zeros(shape, dtype=ref.dtype, device=ref.device))


def prepare_data(text, seq_length, vocab=None):
    """Build sliding-window training pairs from *text*.

    Each input is a list of ``seq_length`` character indices and each target
    is the index of the character immediately following that window.

    Args:
        text: corpus string to window over.
        seq_length: number of characters per input window.
        vocab: optional char->index mapping; defaults to the module-level
            ``char_to_idx`` (kept for backward compatibility with existing
            callers that pass only two arguments).

    Returns:
        (inputs, targets): list of index lists, and list of target indices.
        Both are empty when ``len(text) <= seq_length``.
    """
    if vocab is None:
        vocab = char_to_idx
    inputs = []
    targets = []
    # One window per starting position; stops so the target index is valid.
    for i in range(len(text) - seq_length):
        window = text[i:i + seq_length]
        inputs.append([vocab[ch] for ch in window])
        targets.append(vocab[text[i + seq_length]])
    return inputs, targets


# Length of each input window (characters of context per prediction).
seq_length = 10
inputs, targets = prepare_data(text, seq_length)

# Convert to tensors; one_hot in the training loop requires long dtype.
inputs = torch.tensor(inputs, dtype=torch.long)
targets = torch.tensor(targets, dtype=torch.long)

# Hyperparameters: input/output width equals the vocabulary because the
# model consumes one-hot characters and predicts a distribution over chars.
batch_size = 64
input_size = vocab_size
hidden_size = 256
output_size = vocab_size
num_epochs = 50
learning_rate = 0.003

model = LSTMModel(input_size, hidden_size, output_size)
# NLLLoss pairs with the model's LogSoftmax output layer.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop. The hidden state is carried across batches within an epoch
# (stateful training), but must be detached each step — see below.
for epoch in range(num_epochs):
    h = model.init_hidden(batch_size)
    total_loss = 0.0
    num_batches = 0

    for i in range(0, len(inputs), batch_size):
        x = inputs[i:i + batch_size]
        y = targets[i:i + batch_size]
        # Skip the final partial batch: h is sized for exactly batch_size.
        if x.size(0) != batch_size:
            continue
        x = nn.functional.one_hot(x, num_classes=vocab_size).float()

        # BUG FIX: detach the carried hidden state so backprop stops at this
        # batch. Without this, the second backward() tries to traverse the
        # already-freed graph of the previous batch and PyTorch raises
        # "Trying to backward through the graph a second time".
        h = tuple(state.detach() for state in h)

        output, h = model(x, h)
        loss = criterion(output, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        num_batches += 1

    # Average over batches actually trained (the old code divided by the
    # number of samples, which understated the per-batch mean loss).
    avg_loss = total_loss / max(num_batches, 1)
    print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {avg_loss:.4f}")


def generate_text(model, start_str, length=15):
    """Sample *length* characters from *model*, seeded with *start_str*.

    The seed is fed through the network once to warm up the hidden state,
    then characters are drawn one at a time from the model's predicted
    distribution and fed back in. Returns seed + generated characters.
    Relies on module-level char_to_idx / idx_to_char / vocab_size.
    """
    model.eval()
    with torch.no_grad():
        input_eval = torch.tensor([char_to_idx[char] for char in start_str],
                                  dtype=torch.long).unsqueeze(0)
        input_eval = nn.functional.one_hot(input_eval, num_classes=vocab_size).float()
        h = model.init_hidden(1)
        predicted_text = start_str

        for _ in range(length):
            output, h = model(input_eval, h)
            # BUG FIX: the model already emits log-probabilities (LogSoftmax),
            # so exp() recovers the true distribution. The old code applied
            # softmax to the log-probs a second time, which flattened the
            # distribution and degraded sampling quality.
            prob = output.exp()
            predicted_idx = torch.multinomial(prob, num_samples=1).item()
            predicted_text += idx_to_char[predicted_idx]

            # Feed the sampled character back in as the next input.
            input_eval = torch.tensor([[predicted_idx]], dtype=torch.long)
            input_eval = nn.functional.one_hot(input_eval, num_classes=vocab_size).float()

        return predicted_text


def format_text(text):
    """Lay out *text* as a poem: 5-character half-lines, alternating punctuation.

    The text is cut into consecutive 5-character chunks; odd-numbered chunks
    (1st, 3rd, ...) end with "，" and even-numbered chunks close a couplet
    with "。". A trailing "，" is stripped so the poem never ends mid-couplet.
    """
    pieces = []
    for start in range(0, len(text), 5):
        chunk = text[start:start + 5]
        # start // 5 is the zero-based chunk index: odd index -> couplet end.
        mark = "。" if (start // 5) % 2 == 1 else "，"
        pieces.append(chunk + mark)
    result = "".join(pieces)
    if result.endswith("，"):
        result = result[:-1]
    return result

# Seed the sampler with an opening five-character line, then pretty-print.
start_string = "君看一叶舟"
raw_sample = generate_text(model, start_string, length=15)
generated_text = format_text(raw_sample)
print(generated_text)