# encoding=utf-8
import torch
import torch.nn as nn

# "Rainbow compliment" (over-the-top flattery) text-generation model
class RainbowComplimentGenerator(nn.Module):
    """Word-level LSTM language model for generating compliments.

    Architecture: token embedding -> multi-layer LSTM -> linear projection
    back to vocabulary logits. Only the logits for the LAST time step are
    returned, so the model is used one step at a time during generation.
    """

    def __init__(self, vocab_size, embedding_size, hidden_size, num_layers):
        super(RainbowComplimentGenerator, self).__init__()

        # BUG FIX: init_hidden() reads self.num_layers / self.hidden_size,
        # but the original never stored them, causing an AttributeError.
        self.num_layers = num_layers
        self.hidden_size = hidden_size

        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.lstm = nn.LSTM(embedding_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, input, hidden):
        """Run one forward pass.

        Args:
            input: LongTensor of token ids, shape (batch, seq_len).
            hidden: (h_0, c_0) LSTM state tuple from init_hidden() or a
                previous call.

        Returns:
            (logits, hidden): logits over the vocabulary for the last time
            step, shape (batch, vocab_size), and the updated LSTM state.
        """
        embedded = self.embedding(input)
        output, hidden = self.lstm(embedded, hidden)
        # Project only the final time step to vocabulary logits.
        output = self.fc(output[:, -1, :])
        return output, hidden

    def init_hidden(self, batch_size):
        """Return a zero-initialized (h_0, c_0) state for `batch_size`."""
        return (torch.zeros(self.num_layers, batch_size, self.hidden_size),
                torch.zeros(self.num_layers, batch_size, self.hidden_size))

# Hyperparameters (must match the ones used when the checkpoint was trained)
vocab_size = 10000      # vocabulary size
embedding_size = 128    # word-embedding dimension
hidden_size = 256       # LSTM hidden-state size
num_layers = 2          # number of stacked LSTM layers
length = 10             # maximum number of tokens to generate

# Load the trained model weights. map_location='cpu' so checkpoints saved
# on a GPU machine still load on a CPU-only host.
model = RainbowComplimentGenerator(vocab_size, embedding_size, hidden_size, num_layers)
model.load_state_dict(torch.load('rainbow_compliment_model.pth', map_location='cpu'))
model.eval()  # inference mode (disables dropout etc.)

# Greedy generation: start from a random token and repeatedly feed the
# model's top-1 prediction back in as the next input.
# NOTE: renamed from `input` to avoid shadowing the builtin.
token = torch.randint(0, vocab_size, (1, 1))
hidden = model.init_hidden(1)
generated = []

with torch.no_grad():  # no gradients needed for generation
    for _ in range(length):
        output, hidden = model(token, hidden)
        _, topi = output.topk(1)       # greedy: highest-scoring token id
        topi = topi.squeeze(1)
        if topi.item() == 0:           # token id 0 is treated as end-of-sequence
            break
        generated.append(str(topi.item()))
        token = topi.unsqueeze(0)      # reshape to (1, 1) for the next step

# Join once instead of quadratic string += (also drops the trailing space).
print(" ".join(generated))
