import torch
import re
from torch import nn , optim
from torch.utils.data import Dataset,DataLoader

def preprocess(filepath):
    """Read a poem corpus and build id sequences plus a vocabulary.

    Args:
        filepath: path to a UTF-8 text file, one poem per line.

    Returns:
        (id_sequences, vocab, word2id) where
        id_sequences: list[list[int]] -- each poem as token ids,
        vocab: list[str] -- id -> character (last entry is '<PAD>'),
        word2id: dict[str, int] -- character -> id.
    """
    poem_list = []
    char_set = set()
    # Read the file line by line
    with open(filepath, 'r', encoding='utf-8') as f:
        for line in f:
            # Strip Chinese punctuation, then surrounding whitespace
            line = re.sub(r'[，。？！、：]', '', line).strip()
            # Keep the poem as a list of characters
            poem_list.append(list(line))
            # Grow the character set
            char_set.update(line)
    # 1.2 Build the vocabulary.  Sort so id assignment is deterministic
    # across runs -- iterating a raw set depends on PYTHONHASHSEED, which
    # would make a saved model's embedding indices irreproducible.
    vocab = sorted(char_set)
    vocab.append('<PAD>')
    word2id = {word: idx for idx, word in enumerate(vocab)}
    # 1.3 Convert each poem to its id sequence.  Every character was added
    # to the vocabulary above, so direct indexing cannot fail (the previous
    # dict.get could silently produce None on a lookup miss).
    id_sequences = [[word2id[word] for word in poem] for poem in poem_list]
    return id_sequences, vocab, word2id

# Build the corpus at module import time (side effect: reads the dataset file).
id_sequences,vocab,word2id=preprocess('../dataset/poems.txt')

class PoetryDataset(Dataset):
    """Sliding-window next-character dataset over id sequences.

    Each item is a pair (window, target): a seq_len-long slice of a poem
    and the same slice shifted one position to the right.
    """

    def __init__(self, sequences, seq_len):
        self.seq_len = seq_len
        # Every valid window paired with its one-step-shifted target;
        # poems shorter than seq_len + 1 contribute nothing.
        self.data = [
            (seq[start:start + seq_len], seq[start + 1:start + seq_len + 1])
            for seq in sequences
            for start in range(len(seq) - seq_len)
        ]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        window, target = self.data[idx]
        return torch.LongTensor(window), torch.LongTensor(target)
# Sliding windows of 24 characters over every poem in the corpus.
dataset = PoetryDataset(id_sequences,seq_len=24)
# print(len(dataset))        
# print(dataset.__getitem__(0))

class PoetryRNNLM(nn.Module):
    """Character-level RNN language model: embedding -> RNN -> vocab logits."""

    def __init__(self, vocab_size, embedding_dim=128, hidden_dim=256, num_layers=1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # batch_first=True so the (batch, seq) LongTensors produced by the
        # DataLoader are recurred over the *sequence* dimension.  Without it
        # nn.RNN treats dim 0 as time, i.e. the recurrence would run across
        # the batch instead of along each poem.
        self.rnn = nn.RNN(embedding_dim, hidden_dim, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_dim, vocab_size)

    def forward(self, input, hidden=None):
        """Map (batch, seq) token ids to ((batch, seq, vocab) logits, h_n)."""
        embedded = self.embedding(input)         # (batch, seq, embedding_dim)
        output, hn = self.rnn(embedded, hidden)  # (batch, seq, hidden_dim)
        output = self.linear(output)             # (batch, seq, vocab_size)
        return output, hn

# Instantiate with larger dims than the constructor defaults (256/512, 2 layers).
model = PoetryRNNLM(vocab_size=len(vocab),embedding_dim=256,hidden_dim=512,num_layers=2)

def train(model,dataset,epochs,batch_size,lr,device):
    """Train a next-character language model with cross-entropy + Adam.

    Args:
        model: maps (batch, seq) ids -> ((batch, seq, vocab) logits, hidden).
        dataset: yields (input_ids, target_ids) LongTensor pairs.
        epochs, batch_size, lr: the usual optimisation hyperparameters.
        device: torch.device to train on.
    """
    model.train()
    model.to(device)
    dataloader = DataLoader(dataset,batch_size=batch_size,shuffle=True)
    # Distinct names for the loss *module* and each batch's loss *value*
    # (the original bound both to 'loss'/'loss_value', shadowing confusingly).
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),lr=lr)
    for epoch in range(epochs):
        loss_total = 0
        for batch_idx,(x,y) in enumerate(dataloader):
            x,y = x.to(device),y.to(device)
            output,hn = model(x)
            # CrossEntropyLoss expects (batch, classes, seq): move vocab dim to 1.
            loss_value = criterion(output.transpose(1,2),y)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            # Weight by batch size so the epoch figure is a per-sample average.
            loss_total += loss_value.item()*x.shape[0]
            print(f"\repoch:{epoch:0>2}[{'='*(int((batch_idx+1) / len(dataloader) * 50)):<50}]", end="")
        print(f' epoch:{epoch:0>2}, loss:{loss_total/len(dataset):.4f}')
# train(model,dataset,epochs=20,batch_size=32,lr=0.001,device=torch.device('cpu'))

def generate_poem(model, word2idx, vocab, start_token, line_num=4, line_length=7):
    """Sample a poem: line_num lines, each made of two half-lines of
    line_length characters, ended by "，" and "。" respectively.

    The start token seeds the model; when it is in the vocabulary it also
    becomes the first character of the poem (so the first half-line needs
    one fewer sampled character).  Unknown start tokens fall back to the
    <PAD> id as a seed without appearing in the output.
    """
    model.eval()  # inference mode
    pieces = []   # generated characters and punctuation, joined at the end
    remaining = line_length  # characters still to sample for the current half-line
    seed_id = word2idx.get(start_token, word2idx["<PAD>"])
    if seed_id != word2idx["<PAD>"]:
        # Known seed: it counts as the first character of the first half-line.
        pieces.append(vocab[seed_id])
        remaining -= 1
    token = torch.LongTensor([[seed_id]])  # model input, shape (1, 1)
    state = None  # recurrent hidden state, threaded through every call
    with torch.no_grad():
        for _ in range(line_num):
            for punct in ["，", "。\n"]:  # two half-lines per line
                while remaining > 0:
                    logits, state = model(token, state)
                    # Sample the next character from the softmax distribution:
                    # higher-probability characters are chosen more often.
                    distribution = torch.softmax(logits[0, 0], dim=-1)
                    choice = torch.multinomial(distribution, 1)
                    pieces.append(vocab[choice.item()])
                    token = choice.unsqueeze(0)  # back to shape (1, 1)
                    remaining -= 1
                remaining = line_length
                pieces.append(punct)  # close the half-line with punctuation
    return "".join(pieces)

# Sample a 4-line poem of 7-character half-lines seeded with "一".
# NOTE(review): the train(...) call above is commented out, so this prints
# output from an untrained (randomly initialised) model.
print(generate_poem(model, word2id, vocab, start_token="一", line_num=4, line_length=7))