import torch  # core PyTorch: tensors, autograd, optimizers
from torch import nn  # neural-network building blocks
from torchtext.datasets import PennTreebank  # PennTreebank dataset loader
from torchtext.vocab import build_vocab_from_iterator  # build a vocabulary from a token iterator
from torchtext.data.utils import get_tokenizer  # tokenizer factory
from torch.utils.data import DataLoader, Dataset  # batching utilities and Dataset base class

# Hyperparameters
BATCH_SIZE=32          # sequences per mini-batch
NUM_STEPS=35           # fixed length (in tokens) of each training sequence
EMBEDING_DIM = 100     # word-embedding dimension
HIDDEN_SIZE = 128      # RNN hidden-state size
LEARNING_RATE = 0.001  # Adam learning rate
NUM_EPOCHS = 2         # training epochs

class PTBdataset(Dataset):
    """Wrap raw text lines as fixed-length (input, target) pairs for
    next-token language modelling.

    Each line is tokenized and mapped to vocabulary ids, then truncated or
    right-padded with '<pad>' to exactly ``num_steps`` tokens.  ``__getitem__``
    returns the sequence shifted by one position: input = tokens[:-1],
    target = tokens[1:], each of length ``num_steps - 1``.
    """

    def __init__(self, data, vocab, tokenizer, num_steps):
        # dtype=torch.long is explicit so that an empty line still yields an
        # integer tensor; a bare torch.tensor([]) defaults to float32 and
        # would make the torch.cat with the long pad tensor below fail.
        self.data = [
            torch.tensor([vocab[token] for token in tokenizer(line)], dtype=torch.long)
            for line in data
        ]
        self.num_steps = num_steps  # fixed length of every stored sequence
        self.vocab = vocab          # kept to look up the '<pad>' id

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        tokens = self.data[idx]
        if len(tokens) >= self.num_steps:
            tokens = tokens[:self.num_steps]  # truncate long lines
        else:
            # Pad short lines up to num_steps with the '<pad>' id.
            tokens = torch.cat([
                tokens,
                torch.full((self.num_steps - len(tokens),), self.vocab['<pad>'], dtype=torch.long),
            ])
        # Shift by one token: the model predicts tokens[t+1] from tokens[t].
        return tokens[:-1], tokens[1:]

def data_loader(batch_size, num_steps):
    """Build PennTreebank train/test/valid DataLoaders and the vocabulary."""
    # Materialize the three raw-text splits.
    train_iter, test_iter, valid_iter = PennTreebank(root='./data', split=('train', 'test', 'valid'))
    train_lines = list(train_iter)
    test_lines = list(test_iter)
    valid_lines = list(valid_iter)

    # Word-level tokenizer.
    tokenizer = get_tokenizer('basic_english')

    # Vocabulary built from the training split only; specials mark the
    # unknown / padding / begin / end tokens.
    vocab = build_vocab_from_iterator(
        (tokenizer(line) for line in train_lines),
        specials=['<unk>', '<pad>', '<bos>', '<eos>'],
    )
    vocab.set_default_index(vocab['<unk>'])  # out-of-vocabulary words map to <unk>

    # Wrap each split as fixed-length (input, target) samples.
    train_dataset = PTBdataset(train_lines, vocab, tokenizer, num_steps)
    test_dataset = PTBdataset(test_lines, vocab, tokenizer, num_steps)
    valid_dataset = PTBdataset(valid_lines, vocab, tokenizer, num_steps)

    # Only the training loader is shuffled.
    train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    valid_loader = DataLoader(valid_dataset, batch_size=batch_size)

    return train_loader, test_loader, valid_loader, vocab

class RNNmodel(nn.Module):
    """Simple RNN language model: embedding -> RNN -> vocab-size projection.

    Expects token-id input of shape (batch, seq_len) and returns logits of
    shape (batch, seq_len, vocab_size) plus the final hidden state.
    """

    def __init__(self, vocab_size, embed_size, hidden_size):
        super(RNNmodel, self).__init__()
        # Token-id -> dense vector lookup, trained jointly with the model.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # batch_first=True: the DataLoader yields (batch, seq) batches.
        # Without it nn.RNN treats dim 0 as the time axis and would run the
        # recurrence across the batch dimension instead of across tokens.
        self.rnn = nn.RNN(embed_size, hidden_size, batch_first=True)
        # Project hidden states to per-token vocabulary logits.
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x, hidden_state=None):
        """x: (batch, seq_len) token ids; hidden_state: optional (1, batch, hidden)."""
        x = self.embedding(x)                          # (batch, seq, embed)
        out, hidden_state = self.rnn(x, hidden_state)  # (batch, seq, hidden)
        out = self.fc(out)                             # (batch, seq, vocab)
        return out, hidden_state
    
def train(model, train_loader, optimizer, criterion, device, num_epochs, vocab):
    """Run ``num_epochs`` of teacher-forced training, printing mean loss per epoch."""
    model.train()
    vocab_size = len(vocab)  # hoisted: constant across batches
    for epoch in range(num_epochs):
        total_loss = 0
        for inputs, targets in train_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            optimizer.zero_grad()
            logits, _ = model(inputs)
            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CrossEntropyLoss.
            loss = criterion(logits.view(-1, vocab_size), targets.view(-1))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"epoch{epoch+1}, loss: {total_loss/len(train_loader):.4f}")
      
def evaluate(model, valid_loader, criterion, device, vocab):
    """Return the mean cross-entropy loss over ``valid_loader`` (no gradients)."""
    model.eval()
    vocab_size = len(vocab)
    total_loss = 0
    with torch.no_grad():
        for inputs, targets in valid_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            logits, _ = model(inputs)
            total_loss += criterion(logits.view(-1, vocab_size), targets.view(-1)).item()
    return total_loss / len(valid_loader)

# Perplexity = exp(mean cross-entropy loss).
def calculate_PPL(loss):
    """Return the perplexity for a scalar ``loss`` as a 0-d tensor."""
    return torch.tensor(loss).exp()



def generate_text(model, input_ids, vocab, max_len=20):
    """
    Greedily generate up to ``max_len`` tokens continuing ``input_ids``.

    Args:
        model: trained language model returning (logits, hidden_state).
        input_ids: (1, prompt_len) tensor of token ids.
        vocab: vocabulary object (unused here; kept for interface stability).
        max_len: number of tokens to generate.

    Returns:
        List of token ids: the prompt followed by the generated tokens.
    """
    model.eval()
    generated = input_ids
    hidden = None
    with torch.no_grad():
        # Condition the hidden state on the whole prompt (not just its last
        # token); the old code discarded everything before the final token,
        # so multi-token prompts had no effect on generation.
        if generated.size(1) > 1:
            _, hidden = model(generated[:, :-1], hidden)
        for _ in range(max_len):
            output, hidden = model(generated[:, -1:], hidden)   # feed last token
            next_token = torch.argmax(output, dim=-1)           # greedy decoding
            generated = torch.cat((generated, next_token), dim=1)
    return generated.squeeze(0).tolist()

def test_and_generate(model, test_loader, vocab, device, max_len=20, num_samples=5):
    """
    Generate continuations for a few test sentences and print them next to
    the originals.

    Args:
        model: trained language model.
        test_loader: DataLoader yielding (inputs, targets) batches.
        vocab: vocabulary object; ``get_itos()`` maps ids back to words.
        device: torch device for the inputs.
        max_len: maximum generated length per sentence.
        num_samples: number of sentences to show from the first batch.
    """
    model.eval()
    itos = vocab.get_itos()  # hoisted id -> word table
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            inputs = inputs.to(device)

            for i in range(min(num_samples, inputs.size(0))):
                sentence_ids = inputs[i].unsqueeze(0)  # single sentence, (1, seq_len)
                original_sentence = ' '.join(itos[idx] for idx in sentence_ids[0].tolist())

                # Generate starting from the sentence's first token only.
                generated_ids = generate_text(model, sentence_ids[:, :1], vocab, max_len=max_len)
                generated_sentence = ' '.join(itos[idx] for idx in generated_ids)

                print(f"\nSample {batch_idx * num_samples + i + 1}:")
                print(f"Original Sentence : {original_sentence}")
                print(f"Generated Sentence: {generated_sentence}")

            break  # one batch is enough for a spot check


def main():
    """End-to-end pipeline: load data, train, evaluate, report PPL, generate."""
    # Step 1: data loaders and vocabulary.
    print('step 1: 加载数据')
    train_loader, test_loader, valid_loader, vocab = data_loader(BATCH_SIZE, NUM_STEPS)

    # Step 2: model on the best available device.
    print('step 2 加载模型')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RNNmodel(len(vocab), EMBEDING_DIM, HIDDEN_SIZE).to(device)

    # Step 3: loss (padding positions are ignored) and optimizer.
    criterion = nn.CrossEntropyLoss(ignore_index=vocab['<pad>'])
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

    # Step 4: training.
    print('step 4 trainning.......')
    train(model, train_loader, optimizer, criterion, device, NUM_EPOCHS, vocab)

    # Step 5: validation loss.
    print('step 5 evaluating.......')
    valid_loss = evaluate(model, valid_loader, criterion, device, vocab)
    print(f'valid loss: {valid_loss:.4f}')

    # Steps 6-7: test loss and perplexity.
    print('step 6 testing.......')
    test_loss = evaluate(model, test_loader, criterion, device, vocab)
    print(f'test loss: {test_loss:.4f}')
    print(f'PPL: {calculate_PPL(test_loss):.4f}')

    # Step 8: qualitative input/output check.
    test_and_generate(model, test_loader, vocab, device)


# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()

