import torch
from transformers import AutoTokenizer

from JiuTian import LMConfig
from JiuTian import Transformer

# Load the tokenizer (slow, pure-Python implementation).
tokenizer = AutoTokenizer.from_pretrained('model/mateconv_tokenizer', use_fast=False)

# Prefer GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the model from its default config and move it to the target device.
lm_config = LMConfig()
model = Transformer(lm_config)
model.to(device)

# Load pretrained weights [change this path to your own checkpoint].
model.load_state_dict(torch.load('out/pretrain_768.pth', map_location=device))
model.eval()  # evaluation mode: disables dropout etc.

# Interactive greedy-decoding loop: type a prompt, print the model's
# continuation. Type 'exit' to quit.
while True:
    input_text = input("请输入问题：")
    # strip() so trailing whitespace/newline junk still triggers exit
    if input_text.strip() == 'exit':
        break

    # Encode the prompt and move it to the model's device; shape (1, seq).
    input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)

    num_tokens_to_generate = 22  # maximum number of tokens to generate
    generated_tokens = []

    with torch.no_grad():
        for _ in range(num_tokens_to_generate):
            # Full forward pass over the growing sequence each step
            # (no KV cache used here).
            output = model(input_ids)
            # Greedy decoding: argmax over the vocab at the last position.
            next_token = output.logits.argmax(dim=-1)[:, -1]
            token_id = next_token.item()
            # Fix: stop as soon as the model emits EOS instead of blindly
            # decoding past the end of the answer.
            if tokenizer.eos_token_id is not None and token_id == tokenizer.eos_token_id:
                break
            generated_tokens.append(token_id)
            # Append the chosen token so the next step conditions on it.
            input_ids = torch.cat([input_ids, next_token.unsqueeze(0)], dim=1)

    # Decode the generated token IDs back to text, dropping special tokens.
    generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)

    # Print the final reply.
    print(generated_text)