import itertools
import re
import json
import jsonlines
import psutil
import ujson
import numpy as np
import pandas as pd
from transformers import AutoTokenizer
from datasets import load_dataset
import os
from tqdm import tqdm

# Special-token string literals.
# NOTE(review): neither is used anywhere below — confirm whether they should
# be wired into encoding/decoding or removed.
bos_token = "<s>"
eos_token = "</s>"

# Load the slow (pure-Python) tokenizer from the local "model" directory.
tokenizer = AutoTokenizer.from_pretrained("model", use_fast=False)
# BUGFIX: message previously read "tokenierzer" (typo) — corrected spelling.
print(f'加载的tokenizer词表大小: {len(tokenizer)}')

import torch
from LMConfig import LMConfig
from model import Transformer

# Build the model from its default config and select the best available device.
config = LMConfig()
model = Transformer(config)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device=device)
print(model)

# Load the pretrained checkpoint onto whichever device was selected above.
# BUGFIX: map_location was hard-coded to "cuda:0", which crashes on CPU-only
# machines even though `device` deliberately falls back to CPU.
# strict=False tolerates missing/unexpected keys in the checkpoint.
model.load_state_dict(torch.load("out/pretrain_512_moe.pth", map_location=device), strict=False)
model.eval()  # disable dropout etc. for inference

# First prompt: greedily decode a fixed number of continuation tokens,
# printing the full decoded context at every step as a debug trace.
input_text = "长江是"
input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)

num_tokens_to_generate = 10
generated_tokens = []
with torch.no_grad():
    for step in range(num_tokens_to_generate):
        # Show the growing sequence (prompt + tokens generated so far).
        print(tokenizer.decode(input_ids[0]))
        logits = model(input_ids).logits
        # Greedy choice: argmax over the vocabulary at the final position.
        predicted = logits.argmax(dim=-1)[:, -1]
        generated_tokens.append(predicted.item())
        # Append the new token so the next step conditions on it.
        input_ids = torch.cat([input_ids, predicted.unsqueeze(0)], dim=1)

generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
print(generated_text)


# 准备输入文本
input_text = "长江、黄"
input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)

# 生成多个 token
num_tokens_to_generate = 10  # 要生成的 token 数量
generated_tokens = []

with torch.no_grad():
    for _ in range(num_tokens_to_generate):
        output = model(input_ids)
        next_token = output.logits.argmax(dim=-1)[:, -1]  # 获取最后一个 token 的预测
        generated_tokens.append(next_token.item())  # 将 token ID 添加到列表中
        input_ids = torch.cat([input_ids, next_token.unsqueeze(0)], dim=1)  # 将新 token 添加到输入中

# 将生成的 token IDs 转换为文本
generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)

# 打印最终回复
print(generated_text)