import os

import torch

from model import GPT, GPTConfig
from train import tokenizer

# Pick the best available device instead of hard-coding Apple's MPS backend:
# prefer MPS (Apple silicon), then CUDA, then fall back to CPU so the script
# also runs on machines without an Apple GPU. On the original target (MPS
# available) this resolves to "mps", same as before.
device = (
    "mps" if torch.backends.mps.is_available()
    else "cuda" if torch.cuda.is_available()
    else "cpu"
)
out_dir = "out"  # directory holding the checkpoint written by train.py


init_from = "resume"
if init_from == 'resume':
    # Restore model weights from the checkpoint produced by train.py.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints you trust.
    checkpoint = torch.load(os.path.join(out_dir, 'ckpt.pt'), map_location=device)
    model = GPT(GPTConfig(**checkpoint['model_args']))
    state_dict = checkpoint['model']
    # torch.compile() wraps parameter names with '_orig_mod.'; strip that
    # prefix so the keys line up with the uncompiled model's parameters.
    prefix = '_orig_mod.'
    for key in list(state_dict):
        if key.startswith(prefix):
            state_dict[key[len(prefix):]] = state_dict.pop(key)
    model.load_state_dict(state_dict)


model.eval()      # disable dropout so sampling is not perturbed by training-mode noise
model.to(device)

# Sampling configuration.
max_new_tokens = 12   # tokens to generate after the prompt
num_samples = 1       # number of independent samples to draw (was a magic range(1))
temperature = 1.0     # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 12            # retain only the top_k most likely tokens, clamp others to have 0 probability

# Encode the prompt and add a leading batch dimension: shape (1, prompt_len).
start_ids = tokenizer.encode("女人")
x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]

# run generation (no gradients needed for inference)
with torch.no_grad():
    for _ in range(num_samples):
        y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
        # y has shape (1, prompt_len + max_new_tokens); decode the single row.
        print(tokenizer.decode(y[0].tolist()))

if __name__ == '__main__':
    # NOTE(review): everything above runs at import time; this guard only
    # protects the closing banner when executed as a script.
    banner = "==========over=========="
    print(banner)
