import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer
# Fine-tuned LoRA checkpoint to run inference against.
saved_path = '/home/yunpeng/checkpoints/jiuding_interview_llm/2024_11_21_21_20_57_448154/checkpoint-150'
# saved_path = f'{SAVE_DIR}/checkpoint-100'

# Single-GPU inference device.
dev = torch.device('cuda:0')

# Load the PEFT (base + LoRA adapter) causal LM, move it to the GPU,
# and switch to eval mode (disables dropout etc.).
model_infer = AutoPeftModelForCausalLM.from_pretrained(saved_path, trust_remote_code=True)
model_infer = model_infer.to(dev)
model_infer = model_infer.eval()

# Tokenizer is saved alongside the checkpoint.
tokenizer_infer = AutoTokenizer.from_pretrained(saved_path, trust_remote_code=True)

# Chinese completion prompt ("Beijing Jiuding Tuye Technology Co., Ltd. is ...").
test_txt = '北京九鼎图业科技有限公司是'

# Tokenize to PyTorch tensors, then move every tensor to the GPU.
test_inputs = tokenizer_infer(test_txt, return_tensors='pt')
test_inputs = {name: tensor.to(dev) for name, tensor in test_inputs.items()}

# Greedy, token-by-token generation with a KV cache: after the first full
# forward pass over the prompt, only the newly sampled token is fed in and
# `past_key_values` carries the attention state for the prefix.
with torch.no_grad():
    past_key_values = None
    result = ''
    eos_id = tokenizer_infer.eos_token_id  # may be None for some tokenizers
    for i in range(100):
        n = i + 1
        outputs = model_infer(
            **test_inputs,
            past_key_values=past_key_values,
            use_cache=True,
        )
        # outputs[0] are the LM logits (batch, seq_len, vocab); keep the
        # last position, which predicts the next token.
        print('logits', outputs[0].shape)
        logits = outputs[0][:, -1, :]
        past_key_values = outputs[1]
        next_tokens = logits.argmax(-1)  # greedy decoding

        # Stop early once the model emits its end-of-sequence token.
        if eos_id is not None and next_tokens.item() == eos_id:
            break

        # Next step feeds ONLY the new token (the cache holds the prefix),
        # so input_ids / token_type_ids have length 1 — but the attention
        # mask must still cover past + current positions, so it is extended
        # rather than replaced. Integer dtypes: ids and masks are expected
        # as long tensors, not the float32 that torch.Tensor(...) produces.
        test_inputs['input_ids'] = next_tokens.unsqueeze(0)
        test_inputs['token_type_ids'] = torch.zeros((1, 1), dtype=torch.long, device=dev)
        test_inputs['attention_mask'] = torch.cat(
            [
                test_inputs['attention_mask'],
                torch.ones((1, 1), dtype=torch.long, device=dev),
            ],
            dim=-1,
        )

        # Decode just the newly generated token and append it to the running text.
        new_text = tokenizer_infer.batch_decode(test_inputs['input_ids'], skip_special_tokens=False)
        result += new_text[0]
        print(n, result)