from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
import torch
import torch_npu  # Ascend NPU backend for PyTorch
from train import LLM, Config
# NOTE(review): importing transfer_to_npu is side-effecting — it redirects
# CUDA-targeted calls to the NPU backend; the imported name itself is unused.
from torch_npu.contrib import transfer_to_npu
# Run the NPU in eager mode (no JIT graph compilation).
torch.npu.set_compile_mode(jit_compile=False)

from model_lora import replace_layers_with_lora,count_lora_parameters


# Prompt used for both generation runs below.
# NOTE(review): this shadows the builtin `str` and is referenced by several
# later statements, so renaming it must touch every use site at once —
# flagged here rather than changed.
str = "运动员经常使用哪些方法来提高他们的体力和耐力？"
t = AutoTokenizer.from_pretrained('/media/nvme1n1_dist/tokenizer_demo/model/')
# t = AutoTokenizer.from_pretrained('./tokenizer')
# Register the custom architecture so from_pretrained can resolve the
# "small_model" model_type found in the checkpoint's config.
AutoConfig.register("small_model", Config)
AutoModelForCausalLM.register(Config, LLM)
model = AutoModelForCausalLM.from_pretrained('/media/nvme1n1_dist/temp/llm_related-main/train_llm_from_scratch/saves/sft1')



def load_lora_weights(model, load_path):
    """Load saved LoRA adapter weights into *model* in place.

    Only checkpoint entries whose names exist in the model's state dict are
    applied; unknown keys are silently skipped (same tolerance as the
    original implementation).

    Args:
        model: module whose LoRA layers should receive the weights
            (``replace_layers_with_lora`` must already have been applied).
        load_path: path or file-like object of a state dict saved with
            ``torch.save``.
    """
    # map_location='cpu' makes the load device-agnostic (the adapter may
    # have been saved from an NPU/GPU tensor).
    lora_weights = torch.load(load_path, map_location="cpu")
    model_keys = set(model.state_dict().keys())
    # Filter to keys the model actually has; strict=False then tolerates
    # the many non-LoRA parameters that are absent from the adapter file.
    matched = {name: tensor for name, tensor in lora_weights.items() if name in model_keys}
    model.load_state_dict(matched, strict=False)

# Wrap target layers with LoRA adapters *before* loading the adapter weights,
# so the checkpoint's lora_* keys have matching modules to land in.
replace_layers_with_lora(model,rank=2, alpha=4)
# Load the saved adapter weights.
load_lora_weights(model, "lora_adapter.pth")
lora_params = count_lora_parameters(model,target_keywords=["q_proj", "v_proj"])
print(f"LoRA 参数量: {lora_params}")
# Move to the NPU and switch to inference mode (disables dropout etc.).
model.npu().eval()

# input_data = [t.bos_token_id] + t.encode(str)
input_data = t.apply_chat_template([{'role':'user', 'content':str}])
print(input_data)

# NOTE(review): this `generate` is the custom method on LLM from train.py, not
# the transformers API — presumed signature (inputs_dict, eos_token_id,
# max_new_tokens, ...); confirm against train.py. The 'DPO:' label looks
# misleading here (this model was loaded from saves/sft1 + LoRA) — verify.
for token in model.generate({"input_ids":torch.tensor(input_data).unsqueeze(0).npu(), "labels":None}, t.eos_token_id, 512, stream=False,temperature=0.8, top_k=3,repetition_penalty=1.2):
    print('DPO:  ', t.decode(token[0]))
 
 
# --- Second run: DPO-stage checkpoint for comparison ------------------------
# NOTE: Config/LLM are already imported and registered above. Calling
# AutoConfig.register("small_model", ...) a second time raises
# ValueError ("'small_model' is already used by a Transformers config"),
# so the duplicate import and register calls were removed.
sft_model = AutoModelForCausalLM.from_pretrained('/media/nvme1n1_dist/temp/llm_related-main/train_llm_from_scratch/saves/dpo',local_files_only=True,
    trust_remote_code=True )
# .eval() added for consistency with the first model: inference should not
# run with dropout active.
sft_model = sft_model.npu().eval()
input_data = t.apply_chat_template([{'role':'user', 'content':str}])
print(input_data)

for token in sft_model.generate({"input_ids":torch.tensor(input_data).unsqueeze(0).npu(), "labels":None}, t.eos_token_id, 512, stream=False,temperature=0.8, top_k=3,repetition_penalty=1.2):
    print('sft:  ',t.decode(token[0]))
