from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model
from peft import PeftModel
import torch
import os

# Path to the fine-tuning dataset.
# NOTE(review): not used anywhere in this inference script — looks like a leftover; confirm before removing.
data_dir = '/data/datasets/customs/wiki_data.json'
# Base (pre-trained) Llama-2-7b model downloaded via ModelScope.
pretrain_model_dir = "/data/models/modelscope/modelscope/Llama-2-7b-ms"

# Training output directory and the specific LoRA adapter checkpoint to load.
save_dir = '/data/logs/Llama-2-7b-ms_lora_tuning_8bit_wiki_data'
lora_dir = os.path.join(save_dir, "checkpoint-9058")

tokenizer = AutoTokenizer.from_pretrained(pretrain_model_dir)
# Right padding is required for training (batch > 1 may not converge otherwise).
# NOTE(review): for batched *generation*, left padding is usually recommended — harmless here since
# the script generates from a single unpadded prompt, but verify before batching inference.
tokenizer.padding_side = "right"
# Presumably 2 is Llama-2's </s> (eos) token id, reused as pad because Llama-2 ships no pad token — TODO confirm.
tokenizer.pad_token_id = 2

# With multiple GPUs you can drop device_map="auto"; otherwise the model gets sharded across devices.
# load_in_8bit=True quantizes the base weights with bitsandbytes and device_map="auto" already
# places them on the GPU, so no further .cuda() / .half() casting is needed — casting a
# quantized model is unsupported and .cuda() raises on 8-bit models in current transformers.
model = AutoModelForCausalLM.from_pretrained(pretrain_model_dir, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16,
                                             device_map="auto", load_in_8bit=True)
# Attach the trained LoRA adapter on top of the frozen 8-bit base model.
peft_model = PeftModel.from_pretrained(model=model, model_id=lora_dir)
# Inference only: no gradient hooks (enable_input_require_grads is a training-time API).
peft_model.eval()

# Build the "Human: ...\n\nAssistant: " prompt and move the encoded tensors to the model's device.
prompt = "Human: {}\n{}".format("部署时的常见错误", "").strip() + "\n\nAssistant: "
ipt = tokenizer(prompt, return_tensors="pt").to(peft_model.device)
# Greedy decoding (do_sample=False): temperature/top_p only apply when sampling,
# so they are omitted here instead of being silently ignored with a warning.
print(tokenizer.decode(peft_model.generate(**ipt, max_length=256, do_sample=False)[0],
                       skip_special_tokens=True))

# ipt = tokenizer("Human: {}\n{}".format("部署时的常见问题", "").strip() + "\n\nAssistant: ", return_tensors="pt").to(
#     peft_model.device)
# print(tokenizer.decode(peft_model.generate(**ipt, max_length=256, do_sample=False)[0], skip_special_tokens=True))

# config = LoraConfig(task_type=TaskType.CAUSAL_LM, )
# model = get_peft_model(model, config)

# peft_model.print_trainable_parameters()
# merge_model = peft_model.merge_and_unload()
# merge_model.save_pretrained(os.path.join(save_dir,"lora_model_for_custom"))
#
# model.eval()
# ipt = tokenizer("Human: {}\n{}".format("你好", "").strip() + "\n\nAssistant: ", return_tensors="pt").to(model.device)
# tokenizer.decode(model.generate(**ipt, max_length=512, do_sample=True, eos_token_id=tokenizer.eos_token_id)[0], skip_special_tokens=True)
