
import datetime

import torch
from modelscope import AutoModel, AutoTokenizer, BitsAndBytesConfig
from peft import (
    LoraConfig,
    PeftModel,
    get_peft_model,
    prepare_model_for_kbit_training,
    set_peft_model_state_dict,
)
from torch.utils.data import DataLoader
from tqdm import tqdm

from qlora_utils import lora_utils, dataset, collator

load_in_4bit = True
max_new_tokens = 256

# Path to the locally cached ChatGLM3-6B checkpoint.
checkpoint_path = "C:\\Users\\16014\\.cache\\modelscope\\hub\\models\\ZhipuAI\\chatglm3-6b"

# Load the base model once (fp16 on GPU) and its tokenizer.
base_model = model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True).half().cuda()

tokenizer = AutoTokenizer.from_pretrained(checkpoint_path, trust_remote_code=True)

# ---- One-time adapter setup ----------------------------------------------
# Previously all of this ran inside the interactive loop, re-importing peft,
# re-wrapping the model and re-loading adapter weights on EVERY user query.
#
# Method 1: load the LoRA adapter directly from the saved PEFT directory.
# NOTE: PeftModel.from_pretrained injects the adapter layers into
# base_model's modules in place, so after this point a bare
# base_model.chat() would also use the adapter — the un-tuned baseline is
# recovered below via model.disable_adapter().
model = PeftModel.from_pretrained(base_model, "./qlora/qlora_saver")

# LoRA hyperparameters, kept for reference — presumably these match the
# configuration the saved adapter was trained with (TODO: confirm against
# ./qlora/qlora_saver/adapter_config.json).
target_modules = ["query_key_value"]  # lora_utils.find_all_linear_names(model)
config = LoraConfig(
    r=16,           # LoRA rank; larger -> more trainable params (QLoRA paper uses 64)
    lora_alpha=16,  # LoRA scaling factor; 16 or 32 is typical
    target_modules=target_modules,
    lora_dropout=0.1,  # dropout rate applied to the LoRA weights
    bias="none",
    task_type="CAUSAL_LM",
)

# Method 2: load the same adapter weights manually from a raw state dict.
# We load them into the already-wrapped model rather than calling
# get_peft_model(base_model, config) a second time, which would inject a
# SECOND set of adapter layers into the same (already adapted) modules.
# NOTE(review): torch.load is pickle-based — only load checkpoints you
# trust; consider weights_only=True on torch >= 1.13.
adapters_weights = torch.load("./qlora/qlora_saver/adapter_model.bin", map_location="cuda")
set_peft_model_state_dict(model, adapters_weights)
model = model.to("cuda")

# Example prompt:
# '类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领'
while True:

    input_text = input("请输入您的文案提示：")
    print(f'输入：\n{input_text}')
    print("---------------------------------------------------------------------------------")

    # Un-tuned baseline: temporarily disable the injected LoRA layers so the
    # base weights answer (the in-place adapter injection above would
    # otherwise contaminate this comparison from the 2nd iteration onward).
    with model.disable_adapter():
        response, history = base_model.chat(tokenizer=tokenizer, query=input_text)
    print(f'微调前：\n{response}')
    print("************************")

    response, history = model.chat(tokenizer=tokenizer, query=input_text)
    print(f'第一种微调后(LoRA): \n{response}')
    print("************************")

    response, history = model.chat(tokenizer=tokenizer, query=input_text)
    print(f'第二种微调后(QLoRA): \n{response}')


