
# Step 1: load the model and tokenizer
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch

# Optional: route Hugging Face downloads through a mirror
# import os
# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Local model path
# model_path = "D:/modelscope/Qwen/Qwen2___5-0___5B-Instruct"  # replace with your actual path
model_path = r"D:/modelscope/deepseek-llm-7b-chat"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Quantization settings belong in a BitsAndBytesConfig object; passing
# load_in_4bit / bnb_4bit_quant_type directly to from_pretrained is
# deprecated and rejected by recent transformers versions.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,               # 4-bit quantization
    bnb_4bit_quant_type="nf4",       # NormalFloat4 data type
    bnb_4bit_compute_dtype=torch.half,
)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=bnb_config,
    device_map="auto",
    torch_dtype=torch.half,
)


# 步骤二：数据需格式化为模型输入模板
def process_func(example):
    """Format one instruction sample into the chat template and tokenize it.

    Args:
        example: dict with 'instruction', 'input' and 'output' keys.

    Returns:
        A tokenizer encoding containing input_ids, attention_mask and labels.
        labels mirror input_ids — without them, Trainer has nothing to
        compute the causal-LM loss against and training fails.
    """
    instruction = f"User: {example['instruction']}{example['input']}\n\n"
    response = f"Assistant: {example['output']}"
    # Concatenate prompt + answer, encode, and truncate over-long samples.
    tokens = tokenizer(instruction + response, truncation=True, max_length=384)
    tokens["labels"] = tokens["input_ids"].copy()
    return tokens
# A single demo sample (Zhen Huan role-play data).
data_json = {
  "instruction": "现在你要扮演皇帝身边的女人--甄嬛",
  "input": "你是谁？",
  "output": "家父是大理寺少卿甄远道。"
}
# Trainer expects train_dataset to be an iterable of examples, not a single
# encoding, so wrap the one processed sample in a list.
tokenized_dataset = [process_func(data_json)]

# Step 3: configure the LoRA parameters
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="CAUSAL_LM",
    r=8,                                  # rank of the low-rank update matrices
    lora_alpha=32,                        # scaling factor applied to the update
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],  # attach to the attention projections
)
model.add_adapter(lora_config)  # inject the adapter layers into the model


# Step 4: set up the training arguments

from transformers import TrainingArguments

# Collect the hyperparameters in one place, then build the arguments object.
_training_kwargs = dict(
    output_dir="./output",
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,
    num_train_epochs=3,
    learning_rate=1e-4,
    optim="paged_adamw_32bit",  # paged optimizer suited to 4-bit training
    logging_steps=10,
    save_steps=100,
)
args = TrainingArguments(**_training_kwargs)


# Step 5: launch training
from transformers import Trainer
from transformers import DataCollatorForSeq2Seq

# The collator pads each batch to a common length at collate time.
collator = DataCollatorForSeq2Seq(tokenizer, padding=True)
trainer = Trainer(
    model=model,
    args=args,
    data_collator=collator,
    train_dataset=tokenized_dataset,
)
trainer.train()


# Step 6: model inference
text = "小姐，别的秀女都在求中选，唯有咱们小姐想被撂牌子，菩萨一定记得真真儿的——"
inputs = tokenizer(f"User: {text}\n\n", return_tensors="pt")
# With device_map="auto" the model weights may sit on GPU while the tokenizer
# returns CPU tensors; move the inputs to the model's device before generate,
# otherwise generation fails with a device-mismatch error.
inputs = inputs.to(model.device)
outputs = model.generate(**inputs, max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
























def main():
    """Entry point: print a greeting for the lora-huggingface project."""
    greeting = "Hello from lora-huggingface!"
    print(greeting)


if __name__ == "__main__":
    main()
