from transformers import AutoModelForCausalLM,AutoTokenizer,TrainingArguments,Trainer
from peft import get_peft_model, LoraConfig, TaskType

# Load the base model and its tokenizer from the Hugging Face Hub.
# NOTE(review): "deepseek/deepseek-11m-7b" does not look like a valid Hub repo id
# (likely meant something like "deepseek-ai/deepseek-llm-7b-base") — confirm before running.
model_path = "deepseek/deepseek-11m-7b"
tokenizer = AutoTokenizer.from_pretrained(model_path)
# device_map="auto" lets transformers place the weights on available devices automatically.
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")

# Wrap the base model with LoRA adapters so that only the low-rank
# adapter matrices are trained, not the full 7B parameter set.
_lora_settings = {
    "task_type": TaskType.CAUSAL_LM,  # causal language modeling objective
    "r": 8,                           # rank of the low-rank update matrices
    "lora_alpha": 32,                 # scaling factor applied to the update
    "lora_dropout": 0.1,              # dropout on the adapter path
}
config = LoraConfig(**_lora_settings)
model = get_peft_model(model, config)
# Log how few parameters are actually trainable after wrapping.
model.print_trainable_parameters()

# Tiny toy dataset used to smoke-test the fine-tuning loop:
# the same question/answer pair repeated twice.
_QA_PAIR = {"input": "你好，你叫什么名字？", "output": "我是Deepseek模型。"}
train_data = [dict(_QA_PAIR) for _ in range(2)]


# Format the data for causal-LM fine-tuning.
#
# BUG FIX: the original tokenized inputs and outputs as two separate,
# independently padded batches and never handed `labels` to the Trainer,
# so no loss could be computed on the responses. For causal LM training,
# prompt and response must be tokenized as ONE sequence, with `labels`
# mirroring `input_ids` (the model shifts them internally).
def _encode_example(example):
    """Tokenize prompt+response as a single sequence; labels copy input_ids."""
    enc = tokenizer(
        example["input"] + example["output"],
        truncation=True,
        max_length=512,
    )
    enc["labels"] = list(enc["input_ids"])
    return enc

# A list of feature dicts is a valid map-style dataset for Trainer
# (it supports __len__ and __getitem__).
train_encodings = [_encode_example(d) for d in train_data]
# Kept for backward compatibility with the original module-level name.
labels = [e["labels"] for e in train_encodings]

# Minimal training configuration: one epoch, batch size 2,
# checkpoint written once per epoch into ./results.
_train_config = {
    "output_dir": "./results",
    "per_device_train_batch_size": 2,
    "num_train_epochs": 1,
    "save_strategy": "epoch",
}
training_args = TrainingArguments(**_train_config)

# Run fine-tuning and persist the adapted weights.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_encodings,
)
trainer.train()

# BUG FIX: the tokenizer was saved to "fine_tuned_modei" (typo) while the
# model went to "fine_tuned_model". Both must live in the SAME directory so
# they can be reloaded together via from_pretrained("fine_tuned_model").
model.save_pretrained("fine_tuned_model")
tokenizer.save_pretrained("fine_tuned_model")
