from torch.optim import AdamW

from peft import LoraConfig, get_peft_model

# Wrap the (externally defined) base_model with LoRA adapters so that only the
# small low-rank adapter matrices are trained, drastically reducing GPU memory.
config = LoraConfig(
    r=8,  # rank of the low-rank decomposition matrices
    lora_alpha=32,  # scaling factor applied to the adapter output (alpha / r)
    target_modules=["query", "value"],  # attention projections to adapt
    lora_dropout=0.1,  # dropout applied inside the LoRA layers
)
model = get_peft_model(base_model, config)
# Only adapter parameters are trainable; pass just those to the optimizer so it
# does not track (or allocate state for) the frozen base-model weights.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = AdamW(trainable_params, lr=3e-4)