from openmind import run_train

def main() -> None:
    """Launch a short LoRA SFT fine-tuning run of Qwen2-0.5B with openMind.

    Runs supervised fine-tuning (``stage="sft"``) with LoRA adapters on the
    Alpaca datasets. ``max_steps=10`` caps the run at 10 optimizer steps, so
    this is a smoke-test / demo configuration rather than a full training run
    (``save_steps=20000`` means no intermediate checkpoint is ever written
    within those 10 steps).
    """
    run_train(
        model_id="Qwen2-0.5B",
        stage="sft",                     # supervised fine-tuning
        do_train=True,
        finetuning_type="lora",          # parameter-efficient: train LoRA adapters only
        # NOTE(review): space after the comma — confirm openmind strips
        # whitespace when splitting the dataset list, or drop the space.
        dataset="alpaca_zh_51k, alpaca",
        output_dir="saves/qwen2_0.5b_lora",
        overwrite_output_dir=True,       # re-running replaces previous outputs
        logging_steps=1,                 # log every step (fine for a 10-step run)
        save_steps=20000,                # effectively disables checkpointing here
        per_device_train_batch_size=2,
        gradient_accumulation_steps=1,
        learning_rate=1.0e-5,
        bf16=True,                       # bfloat16 mixed precision
        max_steps=10,                    # short smoke-test run
        seed=1234,                       # reproducibility
    )


# Guard the entry point so importing this module does not launch training.
if __name__ == "__main__":
    main()