# Route Hugging Face downloads through the hf-mirror.com mirror (useful where
# huggingface.co is slow or unreachable).
export HF_ENDPOINT=https://hf-mirror.com
# Download the Qwen2-7B-Instruct weights into a local directory.
# NOTE(review): ".model/..." creates a *hidden* directory — possibly a typo for
# "./model/..."; the deploy step below reads from /root/autodl-tmp/model/, so
# confirm the intended path.
huggingface-cli download Qwen/Qwen2-7B-Instruct --local-dir .model/Qwen2-7B-Instruct --repo-type model

# --- Set up LLaMA-Factory -------------------------------------------------
# Clone the trainer into $HOME, create an isolated conda env, and install it
# in editable mode with the torch + metrics extras.
cd ~ || exit 1
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
cd LLaMA-Factory || exit 1
# -y: skip the interactive confirmation so the command also works when this
# file is run non-interactively instead of pasted into a terminal.
conda create -y --name lf python=3.10
# 'conda activate' only works in shells where the conda hook is initialized;
# source it explicitly so activation succeeds inside a script as well.
source "$(conda info --base)/etc/profile.d/conda.sh"
conda activate lf
pip install -e ".[torch,metrics]"

# Register the custom dataset: add this entry to LLaMA-Factory's
# data/dataset_info.json (ShareGPT format; the conversation turns are stored
# under the "conversations" key of 4o_v2.json):
#  "my_dataset": {
#    "file_name": "4o_v2.json",
#    "formatting": "sharegpt",
#    "columns": {
#      "messages": "conversations"
#    }
#  },

# Skip LLaMA-Factory's dependency version check so the separately installed
# vllm/swanlab versions below do not abort startup.
export DISABLE_VERSION_CHECK=1
pip install vllm      # inference engine, also used for deployment below
pip install swanlab   # experiment tracking / logging integration
# Launch the LLaMA-Factory web UI to configure and run the fine-tuning job.
llamafactory-cli webui

# Fine-tuning parameters

# --- Deployment (4x RTX 4090) ---------------------------------------------
# Serve the fine-tuned checkpoint with vLLM, exposed under the API model
# name "qwen".
#   --tensor-parallel-size 4 : shard the model across the 4 GPUs
#   --max-log-len 10         : truncate logged prompts to 10 characters
vllm serve /root/autodl-tmp/model/2025-03-17-20-45-05 --served-model-name "qwen"  --tensor-parallel-size 4  --max-log-len 10

