#!/usr/bin/env bash
# Merge LoRA fine-tuning weights into the base model, optionally convert the
# merged model to GGUF with llama.cpp, and import it into ollama.
set -euo pipefail

# Base model the LoRA adapter was trained on.
modelPath=/app/models/DeepSeek-R1-Distill-Llama-8B
# LoRA adapter weights produced by the previous fine-tuning step.
adapterModelPath=./fine_tuned_kr_model/
# Destination directory for the merged (exported) model.
exportDir=./saves/lora/export/

# Merge the adapter into the base model and export the full model.
# NOTE(review): the original hard-coded these paths again here instead of
# using the variables defined above; reference the variables so the two
# copies cannot drift apart.
llamafactory-cli export \
  --model_name_or_path "$modelPath" \
  --adapter_name_or_path "$adapterModelPath" \
  --template llama3 \
  --export_size 8 \
  --export_dir "$exportDir"

## Use llama.cpp to convert the exported model to GGUF (optional).
python /app/code/llama.cpp/convert_hf_to_gguf.py "/app/code/krtune/saves/lora/export" --outtype f16 --outfile "/app/code/krtune/saves/lora/krtllm-model-f16.gguf"

## Import into ollama using the Modelfile generated by llamafactory-cli.
# Guard the cd so ollama never runs against the wrong directory.
cd /app/code/krtune/saves/lora/export/ || { echo "export dir missing" >&2; exit 1; }
sudo ollama create krtllm -f Modelfile