# Declare command-style targets as phony so same-named files can never
# shadow them. NOTE: `help` was missing — a file called `help` would have
# silently broken `make help`.
.PHONY: create-env download-model convert-gguf load-ollama call-api help

# Create the conda virtual environment and install Python dependencies.
# BUG FIX: each recipe line runs in its own shell, so the original
# `conda activate py310torch210` had no effect on the subsequent
# `pip install` line (and `conda activate` does not work in
# non-interactive shells anyway). Use `conda run -n <env>` to execute
# the install inside the target environment instead.
create-env:
	conda create -n py310torch210 python=3.10
	conda run -n py310torch210 pip install -r requirements.txt

# ModelScope model id to download; override from the command line, e.g.
#   make download-model MODEL_ID=Qwen/Qwen2-1.5B-Instruct
# Default preserves the original hard-coded behavior.
MODEL_ID ?= Qwen/Qwen2-0.5B-Instruct

# Download the model from ModelScope
download-model:
	python main.py --model_id $(MODEL_ID)

# Local directory of the downloaded ModelScope model. The original recipe
# hard-coded a machine-specific absolute Windows path, which broke the
# target on every other machine; make it overridable while keeping the
# same default:
#   make convert-gguf HF_MODEL_DIR=models/Qwen/Qwen2-0.5B-Instruct
HF_MODEL_DIR ?= D:/ProjectWorkspace/PythonProjects2/mnnLlmPrjoects/models/Qwen/Qwen2-0.5B-Instruct
# Output path for the converted gguf file (f16 precision).
GGUF_OUT ?= ggufModels/qwen2_0.5b_instruct_f16.gguf

# Convert the ModelScope (HF-format) model to gguf
convert-gguf:
	python utils/convert_hf_to_gguf.py $(HF_MODEL_DIR) --outtype f16 --verbose --outfile $(GGUF_OUT)

# Load the gguf model into ollama.
# This step is manual: it only points the user at the walkthrough document
# ("ollama加载gguf模型.md"); the echoed message is user-facing and is kept
# verbatim.
load-ollama:
	@echo "请参考 ollama加载gguf模型.md 文件进行操作"

# Call the ollama HTTP API via the Python client script.
# NOTE(review): assumes the ollama server is already running with the model
# loaded (see the load-ollama target) — confirm before invoking.
call-api:
	python ./ollamaApiPython/callOllamaApi.py

# Show available targets with a short (user-facing, Chinese) description.
# Fix: removed the trailing whitespace after the last recipe line; echoed
# strings are kept byte-for-byte.
help:
	@echo "可用的命令："
	@echo "  make create-env     - 创建并配置虚拟环境"
	@echo "  make download-model - 下载Qwen模型"
	@echo "  make convert-gguf   - 转换模型为gguf格式"
	@echo "  make load-ollama    - 加载gguf模型到ollama"
	@echo "  make call-api       - 调用ollama API"