include .env
# Smoke-test target: prints a banner to confirm make is wired up.
.PHONY : hello
hello:
	@echo '----------- [ hello, this is the hello of makefile ] ------------'

include mk_files/ollama.mk
include libs/example/mk_files/hello.mk
include libs/example/mk_files/tale.mk
include libs/example/mk_files/mem.mk
include libs/example/mk_files/rag.mk
include libs/example/mk_files/fc.mk
include libs/example/mk_files/reflect.mk
include libs/example/mk_files/test.mk
include libs/example/mk_files/qianfan.mk
include libs/example/mk_files/zhipu.mk
include libs/example/mk_files/lcht.mk
include libs/example/mk_files/lght.mk
include libs/example/mk_files/lc_llm_chain.mk
include libs/example/mk_files/write.mk

.PHONY: RUN
# Run the example project (delegates to libs/example's default target).
# Use $(MAKE), not literal `make`, so -j/-n propagate to the sub-make.
RUN:
	@echo '--start--'
	$(MAKE) -C libs/example

.PHONY: port
# Print LLM_PORT (expected to come from the included .env file).
port:
	@echo LLM_PORT: $(LLM_PORT)

.PHONY: install
# Install the example project via its own Makefile.
install:
	$(MAKE) -C libs/example install

.PHONY: ck_update
# Check for / apply updates in the example project.
ck_update:
	$(MAKE) -C libs/example ck_update

# test targets

.PHONY: test
# Run the example project's test suite (its target is named `tst`).
# .PHONY matters here: a file named `test` would otherwise shadow this rule.
test:
	$(MAKE) -C libs/example tst

.PHONY: chatbot_lg_8080
# Run the LangGraph chatbot example with the LLM endpoint pinned to port 8080.
chatbot_lg_8080:
	env LLM_PORT=8080 $(MAKE) -C libs/example helo_lg

.PHONY: llama_cpp_py_test
# Sanity-check the llama-cpp-python installation via the example project.
llama_cpp_py_test:
	$(MAKE) -C libs/example llama_cpp_py_test

# openai api

.PHONY: ser_init
# One-time setup for the OpenAI-compatible server:
# install llama-cpp-python, then download the Qwen2-0.5B GGUF model.
ser_init: llama-cpp-python dl_qwen2_0_5

.PHONY: bge
# Serve the bge embedding model via llama-cpp-python's OpenAI-compatible server.
# NOTE(review): the model path has no .gguf suffix — confirm it matches the
# filename that `dl_bge` actually produces in data/.
bge:
	python -m llama_cpp.server --model data/bge-large-zh-v1.5-f16-gguf --n_ctx 20480

.PHONY: ser
# Serve the Qwen2-0.5B model via llama-cpp-python's OpenAI-compatible server.
ser:
	python -m llama_cpp.server --model data/qwen2-0_5b-instruct-q5_k_m.gguf --n_ctx 20480

.PHONY: tser
# Verify the llama-cpp-python setup, then report success.
tser: llama_cpp_py_test
	@echo llama_cpp_python is ok


# llama.cpp

.PHONY: llm tllm
# llm: start the local llama.cpp server with the Qwen2-0.5B model.
llm: qwen2_0_5

# tllm: smoke-test the completion endpoint of a running server.
tllm: test_openai_api_completion


.PHONY: llama_qwen2_0_5 dl_data
# Download everything, then start the server.
# NOTE: the download-before-run ordering here relies on serial make;
# prerequisite order is not guaranteed under `make -j`.
llama_qwen2_0_5: dl_data qwen2_0_5

# Fetch both the model and the llama.cpp binaries.
dl_data: dl_qwen2_0_5 dl_llama_cpp

.PHONY: dl_qwen2_0_5
# Download the Qwen2-0.5B instruct GGUF from ModelScope into data/.
dl_qwen2_0_5:
	mkdir -p data
	modelscope download --model=qwen/Qwen2-0.5B-Instruct-GGUF --local_dir data qwen2-0_5b-instruct-q5_k_m.gguf

.PHONY: dl_bge
# Download the bge-large-zh embedding model (GGUF) from ModelScope into data/.
# (mkdir added for consistency with dl_qwen2_0_5, so this works standalone.)
# Alternative source: wget https://modelscope.cn/models/milkey/bge-large-zh-v1.5-f16-gguf/resolve/master/bge-large-zh-v1.5-f16.gguf
dl_bge:
	mkdir -p data
	modelscope download --model=milkey/bge-large-zh-v1.5-f16-gguf --local_dir data bge-large-zh-v1.5-f16-gguf

.PHONY: dl_llama_cpp
# Download prebuilt llama.cpp binaries and unpack them under data/.
# Downloads straight into data/ instead of cwd + mv (avoids clobbering
# an unrelated ./llama.cpp.zip).
# NOTE(review): the archive is named .zip but extracted with tar; bsdtar
# handles zip, GNU tar does not — confirm which tar this environment has.
dl_llama_cpp:
	@echo 'dl_llama_cpp'
	mkdir -p data
	wget -O data/llama.cpp.zip https://gitee.com/tim_ai/llm_tools/releases/download/v1/llama.cpp.zip
	tar xvf data/llama.cpp.zip -C data/

.PHONY: qwen2_0_5
# Serve the Qwen2-0.5B GGUF with llama.cpp's llama-server.
# -ngl 24: offload 24 layers to the GPU; -fa: enable flash attention.
qwen2_0_5:
	data/llama.cpp/llama-server -m data/qwen2-0_5b-instruct-q5_k_m.gguf -ngl 24 -fa

.PHONY: test_openai_api_completion
# Smoke-test the llama.cpp /completion endpoint on localhost:8080.
test_openai_api_completion:
	curl --request POST \
		--url http://localhost:8080/completion \
		--header "Content-Type: application/json" \
		--data '{"prompt": "你好","n_predict": 128}'

.PHONY: test_ollama_api_chat
# Smoke-test ollama's native chat API on its default port (11434).
test_ollama_api_chat:
	curl http://localhost:11434/api/chat -d '{"model": "qwen2:0.5b", "messages": [{ "role": "user", "content": "why is the sky blue?" }]}'

.PHONY: test_ollama_api_completions
# Smoke-test ollama's OpenAI-compatible chat completions endpoint.
test_ollama_api_completions:
	curl http://localhost:11434/v1/chat/completions \
		-H "Content-Type: application/json" \
		-d '{"model": "qwen2:0.5b", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello!"}]}'


# env init

.PHONY: init
# Bootstrap a fresh dev box: bpytop, poetry, zsh, Spacemacs (se), base tools.
init: bpytop poetry zsh se base
	@echo '--init--'

.PHONY: zsh
# Install zsh, then oh-my-zsh via its installer (gitee mirror).
zsh:
	apt update
	apt install zsh -y
	curl -fsSL https://gitee.com/pocmon/ohmyzsh/raw/master/tools/install.sh | sh

.PHONY: bpytop llama-cpp-python poetry
# Resource monitor.
bpytop:
	pip install bpytop

# Runtime dependencies for the llama-cpp-python server (`ser`/`bge` targets).
llama-cpp-python:
	pip install llama-cpp-python starlette_context pydantic_settings

# Python dependency/packaging tool.
poetry:
	pip install poetry

.PHONY: init_svim spacevim
# Set up SpaceVim: install it, then copy local config (svc).
init_svim: spacevim svc

# Run the official SpaceVim installer (cn mirror).
spacevim:
	curl -sLf https://spacevim.org/cn/install.sh | bash -s -- -i

.PHONY: svc
# Copy this repo's SpaceVim config into ~/.SpaceVim.d.
svc:
	mkdir -p ~/.SpaceVim.d/autoload
	cp conf/myspacevim.vim ~/.SpaceVim.d/autoload/myspacevim.vim
	cp conf/init.toml ~/.SpaceVim.d/

.PHONY: emacs
# Install a newer Emacs from the ubuntuhandbook1 PPA.
# -y keeps add-apt-repository non-interactive; apt update is required after
# adding the PPA so the new packages are visible to apt install.
emacs:
	add-apt-repository -y ppa:ubuntuhandbook1/emacs
	apt update
	apt install emacs emacs-common -y

.PHONY: se
# Install Spacemacs: WIPES any existing ~/.emacs.d, clones Spacemacs into it,
# then copies this repo's .spacemacs config.
se: se-bin
	rm -rf ~/.emacs.d
	git clone https://github.com/syl20bnr/spacemacs ~/.emacs.d
	cp conf/.spacemacs ~/.spacemacs

.PHONY: se-bin
# Install a tiny `se` launcher that runs emacs.
# The original wrote the word "emacs" with no shebang and dropped arguments;
# this writes a proper sh script that forwards them ($$ escapes $ for make).
se-bin:
	printf '#!/bin/sh\nexec emacs "$$@"\n' > /usr/local/bin/se
	chmod +x /usr/local/bin/se

.PHONY: lynx base
# Text-mode web browser.
lynx:
	apt install -y lynx

# Code-navigation tools.
# NOTE(review): universal-ctags and exuberant-ctags both provide ctags and
# conflict on Debian/Ubuntu — confirm only one is actually needed.
base:
	apt install -y global universal-ctags exuberant-ctags
