@echo off
setlocal
rem ---------------------------------------------------------------------------
rem Generate Java FFM (Panama) bindings for llama.cpp using jextract.
rem
rem Prerequisites (edit the four paths below to match the local machine):
rem   JEXTRACT_PATH - root of the jextract distribution
rem   LLAMA_PATH    - checkout of llama.cpp (relative to this script)
rem   CRT_PATH      - Windows SDK ucrt include directory
rem   MSVC_PATH     - MSVC toolset include directory
rem
rem Output: generated sources are written under .\src (package "llama").
rem Exit code: 0 on success, 1 if jextract is missing or fails.
rem ---------------------------------------------------------------------------

rem Run relative to the script's own directory; quoted in case of spaces.
pushd "%~dp0"

rem set "VAR=value" form protects parentheses and avoids trailing-space bugs.
set "JEXTRACT_PATH=C:\jextract-25"
set "LLAMA_PATH=..\..\llama.cpp"
set "CRT_PATH=C:\Program Files (x86)\Windows Kits\10\Include\10.0.19041.0\ucrt"
set "MSVC_PATH=C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.44.35207\include"

rem Fail fast with a clear message instead of a cryptic "not recognized" error.
if not exist "%JEXTRACT_PATH%\bin\jextract.bat" (
    echo ERROR: jextract not found at "%JEXTRACT_PATH%\bin\jextract.bat" 1>&2
    goto :fail
)

rem -l: libraries to bind against; -t: target Java package; --include-* filters
rem keep the generated surface limited to the symbols this project actually uses.
call "%JEXTRACT_PATH%\bin\jextract.bat" ^
 -I "%LLAMA_PATH%\include" ^
 -I "%LLAMA_PATH%\ggml\include" ^
 -I "%CRT_PATH%" ^
 -I "%MSVC_PATH%" ^
 -l llama ^
 -l ggml ^
 -t llama ^
 --output src ^
 --include-struct llama_model_params ^
 --include-struct llama_context_params ^
 --include-struct llama_batch ^
 --include-function ggml_backend_load_all ^
 --include-function llama_model_default_params ^
 --include-function llama_model_load_from_file ^
 --include-function llama_context_default_params ^
 --include-function llama_init_from_model ^
 --include-function llama_get_memory ^
 --include-function llama_model_chat_template ^
 --include-function llama_model_get_vocab ^
 --include-function llama_vocab_n_tokens ^
 --include-function llama_tokenize ^
 --include-function llama_memory_seq_rm ^
 --include-function llama_memory_seq_add ^
 --include-function llama_decode ^
 --include-function llama_get_logits_ith ^
 --include-function llama_vocab_is_eog ^
 --include-function llama_token_to_piece ^
 --include-function llama_free ^
 --include-function llama_model_free ^
 llama.h

rem 'if errorlevel 1' is true for any exit code >= 1 (portable across cmd versions).
if errorlevel 1 (
    echo ERROR: jextract failed with exit code %ERRORLEVEL% 1>&2
    goto :fail
)

echo Bindings generated under "%CD%\src".
popd
endlocal
pause
exit /b 0

:fail
popd
endlocal
pause
exit /b 1