#!/bin/bash
MODEL_DIR="maykeye_tinyllama"
METADATA_FILE="maykeye_tinyllama-metadata.json"

###############################################################################
# Pull the model folder, llamafile (for the engine) and llama.cpp (for the conversion script)
echo == Prep Environment ==
git submodule update --init

###############################################################################
echo == Build and prep the llamafile engine executable ==
pushd llamafile
make -j8
# This is where each executable is located, for reference purposes, as of 2024-04-05.
# Determined by running `sudo make install PREFIX=/usr/local`:
# ./o/llamafile/zipalign --> /usr/local/bin/zipalign
# ./o/llama.cpp/main/main --> /usr/local/bin/llamafile
# ./o/llama.cpp/imatrix/imatrix --> /usr/local/bin/llamafile-imatrix
# ./o/llama.cpp/quantize/quantize --> /usr/local/bin/llamafile-quantize
# ./build/llamafile-convert --> /usr/local/bin/llamafile-convert
# ./o/llama.cpp/perplexity/perplexity --> /usr/local/bin/llamafile-perplexity
# ./o/llama.cpp/llava/llava-quantize --> /usr/local/bin/llava-quantize
popd

###############################################################################
echo == What is our llamafile name going to be? ==
OUTFILE=$(./llama.cpp/convert-hf-to-gguf.py ${MODEL_DIR} --metadata ${METADATA_FILE} --outtype f16 --get-outfile)
echo We will be aiming to generate ${OUTFILE}.llamafile

###############################################################################
echo == Convert from safetensor to gguf ==
./llama.cpp/convert-hf-to-gguf.py ${MODEL_DIR} --metadata ${METADATA_FILE} --outtype f16 --verbose
mv ${MODEL_DIR}/${OUTFILE}.gguf ${OUTFILE}.gguf

###############################################################################
echo == Generating Llamafile ==
cp ./llamafile/o/llama.cpp/main/main ${OUTFILE}.llamafile

# Create an .args file with settings defaults
# (assumed minimal defaults: `-m` points the engine at the embedded gguf
#  weights, and `...` marks where any extra user CLI arguments are inserted,
#  following the convention documented in the llamafile README)
cat >.args <<EOF
-m
${OUTFILE}.gguf
...
EOF
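###############################################################################
# A sketch of the final packaging step, assuming the zipalign tool built above
# behaves as documented in the llamafile README: it embeds the gguf weights
# and the .args defaults into the engine binary, so the result is a single
# self-contained executable that needs no external model file.
./llamafile/o/llamafile/zipalign -j0 ${OUTFILE}.llamafile ${OUTFILE}.gguf .args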
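###############################################################################
# Optional smoke test (a sketch, assuming the packaging above succeeded):
# mark the result executable and run one short completion to confirm the
# weights and default args were embedded correctly. The flags are standard
# llama.cpp main options: -p prompt, -n max tokens, -e escape processing.
chmod +x ${OUTFILE}.llamafile
./${OUTFILE}.llamafile -e -p "hello world" -n 16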