# CMAKE generated file: DO NOT EDIT!
# Generated by "Unix Makefiles" Generator, CMake Version 3.22
# compile C with /usr/bin/cc
# compile CXX with /usr/bin/c++
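
# C compilation settings for this target: preprocessor definitions, include search paths, and compiler flags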
C_DEFINES = -DGGML_BACKEND_SHARED -DGGML_SHARED -DGGML_USE_CPU
C_INCLUDES = -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/examples" -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/examples/gguf-hash/deps" -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/ggml/src/../include"
C_FLAGS = -O3 -DNDEBUG -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wdouble-promotion
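
# C++ compilation settings for this target: preprocessor definitions, include search paths, and compiler flags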
CXX_DEFINES = -DGGML_BACKEND_SHARED -DGGML_SHARED -DGGML_USE_CPU
CXX_INCLUDES = -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/examples" -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/examples/gguf-hash/deps" -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/ggml/src/../include"
CXX_FLAGS = -O3 -DNDEBUG -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-array-bounds -Wextra-semi