Update README.md
Browse files
README.md
CHANGED
@@ -158,6 +158,8 @@ cd whisper.cpp
|
|
158 |
GGML_CUDA=1 make -j # assuming CUDA is available. see docs
|
159 |
ln -s server ~/.local/bin/whisper_cpp_server # (just put it somewhere in $PATH)
|
160 |
|
|
|
|
|
161 |
whisper_cpp_server -l en -m models/ggml-tiny.en.bin --port 7777
|
162 |
cd whisper_dictation
|
163 |
./whisper_cpp_client.py
|
|
|
158 |
GGML_CUDA=1 make -j # assuming CUDA is available. see docs
|
159 |
ln -s server ~/.local/bin/whisper_cpp_server # (just put it somewhere in $PATH)
|
160 |
|
161 |
+
# -ngl option assumes an AI accelerator like CUDA is available
|
162 |
+
llama-server --hf-repo hellork/law-chat-IQ4_NL-GGUF --hf-file law-chat-iq4_nl-imat.gguf -c 2048 -ngl 17 --port 8888
|
163 |
whisper_cpp_server -l en -m models/ggml-tiny.en.bin --port 7777
|
164 |
cd whisper_dictation
|
165 |
./whisper_cpp_client.py
|