iandennismiller committed
Commit: 2292bb2
Parent(s): 5796f82
llava image describe

Files changed:
- bin/llava-describe.sh +34 -0
- install.sh +1 -0
bin/llava-describe.sh
ADDED
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+SIZE=7b
+
+LLAVA_PROMPT=$(
+cat <<EOF
+Generate a JSON description of the image by adapting the following JSON template:
+
+{
+    extended_description: ""
+    number_of_people: ,
+    objects_in_picture_list: [
+        ""
+    ]
+}
+EOF
+)
+
+if [ "$SIZE" == "7b" ]; then
+    GGML_MODEL=~/.ai/models/llava/mys/ggml_llava-v1.5-7b/ggml-model-f16.gguf
+    MMPROJ_MODEL=~/.ai/models/llava/mys/ggml_llava-v1.5-7b/mmproj-model-f16.gguf
+else
+    GGML_MODEL=~/.ai/models/llava/mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf
+    MMPROJ_MODEL=~/.ai/models/llava/mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf
+fi
+
+llava \
+    --prompt-cache "$HOME/.ai/cache/llava.cache" \
+    --model "$GGML_MODEL" \
+    --mmproj "$MMPROJ_MODEL" \
+    --image "$1" \
+    --prompt "$LLAVA_PROMPT" \
+    --n-gpu-layers 1 \
+    --log-disable 2>/dev/null | grep -v clip_model_load
install.sh
CHANGED
@@ -10,5 +10,6 @@ install -C -v ./bin/llama-finetune.sh ~/.local/bin
 install -C -v ./bin/llama.sh ~/.local/bin
 install -C -v ./bin/llama-menu.sh ~/.local/bin
 install -C -v ./bin/llama-server.sh ~/.local/bin
+install -C -v ./bin/llava-describe.sh ~/.local/bin
 
 echo "done"
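
A minimal sketch of picking up this change (assumes ~/.local/bin is on PATH; the image path is hypothetical):

    ./install.sh                              # copies bin/llava-describe.sh into ~/.local/bin
    llava-describe.sh ~/Pictures/example.jpg  # now invocable from anywhere
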