Upload 4 files

- .gitattributes +35 -35
- README.md +16 -12
- app.py +65 -0
- requirements.txt +5 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,12 +1,16 @@
----
-title:
-emoji:
-colorFrom: indigo
-colorTo:
-sdk: gradio
-sdk_version: 4.42.0
-app_file: app.py
-pinned: false
-
-
-
+---
+title: moondream-webui
+emoji: πππ
+colorFrom: indigo
+colorTo: blue
+sdk: gradio
+sdk_version: 4.42.0
+app_file: app.py
+pinned: false
+short_description: demo of batch processing with moondream
+license: apache-2.0
+preload_from_hub:
+- vikhyatk/moondream2
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,65 @@
+import spaces
+import torch
+import re
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from PIL import Image
+
+if torch.cuda.is_available():
+    device, dtype = "cuda", torch.float16
+else:
+    device, dtype = "cpu", torch.float32
+
+model_id = "vikhyatk/moondream2"
+revision = "2024-08-26"
+tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
+moondream = AutoModelForCausalLM.from_pretrained(
+    model_id, trust_remote_code=True, revision=revision, torch_dtype=dtype
+).to(device=device)
+moondream.eval()
+
+@spaces.GPU
+def answer_questions(image_tuples, prompt_text):
+    result = ""
+    Q_and_A = ""
+    prompts = [p.strip() for p in prompt_text.split(',')]
+    image_embeds = [img[0] for img in image_tuples if img[0] is not None]
+
+    #print(f"\nprompts: {prompts}\n\n")
+    answers = []
+    for prompt in prompts:
+        image_answers = moondream.batch_answer(
+            images=[img.convert("RGB") for img in image_embeds],
+            prompts=[prompt] * len(image_embeds),
+            tokenizer=tokenizer,
+        )
+        answers.append(image_answers)
+
+    for i, prompt in enumerate(prompts):
+        Q_and_A += f"### Q: {prompt}\n"
+        for j, image_tuple in enumerate(image_tuples):
+            image_name = f"image{j+1}"
+            answer_text = answers[i][j]
+            Q_and_A += f"**{image_name} A:** \n {answer_text} \n\n"
+
+    result = {'headers': prompts, 'data': answers}
+    #print(f"result\n{result}\n\nQ_and_A\n{Q_and_A}\n\n")
+    return Q_and_A, result
+
+with gr.Blocks() as demo:
+    gr.Markdown("# MoonDream WebUI")
+    gr.Markdown("## π Modify by https://huggingface.co/spaces/Csplk/moondream2-batch-processing")
+    gr.Markdown("## π moondream2\nA tiny vision language model. [GitHub](https://github.com/vikhyatk/moondream)")
+    with gr.Row():
+        img = gr.Gallery(label="Upload Images", type="pil", preview=True, columns=4)
+    with gr.Row():
+        prompt = gr.Textbox(label="Input Prompts", placeholder="Enter prompts (one prompt for each image provided) separated by commas. Ex: Describe this image, What is in this image?", lines=8)
+    with gr.Row():
+        submit = gr.Button("Submit")
+    with gr.Row():
+        output = gr.Markdown(label="Questions and Answers", line_breaks=True)
+    with gr.Row():
+        output2 = gr.Dataframe(label="Structured Dataframe", type="array", wrap=True)
+    submit.click(answer_questions, [img, prompt], [output, output2])
+
+demo.queue().launch()
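For reference, below is a minimal sketch of exercising the same pinned model and `batch_answer` call outside the Gradio UI (a local test, not part of this commit). The image paths `cat.jpg` and `dog.jpg` are placeholders, and the ZeroGPU `spaces.GPU` decorator is omitted since it only matters inside a Space. In the Space itself, the Gallery hands `answer_questions` a list of (image, caption) tuples, which is why that function indexes `img[0]` before calling `.convert("RGB")`; here the images are opened directly.

```python
# Sketch: drive the same vikhyatk/moondream2 revision and batch_answer path as app.py,
# without the Gradio UI or the spaces.GPU decorator. Image paths are placeholders.
import torch
from PIL import Image
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id, revision = "vikhyatk/moondream2", "2024-08-26"
device, dtype = ("cuda", torch.float16) if torch.cuda.is_available() else ("cpu", torch.float32)

tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
moondream = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, revision=revision, torch_dtype=dtype
).to(device=device)
moondream.eval()

# batch_answer pairs images[i] with prompts[i], mirroring the loop in answer_questions().
paths = ["cat.jpg", "dog.jpg"]  # placeholder files; replace with real images
images = [Image.open(p).convert("RGB") for p in paths]
prompts = ["Describe this image."] * len(images)

answers = moondream.batch_answer(images=images, prompts=prompts, tokenizer=tokenizer)
for path, answer in zip(paths, answers):
    print(f"{path}: {answer}")
```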
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+timm
+transformers
+einops
+accelerate
+torch