fcole committed
Commit 5e27961 • 1 Parent(s): 8d9f2b2

First commit

Files changed (2)
  1. app.py +62 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,62 @@
+import spaces
+import argparse
+import torch
+import re
+import gradio as gr
+from threading import Thread
+from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM
+
+parser = argparse.ArgumentParser()
+
+if torch.cuda.is_available():
+    device, dtype = "cuda", torch.float16
+else:
+    device, dtype = "cpu", torch.float32
+
+model_id = "vikhyatk/moondream2"
+tokenizer = AutoTokenizer.from_pretrained(model_id, revision="2024-03-06")
+moondream = AutoModelForCausalLM.from_pretrained(
+    model_id, trust_remote_code=True, revision="2024-03-06"
+).to(device=device, dtype=dtype)
+moondream.eval()
+
+
+@spaces.GPU(duration=10)
+def answer_question(img, prompt):
+    image_embeds = moondream.encode_image(img)
+    streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)
+    thread = Thread(
+        target=moondream.answer_question,
+        kwargs={
+            "image_embeds": image_embeds,
+            "question": prompt,
+            "tokenizer": tokenizer,
+            "streamer": streamer,
+        },
+    )
+    thread.start()
+
+    buffer = ""
+    for new_text in streamer:
+        clean_text = re.sub("<$|<END$", "", new_text)
+        buffer += clean_text
+        yield buffer
+
+
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        # 🌔 moondream2
+        A tiny vision language model. [GitHub](https://github.com/vikhyat/moondream)
+        """
+    )
+    with gr.Row():
+        prompt = gr.Textbox(label="Input", placeholder="Type here...", scale=4)
+        submit = gr.Button("Submit")
+    with gr.Row():
+        img = gr.Image(type="pil", label="Upload an Image")
+        output = gr.TextArea(label="Response")
+    submit.click(answer_question, [img, prompt], output)
+    prompt.submit(answer_question, [img, prompt], output)
+
+demo.queue().launch()
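
For orientation, the model API that answer_question wraps can also be exercised directly, without Gradio or token streaming. The following is a minimal sketch, not part of this commit, assuming the pinned moondream2 revision's answer_question returns the full answer string when no streamer is passed; the image path and question are placeholders.

# Sketch only (not part of this commit): direct use of the moondream2 API
# that app.py wraps, without Gradio or token streaming.
import torch
from PIL import Image
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "vikhyatk/moondream2"
revision = "2024-03-06"  # same pinned revision as app.py
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, revision=revision
).to(device=device, dtype=dtype)
model.eval()

image = Image.open("example.jpg")         # placeholder image path
image_embeds = model.encode_image(image)  # same call app.py makes per request
# Assumed to return the decoded answer as a string when no streamer is given.
answer = model.answer_question(image_embeds, "What is in this image?", tokenizer)
print(answer)

In app.py the same two calls run with a TextIteratorStreamer inside a background Thread, so partial output can be yielded to the Gradio TextArea as it is generated.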
requirements.txt ADDED
@@ -0,0 +1,3 @@
+timm==0.9.12
+transformers==4.36.2
+einops==0.7.0
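
These pins cover transformers plus timm and einops, which the model's trust_remote_code implementation appears to rely on; torch, gradio, and spaces are not listed, presumably because the Spaces runtime provides them, so a local run would need to install those separately.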