Daemontatox committed
Commit c5ee08d
1 Parent(s): 11ec7bf

Update app.py

Files changed (1)
  1. app.py +38 -43
app.py CHANGED
@@ -7,21 +7,23 @@ import gradio as gr
 from gradio import FileData
 import time
 import spaces
+
 ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
 model = MllamaForConditionalGeneration.from_pretrained(ckpt,
     torch_dtype=torch.bfloat16).to("cuda")
 processor = AutoProcessor.from_pretrained(ckpt)
 
+SYSTEM_PROMPT = """You are a Vision Language Model specialized in interpreting and extracting data from visual documents, including timesheets, invoices, charts, and other structured or semi-structured documents.
+
+Your task is to analyze the provided visual data and respond to queries with concise answers, such as single words, numbers, or short phrases.
+These documents may include tables, labels, handwritten or printed text, and graphical elements.
 
+Focus on delivering accurate, succinct answers based on the visual and contextual information provided. Avoid additional explanation unless absolutely necessary."""
 @spaces.GPU
-def bot_streaming(message, history, max_new_tokens=250):
-
+def bot_streaming(message, history, max_new_tokens=4048):
     txt = message["text"]
-    ext_buffer = f"{txt}"
-
-    messages= []
+    messages = [{"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}]
     images = []
-
 
     for i, msg in enumerate(history):
         if isinstance(msg[0], tuple):
@@ -29,35 +31,30 @@ def bot_streaming(message, history, max_new_tokens=250):
             messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
             images.append(Image.open(msg[0][0]).convert("RGB"))
         elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
-            # messages are already handled
             pass
-        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str): # text only turn
+        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):
             messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
             messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
 
-    # add current message
     if len(message["files"]) == 1:
-
-        if isinstance(message["files"][0], str): # examples
+        if isinstance(message["files"][0], str):
             image = Image.open(message["files"][0]).convert("RGB")
-        else: # regular input
+        else:
             image = Image.open(message["files"][0]["path"]).convert("RGB")
         images.append(image)
         messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
     else:
         messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
 
-
     texts = processor.apply_chat_template(messages, add_generation_prompt=True)
 
     if images == []:
         inputs = processor(text=texts, return_tensors="pt").to("cuda")
     else:
         inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
+
     streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
-
     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
-    generated_text = ""
 
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
@@ -65,36 +62,34 @@ def bot_streaming(message, history, max_new_tokens=250):
 
     for new_text in streamer:
         buffer += new_text
-        generated_text_without_prompt = buffer
         time.sleep(0.01)
         yield buffer
 
-
-demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
-    [{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]},
-    200],
-    [{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]},
-    250],
-    [{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]},
-    250],
-    [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]},
-    250],
-    [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]},
-    250],
+demo = gr.ChatInterface(
+    fn=bot_streaming,
+    title="Multimodal Llama",
+    examples=[
+        [{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]}, 200],
+        [{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]}, 250],
+        [{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]}, 250],
+        [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]}, 250],
+        [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]}, 250],
     ],
-    textbox=gr.MultimodalTextbox(),
-    additional_inputs = [gr.Slider(
-        minimum=10,
-        maximum=500,
-        value=250,
-        step=10,
-        label="Maximum number of new tokens to generate",
-    )
-    ],
-    cache_examples=False,
-    description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
-    stop_btn="Stop Generation",
-    fill_height=True,
-    multimodal=True)
-
+    textbox=gr.MultimodalTextbox(),
+    additional_inputs=[
+        gr.Slider(
+            minimum=10,
+            maximum=500,
+            value=4048,
+            step=10,
+            label="Maximum number of new tokens to generate"
+        )
+    ],
+    cache_examples=False,
+    description="Try Multimodal Llama by transformers. Upload an image and start chatting about it, or try one of the examples below.",
+    stop_btn="Stop Generation",
+    fill_height=True,
+    multimodal=True
+)
+
  demo.launch(debug=True)
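
For reference, a minimal sketch of how the updated flow could be exercised outside the Gradio UI: it builds the same system-prompt-first message list, applies the chat template, and streams tokens from a background generate() call. It assumes the model, processor, and SYSTEM_PROMPT defined in app.py above; the stream_answer helper and its image_path argument are illustrative placeholders, not part of the commit.

from threading import Thread
from PIL import Image
from transformers import TextIteratorStreamer

def stream_answer(question, image_path, max_new_tokens=250):
    # Same structure the updated bot_streaming builds: system prompt first,
    # then one user turn carrying the question text and an image slot.
    messages = [
        {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]},
        {"role": "user", "content": [{"type": "text", "text": question}, {"type": "image"}]},
    ]
    image = Image.open(image_path).convert("RGB")

    texts = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=texts, images=[image], return_tensors="pt").to("cuda")

    # Run generate() in a background thread and consume tokens as they arrive,
    # mirroring the TextIteratorStreamer pattern used in bot_streaming.
    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
    thread = Thread(target=model.generate,
                    kwargs=dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens))
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        print(new_text, end="", flush=True)
    thread.join()
    return buffer

Calling it with one of the bundled example images, e.g. stream_answer("How long does it take from invoice date to due date?", "./examples/invoice.png"), would print the answer as it streams and return the full text.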