edbeeching HF staff committed on
Commit
5f87faa
·
verified ·
1 Parent(s): da35bb6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -32,7 +32,7 @@ def bot_streaming(message, history):
32
 
33
  if image is None:
34
  gr.Error("You need to upload an image for LLaVA to work.")
35
- prompt=f"USER: <image>\n{message['text']}\nASSISTANT:" #f"[INST] <image>\n{message['text']} [/INST]"
36
  image = Image.open(image).convert("RGB")
37
  inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
38
 
@@ -43,7 +43,7 @@ def bot_streaming(message, history):
43
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
44
  thread.start()
45
 
46
- text_prompt =f"USER: \n{message['text']}\nASSISTANT: " #f"[INST] \n{message['text']} [/INST]"
47
 
48
 
49
  buffer = ""
@@ -57,7 +57,8 @@ def bot_streaming(message, history):
57
 
58
 
59
  demo = gr.ChatInterface(fn=bot_streaming, title="VLM Playground", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
60
- {"text": "How to make this pastry?", "files":["./baklava.png"]}],
61
- description="Playground for internal VLMs. Change the model ID and revision under the environments of the Space settings.",
 
62
  stop_btn="Stop Generation", multimodal=True)
63
  demo.launch(debug=True)
 
32
 
33
  if image is None:
34
  gr.Error("You need to upload an image for LLaVA to work.")
35
+ prompt=f"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\n{message['text']}\nASSISTANT:" #f"[INST] <image>\n{message['text']} [/INST]"
36
  image = Image.open(image).convert("RGB")
37
  inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
38
 
 
43
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
44
  thread.start()
45
 
46
+ text_prompt =f"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: \n{message['text']}\nASSISTANT: " #f"[INST] \n{message['text']} [/INST]"
47
 
48
 
49
  buffer = ""
 
57
 
58
 
59
  demo = gr.ChatInterface(fn=bot_streaming, title="VLM Playground", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
60
+ {"text": "How to make this pastry?", "files":["./baklava.png"]},
61
+ {"text": "What is this?", "files":["./pizza2.jpeg"]}],
62
+ description="VLM Playground", #for internal VLMs. Change the model ID and revision under the environments of the Space settings.
63
  stop_btn="Stop Generation", multimodal=True)
64
  demo.launch(debug=True)