not-lain committed on
Commit 11eab42
1 Parent(s): 1cea0e1

Update app.py

Files changed (1):
  1. app.py +21 -54
app.py CHANGED
@@ -1,12 +1,12 @@
 import spaces
 import os
 import re
-import time
 import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM
 from transformers import TextIteratorStreamer
 from threading import Thread
+from PIL import Image
 
 model_name = 'AIDC-AI/Ovis1.6-Gemma2-9B'
 
@@ -21,26 +21,28 @@ streamer = TextIteratorStreamer(text_tokenizer, skip_prompt=True, skip_special_t
 image_placeholder = '<image>'
 cur_dir = os.path.dirname(os.path.abspath(__file__))
 
-def submit_chat(chatbot, text_input):
-    response = ''
-    chatbot.append((text_input, response))
-    return chatbot, ''
 
 @spaces.GPU
-def ovis_chat(chatbot, image_input):
+def ovis_chat(message, history):
+    try:
+        image_input = Image.open(message["files"][0]).convert("RGB")
+    except:
+        image_input = None
     # preprocess inputs
     conversations = []
     response = ""
-    text_input = chatbot[-1][0]
-    for query, response in chatbot[:-1]:
-        conversations.append({
-            "from": "human",
-            "value": query
-        })
-        conversations.append({
-            "from": "gpt",
-            "value": response
-        })
+    text_input = message["text"]
+    for msg in enumerate(history):
+        if msg["role"] == "user" and "size" not in msg.keys():
+            conversations.append({
+                "from": "human",
+                "value": msg["text"]
+            })
+        elif msg["role"] == "assistant":
+            conversations.append({
+                "from": "gpt",
+                "value": response
+            })
     text_input = text_input.replace(image_placeholder, '')
     conversations.append({
         "from": "human",
@@ -79,19 +81,8 @@ def ovis_chat(chatbot, image_input):
     thread.start()
     for new_text in streamer:
         response += new_text
-        chatbot[-1][1] = response
-        yield chatbot
+        yield response
     thread.join()
-    # debug
-    print('*'*60)
-    print('*'*60)
-    print('OVIS_CONV_START')
-    for i, (request, answer) in enumerate(chatbot[:-1], 1):
-        print(f'Q{i}:\n {request}')
-        print(f'A{i}:\n {answer}')
-    print('New_Q:\n', text_input)
-    print('New_A:\n', response)
-    print('OVIS_CONV_END')
 
 def clear_chat():
     return [], None, ""
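
The switch to yield response makes the handler a plain generator that ChatInterface can stream: each yield replaces the partial reply in the UI. For context, a sketch of the generate-in-a-background-thread idiom the unchanged surrounding lines rely on; the variable names (model, text_tokenizer, input_ids, attention_mask) and the max_new_tokens value are stand-ins, not taken from this file.

    from threading import Thread
    from transformers import TextIteratorStreamer

    # Sketch (assumed names): stream partial text while model.generate runs
    # in a background thread.
    def stream_reply(model, text_tokenizer, input_ids, attention_mask):
        streamer = TextIteratorStreamer(text_tokenizer, skip_prompt=True, skip_special_tokens=True)
        gen_kwargs = dict(inputs=input_ids, attention_mask=attention_mask,
                          max_new_tokens=1024, streamer=streamer)
        thread = Thread(target=model.generate, kwargs=gen_kwargs)
        thread.start()
        response = ""
        for new_text in streamer:    # decoded text arrives incrementally
            response += new_text
            yield response           # ChatInterface redraws the partial reply
        thread.join()
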
@@ -138,29 +129,5 @@ latex_delimiters_set = [{
     "display": True
 }]
 
-text_input = gr.Textbox(label="prompt", placeholder="Enter your text here...", lines=1, container=False)
-with gr.Blocks(title=model_name.split('/')[-1], theme=gr.themes.Ocean()) as demo:
-    gr.HTML(html)
-    with gr.Row():
-        with gr.Column(scale=3):
-            image_input = gr.Image(label="image", height=350, type="pil")
-            gr.Examples(
-                examples=[
-                    [f"{cur_dir}/examples/case0.png", "Find the area of the shaded region."],
-                    [f"{cur_dir}/examples/case1.png", "explain this model to me."],
-                    [f"{cur_dir}/examples/case2.png", "What is net profit margin as a percentage of total revenue?"],
-                ],
-                inputs=[image_input, text_input]
-            )
-        with gr.Column(scale=7):
-            chatbot = gr.Chatbot(label="Ovis", layout="panel", height=600, show_copy_button=True, latex_delimiters=latex_delimiters_set)
-            text_input.render()
-            with gr.Row():
-                send_btn = gr.Button("Send", variant="primary")
-                clear_btn = gr.Button("Clear", variant="secondary")
-
-    send_click_event = send_btn.click(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat, [chatbot, image_input], chatbot)
-    submit_event = text_input.submit(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat, [chatbot, image_input], chatbot)
-    clear_btn.click(clear_chat, outputs=[chatbot, image_input, text_input])
-
-demo.launch()
+demo = gr.ChatInterface(fn=ovis_chat, type="messages", textbox=gr.MultimodalTextbox(), multimodal=True)
+demo.launch(debug=True)
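
The entire Blocks layout (image column, example prompts, Send/Clear buttons, and the submit_chat → ovis_chat event chain) is replaced by a single ChatInterface call, which handles submission, streaming, and clearing on its own. A sketch of that same call with the title and theme carried over from the removed layout; the file_types restriction is an illustrative assumption, not something this commit sets.

    import gradio as gr

    # Sketch only: the one-call UI, reusing the old title/theme; assumes the
    # module's existing model_name and ovis_chat.
    demo = gr.ChatInterface(
        fn=ovis_chat,                                     # generator defined above
        type="messages",                                  # history as {"role": ..., "content": ...}
        multimodal=True,                                  # message as {"text": ..., "files": [...]}
        textbox=gr.MultimodalTextbox(file_types=["image"]),
        title=model_name.split('/')[-1],
        theme=gr.themes.Ocean(),
    )
    demo.launch(debug=True)
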
 