comidan committed
Commit cf32435
Parent: f7c8e95

Update app.py

Files changed (1):
  1. app.py +34 -27
app.py CHANGED
@@ -75,6 +75,28 @@ print('Initialization Finished')
 # Gradio Setting
 # ========================================
 
+def gradio_ask(user_message, chatbot, chat_state):
+    print("building prompt...")
+    if len(user_message) == 0:
+        return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
+    chat.ask(user_message, chat_state)
+    chatbot = chatbot + [[user_message, None]]
+    return '', chatbot, chat_state
+
+
+def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
+    print("generating answer...")
+    llm_message = chat.answer(conv=chat_state,
+                              img_list=img_list,
+                              num_beams=1,
+                              temperature=temperature,
+                              max_new_tokens=240,
+                              max_length=511)[0]
+    chatbot[-1][1] = llm_message
+    print(chat_state.get_prompt())
+    print(chat_state)
+    return chatbot, chat_state, img_list
+
 def gradio_reset(chat_state, img_list):
     if chat_state is not None:
         chat_state.messages = []
@@ -104,8 +126,16 @@ def upload_imgorvideo(gr_video, gr_img, text_input,chatbot,audio_flag):
         )
         img_list = []
         llm_message = chat.upload_video(gr_video, chat_state, img_list)
-        print(chat_state.messages)
-        return text_input,chat_state, chatbot
+        llm_message = chat.ask(text_input, chat_state)
+        llm_message = chat.answer(conv=chat_state,
+                                  img_list=img_list,
+                                  num_beams=1,
+                                  temperature=1.0,
+                                  max_new_tokens=240,
+                                  max_length=511)[0]
+        print(llm_message)
+        output = [[llm_message]]
+        return llm_message, output
     elif gr_img is not None:
         print(gr_img)
         chatbot = [((gr_img,), None)]
@@ -125,28 +155,6 @@ def upload_imgorvideo(gr_video, gr_img, text_input,chatbot,audio_flag):
         # img_list = []
         return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot
 
-def gradio_ask(user_message, chatbot, chat_state):
-    print("building prompt...")
-    if len(user_message) == 0:
-        return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
-    chat.ask(user_message, chat_state)
-    chatbot = chatbot + [[user_message, None]]
-    return '', chatbot, chat_state
-
-
-def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
-    print("generating answer...")
-    llm_message = chat.answer(conv=chat_state,
-                              img_list=img_list,
-                              num_beams=1,
-                              temperature=temperature,
-                              max_new_tokens=240,
-                              max_length=511)[0]
-    chatbot[-1][1] = llm_message
-    print(chat_state.get_prompt())
-    print(chat_state)
-    return chatbot, chat_state, img_list
-
 
 title = """
 <h1 align="center"><a href="https://github.com/DAMO-NLP-SG/Video-LLaMA"><img src="https://s1.ax1x.com/2023/05/22/p9oQ0FP.jpg", alt="Video-LLaMA" border="0" style="margin: 0 auto; height: 200px;" /></a> </h1>
@@ -219,14 +227,13 @@ with gr.Blocks() as demo:
             img_list = gr.State()
             chatbot = gr.Chatbot(label='Video-LLaMA')
             text_input = gr.Textbox(label='User', placeholder='Upload your image/video first, or directly click the examples at the bottom of the page.', interactive=False)
+            output = gr.Textbox(label='Output')
 
 
     gr.Markdown(cite_markdown)
     #upload_button.click(upload_imgorvideo, inputs=[video, image, text_input], outputs=[chat_state,chatbot])
 
-    text_input.submit(upload_imgorvideo, inputs=[video, image, text_input], outputs=[text_input,chatbot, chat_state]).then(
-        gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
-    )
+    text_input.submit(upload_imgorvideo, inputs=[video, image, text_input], outputs=[output])
     #clear.click(gradio_reset, [chat_state, img_list], [chatbot, video, image, text_input, upload_button, chat_state, img_list], queue=False)
 
 demo.queue().launch(debug=True)
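
For context, this commit replaces the chained gradio_ask/gradio_answer event pipeline with a single submit callback: upload_imgorvideo now uploads the video, asks the question, and generates the answer in one pass, writing the result to the new Output textbox. The following is a minimal, self-contained sketch of that wiring pattern, not the repository's code: FakeChat and upload_and_answer are hypothetical stand-ins for the real Video-LLaMA Chat wrapper, and the ask/answer call signatures are taken from the diff above.

    # Hypothetical sketch of the single-callback flow adopted in this commit.
    import gradio as gr

    class FakeChat:
        """Stand-in for the Video-LLaMA Chat wrapper used in app.py."""
        def upload_video(self, video_path, chat_state, img_list):
            img_list.append(video_path)  # the real code appends video embeddings
            return "video received"

        def ask(self, text, chat_state):
            chat_state.append(("user", text))

        def answer(self, conv, img_list, num_beams, temperature, max_new_tokens, max_length):
            # the real code runs the LLM; here we return a canned reply
            return [f"(stub answer about {img_list[-1]!r})"]

    chat = FakeChat()

    def upload_and_answer(gr_video, text_input):
        # One callback does upload -> ask -> answer, as in the new upload_imgorvideo.
        if gr_video is None:
            return "Please upload a video first."
        chat_state, img_list = [], []
        chat.upload_video(gr_video, chat_state, img_list)
        chat.ask(text_input, chat_state)
        llm_message = chat.answer(conv=chat_state, img_list=img_list,
                                  num_beams=1, temperature=1.0,
                                  max_new_tokens=240, max_length=511)[0]
        return llm_message

    with gr.Blocks() as demo:
        video = gr.Video()
        text_input = gr.Textbox(label='User')
        output = gr.Textbox(label='Output')
        # A single .submit() replaces the previous .then()-chained gradio_ask/gradio_answer.
        text_input.submit(upload_and_answer, inputs=[video, text_input], outputs=[output])

    demo.queue().launch(debug=True)

Note that the real upload_imgorvideo returns two values (llm_message, output) while the new submit wiring lists only one output component, and num_beams is hard-coded to 1 inside the call; the sketch mirrors the single-output wiring for simplicity.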