Reuben Tan committed on
Commit 290c75a
1 Parent(s): 4fa6d68

remove img upload button

Files changed (2)
  1. app.py +17 -4
  2. eval_configs/conversation_demo.yaml +1 -1
app.py CHANGED
@@ -111,7 +111,7 @@ def gradio_reset(chat_state, img_list):
     img_list = []
     return None, gr.update(value=None, interactive=True), gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your video first', interactive=False),gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list
 
-def upload_imgorvideo(gr_video, gr_img, text_input, chat_state,chatbot):
+'''def upload_imgorvideo(gr_video, gr_img, text_input, chat_state,chatbot):
     if args.model_type == 'vicuna':
         chat_state = default_conversation.copy()
     else:
@@ -134,7 +134,20 @@ def upload_imgorvideo(gr_video, gr_img, text_input, chat_state,chatbot):
         return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list,chatbot
     else:
         # img_list = []
-        return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot
+        return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot'''
+
+def upload_imgorvideo(gr_video, text_input, chat_state, chatbot):
+    if args.model_type == 'vicuna':
+        chat_state = default_conversation.copy()
+    else:
+        chat_state = conv_llava_llama_2.copy()
+
+    print(gr_video)
+    chatbot = chatbot + [((gr_video,), None)]
+    chat_state.system = "You are able to understand the visual content that the user provides. Follow the instructions carefully and explain your answers in detail."
+    img_list = []
+    llm_message = chat.upload_video_without_audio(gr_video, chat_state, img_list)
+    return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list,chatbot
 
 def gradio_ask(user_message, chatbot, chat_state):
     if len(user_message) == 0:
@@ -239,8 +252,8 @@ with gr.Blocks() as demo:
     ], inputs=[video, text_input])'''
 
     gr.Markdown(cite_markdown)
-    #upload_button.click(upload_imgorvideo, [video, text_input, chat_state,chatbot], [video, text_input, upload_button, chat_state, img_list,chatbot])
-    upload_button.click(upload_imgorvideo, [video, image, text_input, chat_state,chatbot], [video, image, text_input, upload_button, chat_state, img_list,chatbot])
+    upload_button.click(upload_imgorvideo, [video, text_input, chat_state,chatbot], [video, text_input, upload_button, chat_state, img_list,chatbot])
+    #upload_button.click(upload_imgorvideo, [video, image, text_input, chat_state,chatbot], [video, image, text_input, upload_button, chat_state, img_list,chatbot])
 
     text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(
         gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
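The commit drops the `image` component from both the handler signature and the `upload_button.click(...)` call, because Gradio maps the `inputs` list positionally onto the handler's parameters and the `outputs` list onto its returned values, so the two must change together. Below is a minimal, self-contained sketch of that pattern; everything outside the names that appear in the diff (the standalone `upload_video` stub, the component definitions, the launch block) is illustrative, not the demo's actual code.

# Minimal sketch of the video-only wiring; `upload_video` and the components
# below are illustrative stand-ins, not the demo's actual code.
import gradio as gr

def upload_video(gr_video, text_input, chat_state, chatbot):
    # Four parameters correspond, in order, to the four components in `inputs`.
    chatbot = chatbot + [((gr_video,), None)]
    img_list = []  # placeholder for the visual features the real demo extracts
    return (
        gr.update(interactive=False),                                     # video
        gr.update(interactive=True, placeholder='Type and press Enter'),  # text_input
        gr.update(value="Start Chatting", interactive=False),             # upload_button
        chat_state,
        img_list,
        chatbot,
    )

with gr.Blocks() as demo:
    chat_state = gr.State()
    img_list = gr.State([])
    video = gr.Video()
    text_input = gr.Textbox(placeholder='Please upload your video first', interactive=False)
    upload_button = gr.Button("Upload & Start Chat")
    chatbot = gr.Chatbot()
    # Four inputs -> four handler parameters; six outputs -> six returned values.
    upload_button.click(
        upload_video,
        [video, text_input, chat_state, chatbot],
        [video, text_input, upload_button, chat_state, img_list, chatbot],
    )

if __name__ == "__main__":
    demo.launch()

Keeping the older image-aware call as a comment, as the diff does, makes it easy to restore once multi-input support returns.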
eval_configs/conversation_demo.yaml CHANGED
@@ -16,7 +16,7 @@ model:
   imagebind_ckpt_path: "ckpt/imagebind_path/"
 
   # The ckpt of vision branch after stage1 pretrained,
-  ckpt: 'ckpt/VL_LLaMA_2_7B_Finetuned.pth' # you can use our pretrained ckpt from https://huggingface.co/DAMO-NLP-SG/Video-LLaMA-2-13B-Pretrained/
+  ckpt: 'ckpt/VL_LLaMA_2_7B_Finetuned.pth' # you can use our pretrained ckpt from https://huggingface.co/DAMO-NLP-SG/Video-LLaMA-2-13B-Pretrained/
 
 
   # only train vision branch
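Per the hunk header, the `ckpt` and `imagebind_ckpt_path` fields sit under the YAML's `model:` section. A minimal sketch of reading them with plain PyYAML, assuming that layout rather than the repository's own config loader:

# Sketch only: assumes the keys live under a top-level `model:` section,
# as the hunk header suggests; not the repository's config-loading code.
import yaml

with open("eval_configs/conversation_demo.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["model"]["ckpt"])                 # e.g. ckpt/VL_LLaMA_2_7B_Finetuned.pth
print(cfg["model"]["imagebind_ckpt_path"])  # e.g. ckpt/imagebind_path/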