Reuben Tan committed on
Commit 2e18035
1 Parent(s): 28577bb

update space description and links

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -170,24 +170,24 @@ def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
     return chatbot, chat_state, img_list
 
 title = """
-<h1 align="center">Global-Local QFormer for Long Video Understanding with LLMs</h1>
+<h1 align="center">Koala: Key frame-conditioned long video-LLM</h1>
 
-<h5 align="center"> Introduction: We introduce a Global-Local QFormer video model that is connected with a Large Language Model to understand and answer questions about long videos. To try out this demo, please upload a video and start the chat. </h5>
+<h5 align="center"> Introduction: We introduce a key frame-conditioned video model that is connected with a Large Language Model to understand and answer questions about long videos. To try out this demo, please upload a video and start the chat. </h5>
 
 <div style='display:flex; gap: 0.25rem; '>
-<a href='https://huggingface.co/spaces/rxtan/Global-Local-QFormer-Video-LLM'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
-<a href='https://cs-people.bu.edu/rxtan/projects/Global-Local-QFormer/pdf/CVPR_2024_paper.pdf'><img src='https://img.shields.io/badge/Paper-PDF-red'></a>
+<a href='https://huggingface.co/spaces/rxtan/Koala-video-llm'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
+<a href='https://cs-people.bu.edu/rxtan/projects/Koala/pdf/cvpr_2024_koala.pdf'><img src='https://img.shields.io/badge/Paper-PDF-red'></a>
 </div>
 
 
-Thank you for using the Global-Local QFormer Demo Page! If you have any questions or feedback, please feel free to contact us.
+Thank you for using the Koala video-LLM demo page! If you have any questions or feedback, please feel free to contact us.
 Current online demo uses the 7B version of Llama-2 due to resource limitations.
 
 
 """
 
 Note_markdown = ("""
-### We note that our Global-Local QFormer model may be limited at understanding videos from rare domains. Due to the pretraining data, the \
+### We note that our Koala video-LLM model may be limited at understanding videos from rare domains. Due to the pretraining data, the \
 model may be susceptible to hallucinations
 We would like to acknowledge the Video-LLama repository which we copied the demo layout from.
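For reference, a minimal sketch of how string constants such as `title` and `Note_markdown` are typically rendered in a Gradio Space like this one. This is an illustrative assumption rather than code from this commit, and the abbreviated strings stand in for the full text changed above:

```python
# Minimal sketch (assumption, not part of this commit): how title and
# Note_markdown are commonly wired into a Gradio Blocks demo page.
import gradio as gr

title = """<h1 align="center">Koala: Key frame-conditioned long video-LLM</h1>"""

Note_markdown = ("""
### We note that our Koala video-LLM model may be limited at understanding videos from rare domains.
""")

with gr.Blocks() as demo:
    gr.Markdown(title)          # header and introduction shown at the top of the Space
    gr.Markdown(Note_markdown)  # limitations note rendered further down the page

if __name__ == "__main__":
    demo.launch()
```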