hangzhang-nlp committed
Commit 9ed1764 • 1 Parent(s): 3cc53fc

Update app.py

Files changed (1)
app.py +14 -3
app.py CHANGED
@@ -137,15 +137,26 @@ def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
     print(chat_state)
     return chatbot, chat_state, img_list
 
-title = """<h1 align="center">Demo of Video-LLaMA</h1>"""
-description = """<h3>This is the demo of Video-LLaMA. Upload your images/videos and start chatting!</h3>"""
+title = """
+<h1 align="center"><a href="https://github.com/DAMO-NLP-SG/Video-LLaMA"><img src="https://s1.ax1x.com/2023/05/22/p9oQ0FP.jpg", alt="Video-LLa" border="0" style="margin: 0 auto; height: 200px;" /></a> </h1>
 
+# Video-LLaMA: An Instruction-Finetuned Visual Language Model for Video Understanding
+
+This is the demo for the Video-LLaMA project, which is working on empowering large language models with video understanding capability. Upload your images/videos and start chatting!!!
+
+Continuously upgrading, stay tuned for more updates!
+
+<div style='display:flex; gap: 0.25rem; '>
+<a href='https://github.com/DAMO-NLP-SG/Video-LLaMA/paper.pdf'><img src='https://img.shields.io/badge/Paper-PDF-red'></a>
+<a href='https://github.com/DAMO-NLP-SG/Video-LLaMA'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
+</div>
+
+"""
 
 #TODO show examples below
 
 with gr.Blocks() as demo:
     gr.Markdown(title)
-    gr.Markdown(description)
 
     with gr.Row():
         with gr.Column(scale=0.5):
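
For context, this change drops the separate `description` string and its `gr.Markdown(description)` call, folding the whole banner (logo, project heading, badge links) into one multi-line `title` string rendered by a single `gr.Markdown(title)` component. Below is a minimal, self-contained sketch of how that banner slots into the Gradio layout; the upload and chat widgets in the columns are assumptions for illustration, since only this fragment of app.py appears in the diff.

```python
import gradio as gr

# Trimmed stand-in for the new multi-line banner string from the commit.
title = """
<h1 align="center"><a href="https://github.com/DAMO-NLP-SG/Video-LLaMA">Video-LLaMA</a></h1>

# Video-LLaMA: An Instruction-Finetuned Visual Language Model for Video Understanding

This is the demo for the Video-LLaMA project. Upload your images/videos and start chatting!
"""

with gr.Blocks() as demo:
    # The entire banner now lives in a single Markdown component;
    # the old `description` string and its gr.Markdown call are gone.
    gr.Markdown(title)

    with gr.Row():
        with gr.Column(scale=0.5):
            # Placeholder inputs standing in for the real app's upload widgets.
            video = gr.Video(label="Upload a video")
            image = gr.Image(label="Upload an image", type="pil")
        with gr.Column():
            # Placeholder chat area standing in for the real app's chatbot.
            chatbot = gr.Chatbot(label="Video-LLaMA")
            text_input = gr.Textbox(label="Ask a question about the upload")

demo.launch()
```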