ynhe committed
Commit 3243a8d · 1 Parent(s): e1e7ba5

Update app.py

Files changed (1): app.py (+9 -5)
app.py CHANGED
@@ -113,8 +113,11 @@ with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
     gr.Markdown("<h1><center>Ask Anything with GPT</center></h1>")
     gr.Markdown(
         """
-        Ask-Anything is a multifunctional video question answering tool that combines the functions of Action Recognition, Visual Captioning and ChatGPT. Our solution generates dense, descriptive captions for any object and action in a video, offering a range of language styles to suit different user preferences. It supports users to have conversations in different lengths, emotions, authenticity of language.<br>
-        <p><a href='https://github.com/OpenGVLab/Ask-Anything'><img src='https://img.shields.io/badge/Github-Code-blue'></a></p><p>
+        Ask-Anything is a multifunctional video question-answering tool that combines the functions of Action Recognition, Visual Captioning and ChatGPT. Our solution generates dense, descriptive captions for any object and action in a video, offering a range of language styles to suit different user preferences. It supports users to have conversations in different lengths, emotions, authenticity of language.<br>
+
+        <p><a href='https://github.com/OpenGVLab/Ask-Anything'><img src='https://img.shields.io/badge/Github-Code-blue&logo=github'></a></p><p>
+
+        Recommended to use GPU for inference for a good experience.
         """
     )
@@ -125,10 +128,12 @@ with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
 
     with gr.Row():
         with gr.Column(sclae=0.3, min_width=0):
-            caption = gr.Button("✍ Upload")
+            caption = gr.Button("✍ Watch it!")
             chat_video = gr.Button(" 🎥 Let's Chat! ", interactive=False)
         with gr.Column(scale=0.7, min_width=0):
             loadinglabel = gr.Label(label="State")
+    with gr.Row():
+        example_videos = gr.Dataset(components=[input_video_path], samples=[['images/yoga.mp4'], ['images/making_cake.mp4'], ['images/playing_guitar.mp4']])
     with gr.Column():
         openai_api_key_textbox = gr.Textbox(
             value='',
@@ -152,8 +157,7 @@ with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
     with gr.Column(scale=0.10, min_width=0):
         clear = gr.Button("🔄Clear️")
 
-    with gr.Row():
-        example_videos = gr.Dataset(components=[input_video_path], samples=[['images/yoga.mp4'], ['images/making_cake.mp4'], ['images/playing_guitar.mp4']])
+
 
     example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos.components)
     caption.click(bot.memory.clear)
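
For context, here is a minimal sketch of the layout this commit arrives at: the example-video picker (`gr.Dataset`) moves from the bottom of the page into its own `gr.Row` directly under the state label, and clicking a sample copies its path into the video input. Only the pieces touched by the diff are reproduced, and several names are assumptions: `set_example_video` is defined elsewhere in app.py and is not shown in this diff (the body below assumes it simply forwards the selected path), the definition of `input_video_path` is approximated, and the Gradio 3.x-era API is assumed.

```python
# Minimal Gradio (3.x-era API) sketch of the layout after this commit.
# Captioning, ChatGPT wiring, and the `bot` object from app.py are omitted.
import gradio as gr


def set_example_video(example: list) -> str:
    # ASSUMPTION: set_example_video lives elsewhere in app.py and is not part of
    # this diff. gr.Dataset passes the clicked sample as a list with one value per
    # component, so returning example[0] puts the chosen file path
    # (e.g. 'images/yoga.mp4') back into the video input.
    return example[0]


with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
    # The video input is created earlier in the real app; approximated here.
    input_video_path = gr.Video(label="Input Video")

    with gr.Row():
        # The diff spells this keyword 'sclae'; 'scale' is the Gradio parameter.
        with gr.Column(scale=0.3, min_width=0):
            caption = gr.Button("✍ Watch it!")  # renamed from "✍ Upload"
            chat_video = gr.Button(" 🎥 Let's Chat! ", interactive=False)
        with gr.Column(scale=0.7, min_width=0):
            loadinglabel = gr.Label(label="State")

    # New in this commit: the example picker sits in its own row directly
    # under the state label instead of at the bottom of the page.
    with gr.Row():
        example_videos = gr.Dataset(
            components=[input_video_path],
            samples=[['images/yoga.mp4'], ['images/making_cake.mp4'], ['images/playing_guitar.mp4']],
        )

    # Unchanged wiring: clicking a sample copies its path into the video input.
    example_videos.click(
        fn=set_example_video,
        inputs=example_videos,
        outputs=example_videos.components,
    )

demo.launch()
```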