nateraw committed
Commit 8b7f367 • 1 Parent(s): a5d0544

Update app.py

Files changed (1): app.py (+9, -5)
app.py CHANGED

--- a/app.py
+++ b/app.py
@@ -140,6 +140,10 @@ class Pipeline:
             return text
         return text
 
+title = "LaViLa"
+description = """LaViLa (**L**anguage **a**ugmented **Vi**deo **La**nguage Pretraining) is a new approach to learning video representations from Large Language Models (LLMs). We repurpose LLMs to be visually conditioned "Narrators", and use them to automatically generate video-language paired data. We use this data to then learn a video-language representation, outperforming prior work by large margins. \nGradio Demo for LaViLa. To use it, simply upload your video, or click one of the examples to load them. Read more at the links below."""
+article = "<p style='text-align: center'><a href='https://github.com/facebookresearch/LaViLa' target='_blank'>Github Repo</a><a href='https://arxiv.org/abs/2212.04501' target='_blank'>Paper on arxiv</a></p><center><img src='https://visitor-badge.glitch.me/badge?page_id=nateraw_lavila' alt='visitor badge'></center></p>"
+
 interface = gr.Interface(
     Pipeline(),
     inputs=[
@@ -148,8 +152,8 @@ interface = gr.Interface(
         gr.Slider(0.0, 1.0, 0.95, label='top_p'),
     ],
     outputs='text',
-    examples=[['eating_spaghetti.mp4', 0.7, 0.95], ['assets/3c0dffd0-e38e-4643-bc48-d513943dc20b_012_014.mp4', 0.7, 0.95]]
-)
-
-if __name__ == '__main__':
-    interface.launch(debug=True)
+    examples=[['eating_spaghetti.mp4', 0.7, 0.95], ['assets/3c0dffd0-e38e-4643-bc48-d513943dc20b_012_014.mp4', 0.7, 0.95]],
+    title=title,
+    description=description,
+    article=article,
+).launch(debug=True)
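
In short, the commit moves the demo's Gradio metadata (title, description, article) into module-level variables, passes them as keyword arguments to gr.Interface, and chains .launch(debug=True) onto the constructor instead of guarding it behind if __name__ == '__main__'. A minimal, self-contained sketch of that pattern follows; the predict stub and its text-only input are placeholders for illustration, not the Space's actual Pipeline (which takes a video plus temperature and top_p sliders):

import gradio as gr

# Stand-in for the Space's Pipeline class (placeholder for illustration only);
# the real callable takes a video file plus temperature/top_p sliders and
# returns the generated narration.
def predict(prompt: str) -> str:
    return f"narration for: {prompt}"

title = "LaViLa"
description = "Gradio demo for LaViLa. Type a prompt or pick an example."
article = (
    "<p style='text-align: center'>"
    "<a href='https://github.com/facebookresearch/LaViLa' target='_blank'>Github Repo</a> "
    "<a href='https://arxiv.org/abs/2212.04501' target='_blank'>Paper on arxiv</a>"
    "</p>"
)

# Same wiring as the commit: metadata kwargs on gr.Interface, then .launch()
# chained directly instead of being guarded by `if __name__ == '__main__':`.
gr.Interface(
    predict,
    inputs='text',
    outputs='text',
    examples=[['a person is slicing vegetables']],
    title=title,
    description=description,
    article=article,
).launch(debug=True)

Because the __main__ guard is gone, the interface starts as soon as app.py is executed, which works fine on a Hugging Face Space where app.py is the entry point.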