JustinLin610 committed on
Commit
7e8b986
1 Parent(s): 010185e
Files changed (6) hide show
  1. README.md +1 -0
  2. app.py +12 -7
  3. aurora.jpeg +0 -0
  4. beatles.jpeg +0 -0
  5. goog_luck.png +0 -0
  6. pokemon.jpeg +0 -0
README.md CHANGED
@@ -21,5 +21,6 @@ gradio
21
  `app_file`: _string_
22
  app.py
23
 
 
24
  `pinned`: _boolean_
25
  true
 
21
  `app_file`: _string_
22
  app.py
23
 
24
+
25
  `pinned`: _boolean_
26
  true
app.py CHANGED
@@ -1,11 +1,9 @@
1
  import os
2
 
3
-
4
  os.system('git clone https://github.com/pytorch/fairseq.git; cd fairseq;'
5
  'pip install --use-feature=in-tree-build ./; cd ..')
6
  os.system('ls -l')
7
 
8
-
9
  import torch
10
  import numpy as np
11
  from fairseq import utils, tasks
@@ -17,7 +15,6 @@ from PIL import Image
17
  from torchvision import transforms
18
  import gradio as gr
19
 
20
-
21
  # Register caption task
22
  tasks.register_task('caption', CaptionTask)
23
  # turn on cuda if GPU is available
@@ -105,8 +102,8 @@ def apply_half(t):
105
 
106
 
107
  # Function for image captioning
108
- def image_caption(inp):
109
- sample = construct_sample(inp)
110
  sample = utils.move_to_cuda(sample) if use_cuda else sample
111
  sample = utils.apply_to_sample(apply_half, sample) if use_fp16 else sample
112
  with torch.no_grad():
@@ -114,5 +111,13 @@ def image_caption(inp):
114
  return result[0]['caption']
115
 
116
 
117
- io = gr.Interface(fn=image_caption, inputs=gr.inputs.Image(type='pil'), outputs='text')
118
- io.launch(enable_queue=True)
 
 
 
 
 
 
 
 
 
1
  import os
2
 
 
3
  os.system('git clone https://github.com/pytorch/fairseq.git; cd fairseq;'
4
  'pip install --use-feature=in-tree-build ./; cd ..')
5
  os.system('ls -l')
6
 
 
7
  import torch
8
  import numpy as np
9
  from fairseq import utils, tasks
 
15
  from torchvision import transforms
16
  import gradio as gr
17
 
 
18
  # Register caption task
19
  tasks.register_task('caption', CaptionTask)
20
  # turn on cuda if GPU is available
 
102
 
103
 
104
  # Function for image captioning
105
+ def image_caption(Image):
106
+ sample = construct_sample(Image)
107
  sample = utils.move_to_cuda(sample) if use_cuda else sample
108
  sample = utils.apply_to_sample(apply_half, sample) if use_fp16 else sample
109
  with torch.no_grad():
 
111
  return result[0]['caption']
112
 
113
 
114
+ title = "OFA-Image_Caption"
115
+ description = "Gradio Demo for OFA-Image_Caption. Upload your own image or click any one of the examples, and click " \
116
+ "\"Submit\" and then wait for the generated caption. "
117
+ article = "<p style='text-align: center'><a href='https://github.com/OFA-Sys/OFA' target='_blank'>OFA Github " \
118
+ "Repo</a></p> "
119
+ examples = [['beatles.jpeg'], ['aurora.jpeg'], ['good_luck.png'], ['pokemon.jpeg'], ['cakes.JPG'], ['wedding.JPG']]
120
+ io = gr.Interface(fn=image_caption, inputs=gr.inputs.Image(type='pil'), outputs=gr.outputs.Textbox(label="Caption"),
121
+ title=title, description=description, article=article, examples=examples,
122
+ allow_flagging=False, allow_screenshot=False)
123
+ io.launch(enable_queue=True, cache_examples=True)
aurora.jpeg ADDED
beatles.jpeg ADDED
goog_luck.png ADDED
pokemon.jpeg ADDED