hellokitty committed on
Commit
e1059d2
1 Parent(s): 2d73bf3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -26
app.py CHANGED
@@ -1,7 +1,7 @@
1
- import torch
 
2
  import re
3
- import gradio as gr
4
- from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
5
 
6
  device='cpu'
7
  encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
@@ -11,7 +11,6 @@ feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
11
  tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
12
  model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
13
 
14
-
15
  def predict(image,max_length=64, num_beams=4):
16
  image = image.convert('RGB')
17
  image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
@@ -20,25 +19,32 @@ def predict(image,max_length=64, num_beams=4):
20
  caption_text = clean_text(tokenizer.decode(caption_ids))
21
  return caption_text
22
 
23
-
24
-
25
- input = gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True)
26
- output = gr.outputs.Textbox(type="auto",label="Captions")
27
- examples = [f"example{i}.jpg" for i in range(1,7)]
28
-
29
- description= "Image captioning application made using transformers"
30
- title = "Image Captioning 🖼️"
31
-
32
- article = "Created By : Hello Kitty"
33
-
34
- interface = gr.Interface(
35
- fn=predict,
36
- inputs = input,
37
- theme="grass",
38
- outputs=output,
39
- examples = examples,
40
- title=title,
41
- description=description,
42
- article = article,
43
- )
44
- interface.launch(debug=True)
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gradio as gr
3
  import re
4
+ from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
 
5
 
6
  device='cpu'
7
  encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
 
11
  tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
12
  model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
13
 
 
14
  def predict(image,max_length=64, num_beams=4):
15
  image = image.convert('RGB')
16
  image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
 
19
  caption_text = clean_text(tokenizer.decode(caption_ids))
20
  return caption_text
21
 
22
def set_example_image(example: list) -> dict:
    """Build a Gradio update that loads the clicked example into the image input.

    `example` is the row supplied by a Gradio examples component; its first
    entry is the image value to display.
    """
    selected = example[0]
    return gr.Image.update(value=selected)
24
# CSS overrides for the Blocks layout: center the headings and cap the
# rendered image sizes.
css = '''
h1#title {
    text-align: center;
}
h3#header {
    text-align: center;
}
img#overview {
    max-width: 800px;
    max-height: 600px;
}
img#style-image {
    max-width: 1000px;
    max-height: 600px;
}
'''

# Build the Gradio UI: an image upload, a caption textbox, and a button that
# runs `predict` (defined earlier in this file).
demo = gr.Blocks(css=css)
with demo:
    gr.Markdown('''<h1 id="title">Image Caption 🖼️</h1>''')
    gr.Markdown('''Made by : Shreyas Dixit''')
    with gr.Column():
        # NOTE(review): gr.inputs / gr.outputs are the deprecated pre-Blocks
        # component API; newer Gradio uses gr.Image / gr.Textbox directly —
        # confirm against the pinned Gradio version before modernizing.
        # Renamed from `input` to avoid shadowing the builtin `input`.
        image_input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
        output = gr.outputs.Textbox(type="auto", label="Captions")
        # Fixed user-facing typo: "Genrate Caption" -> "Generate Caption".
        btn = gr.Button("Generate Caption")
        btn.click(fn=predict, inputs=image_input, outputs=output)

demo.launch()