awacke1 committed on
Commit 774d35a
1 Parent(s): aaab92e

Update app.py

Files changed (1)
app.py +38 -44
app.py CHANGED
@@ -1,44 +1,38 @@
- import torch
- import re
- import gradio as gr
- from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
- 
- device='cpu'
- encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
- decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
- model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
- feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
- tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
- model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
- 
- 
- def predict(image, max_length=64, num_beams=4):
-     image = image.convert('RGB')
-     image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
-     clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0]
-     caption_ids = model.generate(image, max_length=max_length)[0]
-     caption_text = clean_text(tokenizer.decode(caption_ids))
-     return caption_text
- 
- 
- 
- input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
- output = gr.outputs.Textbox(type="auto", label="Captions")
- examples = [f"example{i}.jpg" for i in range(1,7)]
- 
- description = "Image captioning application made using transformers"
- title = "Image Captioning 🖼️"
- 
- article = "Created By : Shreyas Dixit"
- 
- interface = gr.Interface(
-     fn=predict,
-     inputs=input,
-     theme="grass",
-     outputs=output,
-     examples=examples,
-     title=title,
-     description=description,
-     article=article,
- )
- interface.launch(debug=True)
+ 
+ from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
+ import torch
+ from PIL import Image
+ 
+ model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ 
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # fall back to CPU when no GPU is available
+ model.to(device)
+ 
+ 
+ 
+ max_length = 16
+ num_beams = 4
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
+ 
+ def predict_step(image_paths):
+     images = []
+     for image_path in image_paths:
+         i_image = Image.open(image_path)
+         if i_image.mode != "RGB":  # the ViT feature extractor expects RGB input
+             i_image = i_image.convert(mode="RGB")
+ 
+         images.append(i_image)
+ 
+     pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
+     pixel_values = pixel_values.to(device)
+ 
+     output_ids = model.generate(pixel_values, **gen_kwargs)
+ 
+     preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+     preds = [pred.strip() for pred in preds]
+     return preds
+ 
+ 
+ predict_step(['doctor.e16ba4e4.jpg'])  # ['a woman in a hospital bed with a woman in a hospital bed']
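
The previous version of app.py exposed its caption function through a Gradio interface, which this commit removes in favor of a plain script. A minimal sketch of wiring the new batch-style predict_step pipeline back into a Gradio UI might look like the following (untested; it assumes a Gradio 3+ release with the gr.Image/gr.Textbox component names rather than the deprecated gr.inputs/gr.outputs namespace used before, and caption_image is a hypothetical wrapper, not part of the commit):

import gradio as gr

def caption_image(image):
    # Hypothetical single-image wrapper around the pipeline above;
    # the feature extractor accepts PIL images directly, so no file path is needed.
    pixel_values = feature_extractor(images=[image.convert("RGB")], return_tensors="pt").pixel_values
    output_ids = model.generate(pixel_values.to(device), **gen_kwargs)
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

demo = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(type="pil", label="Upload your Image"),
    outputs=gr.Textbox(label="Captions"),
    title="Image Captioning 🖼️",
)
demo.launch()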