Adityadn committed on
Commit
4aa568e
·
verified ·
1 Parent(s): d1aea07

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from PIL import Image
import torch
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer

# --- Model setup -------------------------------------------------------------
# Pretrained ViT-encoder / GPT-2-decoder checkpoint for image captioning.
_CHECKPOINT = "nlpconnect/vit-gpt2-image-captioning"

# Prefer the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the captioning model and its companion preprocessing components.
# `.to(device)` returns the model itself, so loading and placement chain.
model = VisionEncoderDecoderModel.from_pretrained(_CHECKPOINT).to(device)
feature_extractor = ViTImageProcessor.from_pretrained(_CHECKPOINT)
tokenizer = AutoTokenizer.from_pretrained(_CHECKPOINT)
15
# Captioning function
def generate_caption(upload_img, webcam_img):
    """Generate a caption for the supplied image.

    The webcam capture takes priority over the uploaded file when both
    are provided.

    Args:
        upload_img: PIL image from the upload widget, or None.
        webcam_img: PIL image from the webcam widget, or None.

    Returns:
        The decoded caption string, or a short notice when neither
        input held an image.
    """
    # Webcam capture wins over an uploaded file.
    image = webcam_img if webcam_img is not None else upload_img
    if image is None:
        return "No image provided."
    # The ViT processor normalizes 3 channels; RGBA/grayscale inputs
    # (common for PNG uploads) must be converted to RGB first.
    if image.mode != "RGB":
        image = image.convert("RGB")
    # Preprocess
    pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values.to(device)
    # Inference only: disable autograd to cut memory use and speed up generation.
    with torch.no_grad():
        output_ids = model.generate(pixel_values, max_length=16, num_beams=4)
    caption = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
    return caption
27
+
28
# --- Gradio interface --------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Image Captioning with Gradio")

    # Two input paths side by side: file upload and live camera capture.
    # NOTE(review): `source=` is the Gradio 3.x spelling; Gradio 4 renamed it
    # to `sources=[...]` — confirm the pinned gradio version before upgrading.
    with gr.Row():
        upload_input = gr.Image(source="upload", type="pil", label="Upload Image")
        webcam_input = gr.Image(source="webcam", type="pil", label="Use Camera")

    caption_box = gr.Textbox(label="Caption", interactive=False)
    caption_btn = gr.Button("Generate Caption")

    # Both images are handed to the handler; it decides which one to caption.
    caption_btn.click(fn=generate_caption, inputs=[upload_input, webcam_input], outputs=caption_box)

demo.launch()