kusumakar committed on
Commit
5149f5a
1 Parent(s): efce283

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np
from PIL import Image
import streamlit as st
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

# Hugging Face Hub ID of the pretrained ViT-encoder / GPT-2-decoder
# image-captioning checkpoint (not a local/Drive path — from_pretrained
# downloads it from the Hub on first run).
MODEL_NAME = "nlpconnect/vit-gpt2-image-captioning"

# Load the captioning model and its matching preprocessors once at import
# time so Streamlit reruns of the UI don't reload them per interaction.
model = VisionEncoderDecoderModel.from_pretrained(MODEL_NAME)
feature_extractor = ViTFeatureExtractor.from_pretrained(MODEL_NAME)
# BUG FIX: original line was missing the closing quote and parenthesis,
# which made the file a syntax error.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
11
def generate_captions(image):
    """Return a generated text caption for an uploaded image file.

    Parameters
    ----------
    image : file-like object
        An uploaded image stream (e.g. a Streamlit ``UploadedFile``).

    Returns
    -------
    str
        The decoded caption with the GPT-2 end-of-text marker removed.

    NOTE(review): relies on the module-level ``model``,
    ``feature_extractor`` and ``tokenizer`` being loaded.
    """
    pil_image = Image.open(image).convert("RGB")
    # Preprocess to pixel tensors and run greedy generation on CPU.
    pixel_values = feature_extractor(pil_image, return_tensors="pt").pixel_values.to("cpu")
    output_ids = model.generate(pixel_values)
    caption = tokenizer.decode(output_ids[0])
    # decode() leaves the special end-of-text token in the string; strip it.
    return caption.replace("<|endoftext|>", "")
18
+
19
# Streamlit UI entry point.
def app():
    """Render the Streamlit page: upload an image, show it with a caption."""
    st.title('Image from your Side, Trending Hashtags from our Side')

    st.write('Upload an image to see what we have in store.')

    # File picker restricted to common image formats.
    uploaded_file = st.file_uploader(
        "Got You Covered, Upload your wish!, magic on the Way! ",
        type=["jpg", "jpeg", "png"],
    )

    # Guard clause: nothing to render until the user supplies a file.
    if uploaded_file is None:
        return

    # Load a preview copy of the image for display.
    preview_image = Image.open(uploaded_file).convert("RGB")

    # Generate the caption from the same uploaded stream.
    caption_text = generate_captions(uploaded_file)

    st.image(preview_image, caption='The Uploaded File')
    st.write("First is first captions for your Photo : ", caption_text)
38
+
39
# Script entry point: launch the Streamlit app when run directly.
if __name__ == '__main__':
    app()