Soumen committed
Commit 275cd40
1 Parent(s): 25cd4af

Update app.py

Files changed (1): app.py (+5 -5)
app.py CHANGED
@@ -3,16 +3,15 @@ import torch
 from PIL import Image
 from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
 @st.cache
+#pickle.load(open('energy_model.pkl', 'rb'))
+#vocab = np.load('w2i.p', allow_pickle=True)
+st.title("Image_Captioning_App")
+@st.experimental_singleton
 def load_models():
     model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     return model, feature_extractor, tokenizer
-model, feature_extractor, tokenizer = load_models()
-#pickle.load(open('energy_model.pkl', 'rb'))
-#vocab = np.load('w2i.p', allow_pickle=True)
-st.title("Image_Captioning_App")
-
 #st.text("Build with Streamlit and OpenCV")
 if "photo" not in st.session_state:
     st.session_state["photo"]="not done"
@@ -28,6 +27,7 @@ camera_photo = c2.camera_input("Take a photo", on_change=change_photo_state)
 
 #st.subheader("Detection")
 if st.checkbox("Generate_Caption"):
+    model, feature_extractor, tokenizer = load_models()
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model.to(device)
     max_length = 16
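Net effect of the change: the three from_pretrained calls are now wrapped in @st.experimental_singleton and only invoked inside the Generate_Caption block, so the model, feature extractor and tokenizer are loaded once per process and reused across Streamlit reruns instead of being recreated at import time. The diff only shows a slice of app.py, so the following is a minimal sketch of how the touched pieces typically fit together; the file-uploader input and the preprocess/generate/decode step are assumptions based on the nlpconnect/vit-gpt2-image-captioning model card, not part of this commit.

# Sketch only -- assumed surrounding code, not taken from this commit.
import streamlit as st
import torch
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer

@st.experimental_singleton  # cache the loaded objects for the lifetime of the app process
def load_models():
    model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    return model, feature_extractor, tokenizer

st.title("Image_Captioning_App")
uploaded = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])  # stand-in for the app's camera/file inputs

if uploaded is not None and st.checkbox("Generate_Caption"):
    model, feature_extractor, tokenizer = load_models()  # first call loads, later reruns hit the cache
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    image = Image.open(uploaded).convert("RGB")
    pixel_values = feature_extractor(images=[image], return_tensors="pt").pixel_values.to(device)
    # mirror the max_length = 16 cap that appears in the diff
    output_ids = model.generate(pixel_values, max_length=16, num_beams=4)
    caption = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
    st.write(caption)

@st.experimental_singleton (later renamed st.cache_resource) is the appropriate cache for heavyweight, unhashable objects such as torch models, which the older @st.cache decorator handles poorly.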