palitrajarshi committed
Commit 96072ff
1 Parent(s): 37bed6c

Update pages/Captionize.py

Files changed (1): pages/Captionize.py (+5 -3)
pages/Captionize.py CHANGED

@@ -1,5 +1,7 @@
 import torch
-import re
+import re
+from PIL import Image
+import requests
 import streamlit as st
 from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
 
@@ -33,9 +35,9 @@ feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
 tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
 model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
 
-
 def predict(image,max_length=64, num_beams=4):
-    image = image.convert('RGB')
+    #image = image.convert('RGB')
+    image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
     image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
     clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0]
     caption_ids = model.generate(image, max_length = max_length)[0]
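
The net effect of this commit is that predict() now receives an image URL rather than an already-opened PIL image: the PIL and requests imports are added, and image.convert('RGB') is replaced by a download-and-open step. Below is a minimal sketch of the resulting flow; the checkpoint names are placeholders (the real values are defined elsewhere in pages/Captionize.py and are not visible in this diff), and the decode/return step follows the usual captioning pattern since it is outside the shown hunk.

import torch
import requests
from PIL import Image
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# Placeholder checkpoints: the real values are set earlier in pages/Captionize.py
# and do not appear in this diff.
encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"

feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)


def predict(image_url, max_length=64, num_beams=4):
    # After this commit, predict() fetches the image from a URL itself
    # instead of being handed a PIL image to convert.
    image = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
    # num_beams is accepted but, as in the diffed code, not passed to generate().
    caption_ids = model.generate(pixel_values, max_length=max_length)[0]
    # Decoding is not part of the visible hunk; this mirrors the usual cleanup
    # for GPT-2 style caption output.
    clean_text = lambda x: x.replace("<|endoftext|>", "").split("\n")[0]
    return clean_text(tokenizer.decode(caption_ids))

Hypothetical usage from the Streamlit page, with example.com standing in for whatever URL the app collects: caption = predict("https://example.com/photo.jpg").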