awacke1 commited on
Commit
691b58e
1 Parent(s): 774d35a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -1,4 +1,4 @@
1
-
2
  from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
3
 
4
  model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
@@ -32,5 +32,6 @@ def predict_step(image_paths):
32
  preds = [pred.strip() for pred in preds]
33
  return preds
34
 
 
35
 
36
- predict_step(['doctor.e16ba4e4.jpg']) # ['a woman in a hospital bed with a woman in a hospital bed']
 
1
+ import torch
2
  from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
3
 
4
  model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 
32
  preds = [pred.strip() for pred in preds]
33
  return preds
34
 
35
+ torch.hub.download_url_to_file('https://github.com/AaronCWacker/Yggdrasil/blob/main/images/35-Favorite-Games.jpg?raw=true', '35-Favorite-Games.jpg')
36
 
37
+ predict_step(['35-Favorite-Games.jpg']) # ['a woman in a hospital bed with a woman in a hospital bed']