moranyanuka committed
Commit: 576766f
1 Parent(s): d38a495

Update README.md

Files changed (1):
  1. README.md  +2 −2
README.md CHANGED
@@ -56,7 +56,7 @@ import requests
  from PIL import Image
  from transformers import BlipProcessor, BlipForConditionalGeneration
 
- processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-base-mocha").to("cuda")
 
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
@@ -88,7 +88,7 @@ import requests
  from PIL import Image
  from transformers import BlipProcessor, BlipForConditionalGeneration
 
- processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-base-mocha", torch_dtype=torch.float16).to("cuda")
 
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
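For context, here is a minimal end-to-end sketch of how the changed lines fit into the README's half-precision captioning example. The download, preprocessing, and generate steps are assumed from the standard BLIP usage snippet and are not part of this diff; only the two checkpoint names shown above are taken from the commit.

```python
import requests
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Processor now loads from the base Salesforce checkpoint (the change in this commit),
# while the caption model weights come from the MOCHa fine-tuned checkpoint.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained(
    "moranyanuka/blip-image-captioning-base-mocha", torch_dtype=torch.float16
).to("cuda")

# Fetch the demo image referenced in the README.
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

# Unconditional image captioning in fp16 on GPU.
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```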