trunks committed on
Commit 75c5fec (1 parent: e822296)

Update README.md


# testing image
inputs = processor(images=img1, return_tensors="pt")
pixel_values = inputs.pixel_values

generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_caption)
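The snippet above assumes `processor` and `model` are already in scope. A self-contained sketch, combining it with the loading code from the README diff below (the repo id and the placeholder image path are both taken from that diff):

```python
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

# Load the processor and captioning model from the Hub
processor = AutoProcessor.from_pretrained("trunks/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("trunks/blip-image-captioning-base")

# Open the test image (placeholder path from the README)
img1 = Image.open("imagepath/img.jpeg")

# Preprocess, generate up to 50 tokens, and decode the caption
inputs = processor(images=img1, return_tensors="pt")
generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_caption)
```

Here `max_length=50` simply caps the caption at 50 tokens; greedy decoding is the default for `generate`.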

Files changed (1)
  1. README.md +5 -7
README.md CHANGED
@@ -8,21 +8,19 @@ pipeline_tag: image-to-text
  from transformers import AutoProcessor, BlipForConditionalGeneration

  processor = AutoProcessor.from_pretrained("trunks/blip-image-captioning-base")
+
  model = BlipForConditionalGeneration.from_pretrained("trunks/blip-image-captioning-base")

+
  # prepare image for model
  from PIL import Image
  from IPython.display import display

  img1 = Image.open("imagepath/img.jpeg")
+
  width, height = img1.size
+
  img1_resized = img1.resize((int(0.3 * width), int(0.3 * height)))
- display(img1_resized)

- # testing image
- inputs = processor(images=img1, return_tensors="pt")
- pixel_values = inputs.pixel_values
+ display(img1_resized)

- generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
- generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
- print(generated_caption)