NicoZenith committed on
Commit b023a3f · verified · 1 Parent(s): ea711ae

Update README.md

Files changed (1): README.md (+10 −2)
README.md CHANGED

@@ -1,5 +1,10 @@
  ## Usage
  ```python
+ import requests
+ from PIL import Image
+
+ import torch
+
  from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration

  model = LlavaOnevisionForConditionalGeneration.from_pretrained("NicoZenith/onevision-7b-all-vqa-conv")
@@ -19,6 +24,9 @@ conversation = [

  prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

- image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image_file = "https://prod-images-static.radiopaedia.org/images/29923576/fed73420497c8622734f21ce20fc91_gallery.jpeg"
  raw_image = Image.open(requests.get(image_file, stream=True).raw)
- inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(0, torch.float16)
+ inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(0, torch.float16)
+
+ output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
+ print(processor.decode(output[0][2:], skip_special_tokens=True))
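
The diff only shows the changed hunks; the README lines between them (the `AutoProcessor` instantiation and the `conversation` definition) are elided. For reference, a minimal runnable sketch of the full updated snippet might look as follows. The processor loading, the example question, and the model's dtype/device arguments are assumptions based on the standard transformers chat interface for LLaVA-OneVision, not content from this commit:

```python
# A minimal, self-contained sketch of the updated README snippet.
# The lines the diff elides (processor instantiation and the
# `conversation` definition) are filled in here as assumptions,
# following the standard transformers chat format for LLaVA-OneVision.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration

model_id = "NicoZenith/onevision-7b-all-vqa-conv"
# Assumption: load in float16 and move to GPU 0 so the model matches the
# `.to(0, torch.float16)` applied to the inputs below (the diff itself
# shows no dtype/device arguments for the model).
model = LlavaOnevisionForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16
).to(0)
processor = AutoProcessor.from_pretrained(model_id)

# Assumed conversation: one user turn with an image placeholder and a
# text question (this part is not visible in the diff).
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What is shown in this image?"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

image_file = "https://prod-images-static.radiopaedia.org/images/29923576/fed73420497c8622734f21ce20fc91_gallery.jpeg"
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(0, torch.float16)

# Greedy decoding, capped at 200 new tokens, as in the diff.
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
print(processor.decode(output[0][2:], skip_special_tokens=True))
```

As in the committed snippet, `do_sample=False` selects greedy decoding and `processor.decode(output[0][2:], ...)` decodes the returned sequence minus its first two tokens.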