snorfyang committed · Commit 06f4116 · verified · Parent: 9244cca

Update README.md

Files changed (1): README.md (+3 -3)
README.md CHANGED
@@ -37,14 +37,14 @@ The model supports multi-image and multi-prompt generation. Meaning that you can
 
 ### Using `pipeline`:
 
-Below we used [`"snorfyang/llava-1.5-7b-lora-hf"`](https://huggingface.co/snorfyang/llava-1.5-7b-lora-hf) checkpoint.
+Below we used [`"snorfyang/llava-v1.5-7b-lora-hf"`](https://huggingface.co/snorfyang/llava-v1.5-7b-lora-hf) checkpoint.
 
 ```python
 from transformers import pipeline
 from PIL import Image
 import requests
 
-model_id = "snorfyang/llava-1.5-7b-lora-hf"
+model_id = "snorfyang/llava-v1.5-7b-lora-hf"
 pipe = pipeline("image-to-text", model=model_id)
 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
 
@@ -67,7 +67,7 @@ from PIL import Image
 import torch
 from transformers import AutoProcessor, LlavaForConditionalGeneration
 
-model_id = "snorfyang/llava-1.5-7b-lora-hf"
+model_id = "snorfyang/llava-v1.5-7b-lora-hf"
 
 prompt = "USER: <image>\nWhat are these?\nASSISTANT:"
 image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
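The first hunk ends at the `url` assignment, just before the README's actual inference call. As a rough end-to-end sketch of the `pipeline` route with the renamed checkpoint (the prompt wording and the `max_new_tokens` value below are assumptions, not part of this commit):

```python
import requests
from PIL import Image
from transformers import pipeline

model_id = "snorfyang/llava-v1.5-7b-lora-hf"
pipe = pipeline("image-to-text", model=model_id)

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Assumed prompt; the diff cuts off before the README's real prompt text.
prompt = "USER: <image>\nWhat does this diagram show?\nASSISTANT:"

# generate_kwargs is forwarded to model.generate(); 200 tokens is an assumed cap.
outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
print(outputs[0]["generated_text"])
```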
 
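Likewise, the second hunk stops at the `image_file` assignment. A minimal sketch of how the pure-`transformers` route would continue, assuming a single CUDA device and float16 weights (the decoding settings are assumptions as well):

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "snorfyang/llava-v1.5-7b-lora-hf"

prompt = "USER: <image>\nWhat are these?\nASSISTANT:"
image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"

# Assumed loading choices: half precision on one GPU.
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
).to("cuda")
processor = AutoProcessor.from_pretrained(model_id)

raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(text=prompt, images=raw_image, return_tensors="pt").to("cuda", torch.float16)

# Assumed decoding settings: greedy decoding with a 200-token cap.
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
print(processor.decode(output[0], skip_special_tokens=True))
```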