mtensor committed
Commit
8c4b5c5
1 Parent(s): f06da25
Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -63,15 +63,15 @@ model_inputs = processor(text=text_prompt, images=[image_pil], device="cuda:0")
  for k, v in model_inputs.items():
      model_inputs[k] = v.to("cuda:0")

- generation_output = model.generate(**model_inputs, max_new_tokens=8)
- generation_text = processor.batch_decode(generation_output, skip_special_tokens=True)[0][-38:]
- assert generation_text == "A bus parked on the side of a road.<s>"
+ generation_output = model.generate(**model_inputs, max_new_tokens=7)
+ generation_text = processor.batch_decode(generation_output, skip_special_tokens=True)[0][-35:]
+ assert generation_text == "A bus parked on the side of a road."
  ```

  Fuyu can also perform some question answering on natural images and charts/diagrams (though fine-tuning may be required for good performance):
  ```python
  text_prompt = "What color is the bus?\n"
- image_path = "/bus.png"  # https://huggingface.co/adept-hf-collab/fuyu-8b/blob/main/bus.png
+ image_path = "bus.png"  # https://huggingface.co/adept-hf-collab/fuyu-8b/blob/main/bus.png
  image_pil = Image.open(image_path)

  model_inputs = processor(text=text_prompt, images=[image_pil], device="cuda:0")
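For reference, the hunk ends before the question-answering snippet reaches its generation step. Below is a minimal sketch of how that snippet presumably continues, mirroring the captioning example in the diff above; the `FuyuProcessor`/`FuyuForCausalLM` loading code, the `max_new_tokens` value, and the printed answer are illustrative assumptions, not part of this commit.

```python
# Sketch only: completes the question-answering snippet that the hunk cuts off.
# The model/processor setup is assumed to match what the README shows earlier,
# outside this hunk.
from PIL import Image
from transformers import FuyuForCausalLM, FuyuProcessor

model_id = "adept-hf-collab/fuyu-8b"  # repo referenced by the diff above
processor = FuyuProcessor.from_pretrained(model_id)
model = FuyuForCausalLM.from_pretrained(model_id).to("cuda:0")

text_prompt = "What color is the bus?\n"
image_pil = Image.open("bus.png")  # https://huggingface.co/adept-hf-collab/fuyu-8b/blob/main/bus.png

# Preprocessing mirrors the snippet in the hunk above.
model_inputs = processor(text=text_prompt, images=[image_pil], device="cuda:0")
for k, v in model_inputs.items():
    model_inputs[k] = v.to("cuda:0")

# Generation step, mirroring the captioning example; max_new_tokens is illustrative.
generation_output = model.generate(**model_inputs, max_new_tokens=8)
generation_text = processor.batch_decode(generation_output, skip_special_tokens=True)[0]
print(generation_text)  # expected to contain an answer such as "The bus is blue." (illustrative)
```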