pcuenq committed
Commit 5e2122e
1 Parent(s): d9a4d76

Use main branches

Files changed (2):
  1. app.py +2 -3
  2. requirements.txt +1 -1
app.py CHANGED
@@ -6,12 +6,11 @@ from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor
 from PIL import Image
 
 model_id = "adept/fuyu-8b"
-revision = "refs/pr/3"
 dtype = torch.bfloat16
 device = "cuda"
 
-tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
-model = FuyuForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=dtype, revision=revision)
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = FuyuForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=dtype)
 processor = FuyuProcessor(image_processor=FuyuImageProcessor(), tokenizer=tokenizer)
 
 caption_prompt = "Generate a coco-style caption.\n"
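
For reference, a minimal sketch of how the objects touched by this change are typically used for captioning, following the standard transformers Fuyu inference pattern rather than the rest of this Space's app.py; the image path and max_new_tokens value are placeholders.

import torch
from PIL import Image
from transformers import AutoTokenizer, FuyuForCausalLM, FuyuProcessor
from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor

model_id = "adept/fuyu-8b"
dtype = torch.bfloat16
device = "cuda"

# Loading from the main branch: no `revision` argument is needed anymore.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = FuyuForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=dtype)
processor = FuyuProcessor(image_processor=FuyuImageProcessor(), tokenizer=tokenizer)

caption_prompt = "Generate a coco-style caption.\n"
image = Image.open("example.png").convert("RGB")  # placeholder image path

# The processor tokenizes the prompt and turns the image into patch inputs.
inputs = processor(text=caption_prompt, images=image, return_tensors="pt").to(device)

# Generate a short caption and decode only the newly generated tokens.
output_ids = model.generate(**inputs, max_new_tokens=16)
new_tokens = output_ids[:, inputs["input_ids"].shape[1]:]
print(tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0])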
requirements.txt CHANGED
@@ -1,3 +1,3 @@
-git+https://github.com/huggingface/transformers.git@add_fuyu_model
+git+https://github.com/huggingface/transformers.git
 accelerate
 torch==2.0.1