Inoichan committed on
Commit
85e19da
1 Parent(s): 91d72ec

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -7
README.md CHANGED
@@ -61,6 +61,9 @@ See also [notebooks](./notebooks).
61
 
62
  ```python
63
  import requests
 
 
 
64
  from transformers import AutoProcessor
65
  from git_llm.git_llama import GitLlamaForCausalLM
66
 
@@ -82,22 +85,22 @@ text = f"##Instruction: Please answer the following question concletely. ##Quest
82
 
83
  # do preprocessing
84
  inputs = processor(
85
- text,
86
- image,
87
- return_tensors="pt",
88
- truncation=True,
89
  )
90
  inputs = {k: v.to(f"cuda:{device_id}") for k, v in inputs.items()}
91
 
92
  # set eos token
93
  eos_token_id_list = [
94
- processor.tokenizer.pad_token_id,
95
- processor.tokenizer.eos_token_id,
96
  ]
97
 
98
  # do inference
99
  with torch.no_grad():
100
- out = model.generate(**inputs, max_length=256, do_sample=False, temperature=0., eos_token_id=eos_token_id_list)
101
 
102
  # print result
103
  print(processor.tokenizer.batch_decode(out))
 
61
 
62
  ```python
63
  import requests
64
+ from PIL import Image
65
+
66
+ import torch
67
  from transformers import AutoProcessor
68
  from git_llm.git_llama import GitLlamaForCausalLM
69
 
 
85
 
86
  # do preprocessing
87
  inputs = processor(
88
+ text,
89
+ image,
90
+ return_tensors="pt",
91
+ truncation=True,
92
  )
93
  inputs = {k: v.to(f"cuda:{device_id}") for k, v in inputs.items()}
94
 
95
  # set eos token
96
  eos_token_id_list = [
97
+ processor.tokenizer.pad_token_id,
98
+ processor.tokenizer.eos_token_id,
99
  ]
100
 
101
  # do inference
102
  with torch.no_grad():
103
+ out = model.generate(**inputs, max_length=256, do_sample=False, temperature=0., eos_token_id=eos_token_id_list)
104
 
105
  # print result
106
  print(processor.tokenizer.batch_decode(out))