czczup committed on
Commit acbf352
1 Parent(s): 170bf72

Update README.md

Files changed (1)
  1. README.md +9 -2
README.md CHANGED
@@ -101,12 +101,19 @@ from transformers import AutoModel, CLIPImageProcessor
 from transformers import AutoTokenizer
 
 path = "OpenGVLab/InternVL-Chat-Chinese-V1-2"
+# If you have an 80G A100 GPU, you can put the entire model on a single GPU.
 model = AutoModel.from_pretrained(
     path,
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
-    trust_remote_code=True,
-    device_map='auto').eval()
+    trust_remote_code=True).eval().cuda()
+# Otherwise, you need to set device_map='auto' to use multiple GPUs for inference.
+# model = AutoModel.from_pretrained(
+#     path,
+#     torch_dtype=torch.bfloat16,
+#     low_cpu_mem_usage=True,
+#     trust_remote_code=True,
+#     device_map='auto').eval()
 
 tokenizer = AutoTokenizer.from_pretrained(path)
 image = Image.open('./examples/image2.jpg').convert('RGB')
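
The two load paths this commit documents can be combined into a single snippet that picks one at runtime. The sketch below is not part of the README diff: the `single_gpu` flag and the ~70 GB memory threshold are illustrative assumptions, and the `device_map='auto'` branch additionally requires the `accelerate` package to be installed.

import torch
from transformers import AutoModel

path = "OpenGVLab/InternVL-Chat-Chinese-V1-2"

# Assumption: treat a card with >= 70 GB of memory (e.g. an 80G A100) as able
# to hold the whole model; the exact threshold is illustrative, not official.
single_gpu = (
    torch.cuda.is_available()
    and torch.cuda.get_device_properties(0).total_memory >= 70 * 1024**3
)

if single_gpu:
    # Entire model on one GPU, as in the updated README snippet.
    model = AutoModel.from_pretrained(
        path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True).eval().cuda()
else:
    # Shard the weights across all visible GPUs; device_map='auto'
    # needs the `accelerate` package.
    model = AutoModel.from_pretrained(
        path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True,
        device_map='auto').eval()

Either way, the rest of the example in the README is unchanged: the tokenizer is loaded with AutoTokenizer.from_pretrained(path) and the input image is preprocessed with the CLIPImageProcessor imported at the top of the snippet.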