finalf0 committed on
Commit dd592ec
1 Parent(s): 0757b95

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -82,7 +82,7 @@ import torch
 from PIL import Image
 from transformers import AutoModel, AutoTokenizer
 
-model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2.0', trust_remote_code=True, torch_dtype=torch.bfloat16)
+model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2', trust_remote_code=True, torch_dtype=torch.bfloat16)
 # For Nvidia GPUs support BF16 (like A100, H100, RTX3090)
 model = model.to(device='cuda', dtype=torch.bfloat16)
 # For Nvidia GPUs do NOT support BF16 (like V100, T4, RTX2080)
@@ -91,7 +91,7 @@ model = model.to(device='cuda', dtype=torch.bfloat16)
 # Run with `PYTORCH_ENABLE_MPS_FALLBACK=1 python test.py`
 #model = model.to(device='mps', dtype=torch.float16)
 
-tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2.0', trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2', trust_remote_code=True)
 model.eval()
 
 image = Image.open('xx.jpg').convert('RGB')
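
For context, a minimal sketch of how the updated snippet is typically continued for a single-turn query with the renamed checkpoint. The `msgs` message format, the `model.chat(...)` call, and its return values are assumptions drawn from the surrounding MiniCPM-V model card rather than from this commit; verify against the full README before relying on them.

import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

# Load the renamed checkpoint exactly as in the updated README lines above.
model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2', trust_remote_code=True, torch_dtype=torch.bfloat16)
model = model.to(device='cuda', dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2', trust_remote_code=True)
model.eval()

image = Image.open('xx.jpg').convert('RGB')
question = 'What is in this image?'
msgs = [{'role': 'user', 'content': question}]  # assumed single-turn message format

# model.chat() and its (answer, context, _) return signature are assumptions
# taken from the wider model card, not from this diff.
res, context, _ = model.chat(
    image=image,
    msgs=msgs,
    context=None,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7,
)
print(res)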