Jeel05 commited on
Commit
23bd154
·
verified ·
1 Parent(s): 6f9a239

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -0
app.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py — minimal visual-question-answering demo for the MiniCPM-V model.
#
# Loads the model and tokenizer from the Hugging Face Hub, asks one question
# about a local image, and prints the model's answer.

import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

MODEL_ID = 'openbmb/MiniCPM-V'
IMAGE_PATH = 'xx.jpg'  # path to the input image; replace with your own file


def _pick_device_and_dtype():
    """Choose the best available (device, dtype) pair.

    Mirrors the hardware cases the original comments listed, but selects
    automatically instead of requiring manual edits:
      - CUDA GPUs that support bf16 (e.g. A100, H100, RTX3090): cuda + bfloat16
      - Older CUDA GPUs without bf16 (e.g. V100, T4, RTX2080): cuda + float16
      - Apple silicon / AMD GPUs via MPS (run with
        ``PYTORCH_ENABLE_MPS_FALLBACK=1``): mps + float16
      - No accelerator at all: cpu + float32
    """
    if torch.cuda.is_available():
        if torch.cuda.is_bf16_supported():
            return 'cuda', torch.bfloat16
        return 'cuda', torch.float16
    mps_backend = getattr(torch.backends, 'mps', None)
    if mps_backend is not None and mps_backend.is_available():
        return 'mps', torch.float16
    return 'cpu', torch.float32


def main():
    """Load MiniCPM-V, run one chat turn on IMAGE_PATH, and print the reply."""
    device, dtype = _pick_device_and_dtype()

    # trust_remote_code is required: MiniCPM-V ships custom modeling code.
    model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=True, torch_dtype=dtype)
    model = model.to(device=device, dtype=dtype)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
    model.eval()

    image = Image.open(IMAGE_PATH).convert('RGB')
    question = 'What is in the image?'
    msgs = [{'role': 'user', 'content': question}]

    # model.chat returns (response_text, updated_context, _).
    # context=None starts a fresh single-turn dialogue.
    res, context, _ = model.chat(
        image=image,
        msgs=msgs,
        context=None,
        tokenizer=tokenizer,
        sampling=True,    # sample instead of greedy decoding
        temperature=0.7,
    )
    print(res)


if __name__ == '__main__':
    main()