from PIL import Image
import requests
from io import BytesIO
import torch
from transformers import AutoModel, AutoProcessor, AutoConfig, AutoModelForVision2Seq

# from granite_cola import ColGraniteVisionConfig, ColGraniteVision, ColGraniteVisionProcessor

# ─────────────────────────────────────────────
# 1) (Optional) Register your custom classes so AutoModel/AutoProcessor
#    work out of the box
# ─────────────────────────────────────────────
# AutoConfig.register("colgranitevision", ColGraniteVisionConfig)
# AutoModel.register(ColGraniteVisionConfig, ColGraniteVision)
# AutoProcessor.register(ColGraniteVisionConfig, ColGraniteVisionProcessor)

# ─────────────────────────────────────────────
# 2) Load model & processor
# ─────────────────────────────────────────────
model_dir = "."

model = AutoModelForVision2Seq.from_pretrained(
    model_dir,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
)

# Optionally wrap the base model with a PEFT adapter, e.g.:
# model = PeftModel.from_pretrained(model, peft_path).eval()

processor = AutoProcessor.from_pretrained(
    model_dir,
    trust_remote_code=True,
    use_fast=True
)

# Set patch_size explicitly if needed
if hasattr(processor, 'patch_size') and processor.patch_size is None:
    processor.patch_size = 14  # Default patch size for vision transformers

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device).eval()

# ─────────────────────────────────────────────
# 3) Download a sample image
# ─────────────────────────────────────────────
image_url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
resp = requests.get(image_url, timeout=30)
resp.raise_for_status()
image = Image.open(BytesIO(resp.content)).convert("RGB")

# ─────────────────────────────────────────────
# 4) Process image and text
# ─────────────────────────────────────────────
# Process image
image_inputs = processor.process_images([image])
image_inputs = {k: v.to(device) for k, v in image_inputs.items()}

# Process text
text = "A photo of a tiger"
text_inputs = processor.process_queries([text]) 
text_inputs = {k: v.to(device) for k, v in text_inputs.items()}

# ─────────────────────────────────────────────
# 5) Get embeddings and score
# ─────────────────────────────────────────────
with torch.no_grad():
    # One forward pass per modality; the model returns the embedding tensor
    image_embedding = model(**image_inputs)
    text_embedding = model(**text_inputs)

# Raw dot-product similarity. NOTE: this assumes the model returns a single
# pooled vector per input (shape (1, dim)); see the late-interaction variant
# below for per-token (multi-vector) outputs.
if text_embedding.dim() == 2 and image_embedding.dim() == 2:
    score = torch.matmul(text_embedding, image_embedding.T).item()
    print(f"Similarity score between text and image: {score:.4f}")