"""Embed one test image with the SigLIP2 vision tower, tile the embedding
10000x, and save the result to ``test.npz`` (key ``arr_0``)."""
import os

# Must be set BEFORE torch/CUDA is first touched so only physical GPU 1 is visible.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

import time

import numpy as np
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoProcessor
from transformers.image_utils import load_image

# Load the SigLIP2 checkpoint and keep only the vision tower; the paired
# processor handles resizing/normalization to the model's expected input.
ckpt = "google/siglip2-so400m-patch14-384"
model = AutoModel.from_pretrained(ckpt, device_map="cuda").vision_model
processor = AutoProcessor.from_pretrained(ckpt)

# Load and preprocess the test image, moving tensors to the GPU.
image = load_image("data/testcase/2/1.jpg")
# image = load_image('/home/tfj/datasets/image_retri10k/eval_images_v/604848_6183771.jpg')
inputs = processor(images=[image], return_tensors="pt").to('cuda')

# Inference only: disable autograd bookkeeping (saves memory, makes the
# later .detach() unnecessary but harmless to callers of the saved file).
with torch.no_grad():
    image_embeddings1 = model(**inputs).pooler_output
# image_embeddings1 = F.normalize(image_embeddings1, p=2, dim=1)

# Replicate the single (1, dim) embedding 10000x along the batch axis to
# produce synthetic bulk data, then save. NOTE: the array is passed
# positionally, so np.savez stores it under the default key 'arr_0' --
# kept as-is for compatibility with whatever reads test.npz.
image_embeddings1 = image_embeddings1.cpu().numpy()
image_embeddings1 = np.repeat(image_embeddings1, 10000, axis=0)
np.savez('test', image_embeddings1)

# load the image
# image = load_image("data/testcase/2/2-baidi.jpg")
# # image = load_image('/home/tfj/datasets/image_retri10k/eval_images_v/604848_6183771.jpg')
# inputs = processor(images=[image], return_tensors="pt").to('cuda')
# image_embeddings2 = model(**inputs).pooler_output 
# image_embeddings2 = F.normalize(image_embeddings2, p=2, dim=1)

# cos_sim = F.cosine_similarity(image_embeddings1, image_embeddings2, dim=1)
# print(cos_sim)
# 1,2: 0.823
# 1,3: 0.967
# 2,3: 0.799