import os

# Environment must be configured BEFORE importing open_clip (which pulls in
# torch): CUDA_VISIBLE_DEVICES is only honored if set before CUDA is
# initialized, and HF_ENDPOINT redirects Hugging Face Hub downloads to a mirror.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

import open_clip

# Load the Marqo fashionSigLIP checkpoint from the HF Hub.
# preprocess_train / preprocess_val are the image transforms for each split;
# only preprocess_val is used below (inference).
model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:Marqo/marqo-fashionSigLIP')
tokenizer = open_clip.get_tokenizer('hf-hub:Marqo/marqo-fashionSigLIP')

def _embed_image(path):
    """Load an image file and return its L2-normalized embedding.

    Preprocesses with the model's validation transform, encodes under
    torch.no_grad() (inference only, no autograd graph), and returns a
    flat 1-D numpy array.
    """
    batch = preprocess_val(Image.open(path)).unsqueeze(0)  # add batch dim
    with torch.no_grad():
        features = model.encode_image(batch, normalize=True)
    return features.cpu().numpy().reshape(-1)


image_features1 = _embed_image("data/testcase/1/2c4fe016b6df4a530302268ed4099ffbae69a90d.png")
image_features2 = _embed_image("data/testcase/1/7afe855899ef64ce4cd6da142cec2949f4596246.jpg")

def cosine_similarity(a, b):
    """Return the cosine similarity between two 1-D vectors.

    Returns 0.0 when either vector has zero magnitude, instead of
    dividing by zero.
    """
    vec_a = np.array(a)
    vec_b = np.array(b)
    mag_a = np.linalg.norm(vec_a)
    mag_b = np.linalg.norm(vec_b)
    # Guard against division by zero for degenerate (all-zero) inputs.
    if mag_a == 0 or mag_b == 0:
        return 0.0
    return np.dot(vec_a, vec_b) / (mag_a * mag_b)
# Cosine similarity between the two image embeddings (1.0 = same direction).
cs = cosine_similarity(a=image_features1, b=image_features2)
print(cs)






