import os
import pickle
import time

import numpy as np
import torch
#from sklearn.metrics.pairwise import cosine_similarity
from PIL import Image
from transformers import AutoTokenizer, CLIPProcessor, CLIPModel

# Resolve project-relative directories: this file is assumed to live two
# levels below the project root (…/project/<pkg>/<subpkg>/this_file.py),
# so '..'/'..'/'..' from __file__ lands at the project root.
data_path = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'data'))
image_path = os.path.join(data_path, 'raw_images')        # input JPEGs
features_path = os.path.join(data_path, 'image_features') # NOTE(review): defined but unused in this file — presumably used by a sibling script
model_path = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'models', 'clip-vit-large-patch14'))

# Load the CLIP model and its pre/post-processing components from the local
# checkpoint directory (no network download).
print("loading model")
tokenizer = AutoTokenizer.from_pretrained(model_path)
processor = CLIPProcessor.from_pretrained(model_path)
model = CLIPModel.from_pretrained(model_path)
# Inference-only: disable dropout/batch-norm updates and gradient tracking
# so feature tensors can be converted to numpy directly.
model.eval()
model.requires_grad_(False)
print("load model complete")

def encode_text(text: str) -> np.ndarray:
    """Encode a text prompt into a CLIP text-feature vector.

    Args:
        text: the prompt to embed.

    Returns:
        1-D numpy array of un-normalized CLIP text features. Callers that
        want cosine similarity must L2-normalize the result themselves.
    """
    inputs = tokenizer([text], return_tensors='pt')
    # no_grad() makes the function self-contained: it stays correct (and
    # allocates no autograd state) even if the module-level
    # requires_grad_(False) is ever removed.
    with torch.no_grad():
        output_tensor = model.get_text_features(**inputs)
    return output_tensor[0].numpy()

def encode_img(img: Image.Image) -> np.ndarray:
    """Encode a PIL image into a CLIP image-feature vector.

    Args:
        img: the image to embed (any mode the CLIP processor accepts).

    Returns:
        1-D numpy array of un-normalized CLIP image features. Callers that
        want cosine similarity must L2-normalize the result themselves.
    """
    inputs = processor(images=img, return_tensors='pt')
    # no_grad() keeps inference graph-free regardless of module-level flags.
    with torch.no_grad():
        output_tensor = model.get_image_features(**inputs)
    return output_tensor[0].numpy()

# Sanity check: compare one text prompt against two sample images.
img0_path = os.path.join(image_path, 'ILSVRC2012_test_00000026.JPEG') # man
img1_path = os.path.join(image_path, 'ILSVRC2012_test_00000027.JPEG') # dog

# Context managers close the underlying file handles (Image.open is lazy
# and the originals leaked open files); encoding forces the pixel load.
with Image.open(img0_path) as img0, Image.open(img1_path) as img1:
    img0_embedding = encode_img(img0)
    img1_embedding = encode_img(img1)
text_embedding = encode_text('a photo of dog')

# L2-normalize so the dot product below is the cosine similarity.
text_embedding = text_embedding / np.linalg.norm(text_embedding)
img0_embedding = img0_embedding / np.linalg.norm(img0_embedding)
img1_embedding = img1_embedding / np.linalg.norm(img1_embedding)

# Expectation: the dog image (second line) scores higher for "a photo of dog".
print(np.dot(text_embedding, img0_embedding))
print(np.dot(text_embedding, img1_embedding))