import os
import numpy as np
from PIL import Image
from transformers import AutoTokenizer, CLIPProcessor, CLIPModel

class CLIPEncoder:
    """Wraps a locally stored CLIP model to embed images and text into a
    shared feature space, returning numpy vectors."""

    def __init__(self):
        # Resolve ../../models/clip-vit-large-patch14 relative to this file.
        model_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'models', 'clip-vit-large-patch14'))

        # Load tokenizer, image preprocessor and model weights from the local path.
        print("loading model")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.processor = CLIPProcessor.from_pretrained(model_path)
        self.model = CLIPModel.from_pretrained(model_path)
        # Inference only: switch off dropout etc. and disable autograd on all
        # parameters so no graph is built during the forward passes below.
        self.model.eval()
        self.model.requires_grad_(False)

    def encode_img(self, img: Image.Image) -> np.ndarray:
        """Encode a PIL image into a 1-D CLIP image-feature vector.

        Args:
            img: The image to embed.

        Returns:
            The image embedding as a 1-D numpy array.
        """
        inputs = self.processor(images=img, return_tensors='pt')
        output_tensor = self.model.get_image_features(**inputs)
        # Drop the batch dimension; detach/cpu defensively so the conversion
        # still works if gradients are ever re-enabled or the model is moved.
        return output_tensor[0].detach().cpu().numpy()

    def encode_text(self, text: str) -> np.ndarray:
        """Encode a text string into a 1-D CLIP text-feature vector.

        Args:
            text: The text to embed.

        Returns:
            The text embedding as a 1-D numpy array.
        """
        # CLIP's text encoder has a fixed maximum context length; truncate
        # over-long input instead of letting the forward pass fail.
        inputs = self.tokenizer([text], return_tensors='pt', truncation=True)
        output_tensor = self.model.get_text_features(**inputs)
        return output_tensor[0].detach().cpu().numpy()

# Module-level singleton: constructed eagerly at import time, so the model
# weights are loaded once and shared by every importer of this module.
clip_encoder = CLIPEncoder()