from numpy import ndarray


from typing import Any, List


from PIL import Image
import torch
from transformers import CLIPProcessor, CLIPModel

# Load the CLIP model (ViT-B/32; a larger checkpoint can be substituted here).
model_name = "openai/clip-vit-base-patch32"
# Prefer CUDA when available, otherwise fall back to CPU.
_device = "cuda" if torch.cuda.is_available() else "cpu"
model = CLIPModel.from_pretrained(model_name).to(_device)
processor = CLIPProcessor.from_pretrained(model_name)


def get_image_embedding(image_path) -> ndarray:
    """Extract the CLIP embedding for a single image.

    Args:
        image_path: Path to an image file readable by PIL.

    Returns:
        A numpy array of shape (1, embedding_dim) containing the image
        features (computed on whatever device the model lives on, then
        moved back to CPU).

    Raises:
        FileNotFoundError / PIL.UnidentifiedImageError: if the path does
        not point to a readable image.
    """
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt", padding=True)
    # Bug fix: the processor returns CPU tensors, but the model may have
    # been moved to CUDA at module load — move inputs to the model's device
    # to avoid a device-mismatch RuntimeError on GPU machines.
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    with torch.no_grad():
        image_embeddings = model.get_image_features(**inputs).cpu().numpy()
    return image_embeddings


def get_text_embedding(text) -> ndarray:
    """Extract the CLIP embedding for a query text.

    Args:
        text: A non-empty string (or list of strings) to embed.

    Returns:
        A numpy array of shape (batch, embedding_dim) containing the text
        features, moved back to CPU.

    Raises:
        ValueError: if the text is empty or whitespace-only.
    """
    if not text or text.strip() == "":
        raise ValueError("Text input cannot be empty")
    # truncation=True guards against inputs longer than CLIP's 77-token
    # context window, which would otherwise raise inside the text encoder.
    inputs = processor(text=text, padding=True, truncation=True, return_tensors="pt")
    # Bug fix: move inputs to the model's device; the model may be on CUDA
    # while the processor always returns CPU tensors.
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    with torch.no_grad():
        text_embeddings: ndarray = model.get_text_features(**inputs).cpu().numpy()
    return text_embeddings
