import chromadb
from chromadb.utils import embedding_functions
from sentence_transformers import SentenceTransformer
from PIL import Image

# Load the CLIP model; it can embed both images and text into a shared vector space,
# enabling cross-modal similarity search (image-to-image here, but text queries work too).
model = SentenceTransformer("clip-ViT-B-32")


def getImageEmbedding(image_path: str) -> list[float]:
    """Return the CLIP embedding of an image file as a list of floats.

    Args:
        image_path: Path to the image file on disk.

    Returns:
        The embedding vector of the image (512 dims for clip-ViT-B-32),
        as a plain Python list suitable for chromadb.

    Raises:
        FileNotFoundError: If the file does not exist.
        PIL.UnidentifiedImageError: If the file is not a readable image.
    """
    # Use a context manager so the underlying file handle is closed
    # (Image.open alone leaks the handle). convert() forces the lazy
    # pixel data to load while the file is still open.
    with Image.open(image_path) as img:
        rgb_img = img.convert("RGB")
    # Encode the image; CLIP accepts PIL images directly. Returns a numpy array.
    emb = model.encode([rgb_img], convert_to_numpy=True)
    # Single input -> take the first row and convert to a plain list.
    return emb[0].tolist()


# Embed the reference image to be stored in the vector database.
dog2_embedding = getImageEmbedding("dog2.jpeg")

# Persistent client: data survives across runs in ./chromadb_data.
client = chromadb.PersistentClient(path="./chromadb_data")

# NOTE(review): "multi_model" likely means "multimodal"; the name is kept
# unchanged because renaming would orphan data already persisted under it.
collection = client.get_or_create_collection("multi_model")

# upsert instead of add: with a persistent store, re-running the script would
# otherwise fail with a duplicate-ID error for id "dog2". upsert is idempotent.
collection.upsert(embeddings=[dog2_embedding], documents=["dog2"], ids=["dog2"])

# Embed a second image and query for its nearest neighbor in the collection.
dog3_embedding = getImageEmbedding("dog3.jpg")
results = collection.query(query_embeddings=[dog3_embedding], n_results=1)
print(results)
