import chromadb
import requests
from chromadb.utils import embedding_functions
import argparse


class OpenAIEmbeddingFunction(embedding_functions.EmbeddingFunction):
    """Embedding function backed by an OpenAI-compatible ``/v1/embeddings`` endpoint.

    NOTE(review): unlike chromadb's EmbeddingFunction protocol (list of
    documents -> list of embeddings), ``__call__`` here takes a single string
    and returns a single embedding vector; ``semantic_search`` in this file
    relies on that shape.
    """

    def __init__(self, model_name="", embedding_url=""):
        self.model_name = model_name  # model identifier sent in the request body
        self.embedding_url = embedding_url  # full URL of the embeddings endpoint

    def __call__(self, text):
        """Return the embedding (list of floats) for a single input string.

        Raises:
            requests.HTTPError: on a non-2xx response from the server.
            requests.Timeout: if the server does not answer within 30s.
        """
        headers = {
            "Content-Type": "application/json"
        }
        # Cap overly long input at 512 characters. The original checked
        # `>= 512` but sliced to 511 chars (off-by-one); use a consistent cap.
        if len(text) > 512:
            text = text[:512]
        response = requests.post(
            self.embedding_url,
            headers=headers,
            json={"model": self.model_name, "input": [text]},
            timeout=30,  # requests has no default timeout; avoid hanging forever
        )
        # Fail loudly on server errors instead of a confusing KeyError below.
        response.raise_for_status()
        return response.json()["data"][0]["embedding"]


def semantic_search(query, embedding_function, db_path="vector_db", top_k=5):
    """Semantic (embedding-based) search over stored image descriptions.

    Embeds *query* with *embedding_function*, then queries the persistent
    chroma collection "image_descriptions" for the *top_k* nearest entries
    and returns the raw chromadb result dict.
    """
    store = chromadb.PersistentClient(path=db_path)
    descriptions = store.get_collection(name="image_descriptions")
    embedded_query = embedding_function(query)
    return descriptions.query(query_embeddings=embedded_query, n_results=top_k)


def main(db_path, query):
    """Search *db_path* for *query* and print the top matching images."""
    embedder = OpenAIEmbeddingFunction(
        model_name="TencentBAC/Conan-embedding-v1",
        embedding_url="http://10.1.30.3:58000/v1/embeddings",
    )
    # Retrieve the three closest image descriptions and print their metadata.
    hits = semantic_search(query, embedder, db_path=db_path, top_k=3)
    for meta in hits["metadatas"][0]:
        print(meta["image_path"])
        print(meta["description"])


if __name__ == "__main__":
    # CLI entry point: --db_path locates the chroma store, --query is the search text.
    # Fix: the help description was copy-pasted from an unrelated script
    # ("Qwen2-VL image description") and misdescribed this tool.
    parser = argparse.ArgumentParser(description="Semantic search over image descriptions")
    parser.add_argument("--db_path", default="../rag/vector_db", help="Vector DB Path")
    parser.add_argument("--query", default="Intel ARC 770显卡", help="Query")
    args = parser.parse_args()
    main(args.db_path, args.query)
