import time
from typing import Dict

from langchain_community.embeddings import OllamaEmbeddings
from langchain_openai import OpenAIEmbeddings
from loguru import logger

from ai_engine.core.llm.volcengine_ark_embedding import VolcEngineArkEmbeddings
from ai_engine.load_config import SysConfig
from ai_engine.core.model.base import LlmApiType, ModelKwargs
from ai_engine.core.vectorstore.client.elasticsearch_client import ElasticsearchClient
from ai_engine.core.vectorstore.client.opensearch_client import OpenSearchClient
from ai_engine.core.vectorstore.client.vector_store_client import VectorStoreClient, VectorType


class VsClientFactory:
    """
    Vector-store client factory.

    Builds and caches ``VectorStoreClient`` instances so repeated requests
    for the same (api_type, api_key, collection_name) reuse one connection.
    """

    # Client cache keyed by "<api_type>:<api_key>:<collection_name>".
    # NOTE(review): not thread-safe — two concurrent first calls for the same
    # key may each build a client (last one wins); confirm callers are
    # effectively single-threaded during startup.
    _vs_client_dict: Dict[str, VectorStoreClient] = {}

    @classmethod
    def get_instance(
            cls,
            model_kwargs: ModelKwargs,
            collection_name: str = "qa") -> VectorStoreClient:
        """Return a cached or newly created vector-store client.

        Args:
            model_kwargs: Connection/model settings; ``api_type`` selects the
                embedding backend (OpenAI / Ollama / VolcEngine Ark).
            collection_name: Logical collection name; a backend-specific
                prefix ("oula"/"llama"/"ark") is prepended before connecting.

        Returns:
            A connected ``VectorStoreClient`` (OpenSearch or Elasticsearch,
            chosen by ``SysConfig.vector_store["type"]``).

        Raises:
            ValueError: If ``model_kwargs.api_type`` is not a supported
                embedding backend.
            Exception: If the configured vector-store type is invalid.
        """
        dict_key = model_kwargs.api_type + ":" + model_kwargs.api_key + ":" + collection_name
        logger.info("获取VectorStore, dict_key = " + dict_key)
        vector_client = cls._vs_client_dict.get(dict_key)
        if vector_client:
            logger.info("获取VectorStore成功, vector_type = " + vector_client.get_vector_type())
            return vector_client

        # Build the embedding backend matching the requested API type.
        if model_kwargs.api_type == LlmApiType.OPENAI.value:
            embeddings = OpenAIEmbeddings(
                openai_api_base=model_kwargs.api_base,
                openai_api_key=model_kwargs.api_key,
                openai_api_version=model_kwargs.api_version,
                request_timeout=8,
                max_retries=3
            )
            collection_prefix = "oula"
        elif model_kwargs.api_type == LlmApiType.OLLAMA.value:
            embeddings = OllamaEmbeddings(
                base_url=model_kwargs.api_base,
                model="llama3"
            )
            collection_prefix = "llama"
        elif model_kwargs.api_type == LlmApiType.VOLCENGINEARK.value:
            embeddings = VolcEngineArkEmbeddings(
                model=model_kwargs.embedding_model_name,
                volc_ak=model_kwargs.api_key,
                volc_sk=model_kwargs.api_secret_key
            )
            collection_prefix = "ark"
        else:
            # Bug fix: previously an unknown api_type fell through with
            # embeddings=None, which was silently handed to the store client
            # and only failed much later at embed/query time.
            raise ValueError("不支持的api_type: " + str(model_kwargs.api_type))

        collection_name = collection_prefix + "_" + collection_name
        vector_type = SysConfig.vector_store["type"]
        start_time = time.time()
        logger.info("连接开始----------------------------,collection_name=" + collection_name)
        if vector_type == VectorType.OpenSearch:
            vector_client = OpenSearchClient(embeddings, collection_name, SysConfig.vector_store)
        elif vector_type == VectorType.ELASTICSEARCH:
            vector_client = ElasticsearchClient(embeddings, collection_name, SysConfig.vector_store)
        else:
            # str(...) so a non-string config value raises the intended
            # Exception rather than a TypeError from "+" concatenation.
            raise Exception("无效的向量数据库，vector_type=" + str(vector_type))
        cls._vs_client_dict[dict_key] = vector_client
        end_time = time.time()
        logger.info("连接成功----------------------------  耗时=%.2f秒" % (end_time - start_time))
        return vector_client
