import logging
from llama_index.core import Settings
from llama_index.llms.deepseek import DeepSeek
from llama_index.core.embeddings.utils import resolve_embed_model
from llama_index.storage.kvstore.redis import RedisKVStore as RedisCache
from llama_index.core.ingestion import IngestionCache
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.vector_stores.postgres import PGVectorStore
from llamaindex_ingestion_project import config

logger = logging.getLogger(__name__)


def setup_global_settings() -> None:
    """Configure the process-wide LlamaIndex `Settings` singleton.

    Installs a DeepSeek LLM and a locally-resolved embedding model as the
    global defaults, reading model names, paths, and credentials from the
    project `config` module. Returns nothing; the effect is the mutation
    of `Settings.llm` and `Settings.embed_model`.
    """
    logger.info("正在初始化 LLM 和 Embedding Model...")

    # Assign directly onto the global Settings object: the DeepSeek chat
    # model and the embedding model resolved from a local path (e.g. a
    # HuggingFace model directory) via resolve_embed_model.
    Settings.llm = DeepSeek(
        model=config.LLM_MODEL_NAME, api_key=config.DEEPSEEK_API_KEY
    )
    Settings.embed_model = resolve_embed_model(config.EMBED_MODEL_PATH)

    logger.info("全局 LLM 和 Embedding Model 设置完成。")


def initialize_stores() -> tuple[MongoDocumentStore, PGVectorStore, IngestionCache]:
    """Construct the persistence backends used by the ingestion pipeline.

    Builds three independent services from project `config` values:

    - a MongoDB-backed document store,
    - a PostgreSQL (pgvector) vector store,
    - a Redis-backed ingestion cache.

    Returns:
        tuple: ``(docstore, vector_store, ingest_cache)`` in that order.
    """
    logger.info("正在初始化 Document Store, Vector Store, 和 Ingestion Cache...")

    # Redis-backed cache for ingestion pipeline deduplication/caching.
    cache = IngestionCache(
        cache=RedisCache.from_host_and_port(
            host=config.REDIS_HOST, port=config.REDIS_PORT
        ),
        collection=config.REDIS_CACHE_COLLECTION,
    )

    # Document store persisted in MongoDB.
    doc_store = MongoDocumentStore.from_uri(
        uri=config.MONGO_URI, db_name=config.MONGO_DB_NAME
    )

    # Vector store persisted in PostgreSQL via pgvector; embed_dim must
    # match the dimensionality of the configured embedding model.
    vec_store = PGVectorStore.from_params(
        host=config.PG_HOST,
        port=config.PG_PORT,
        user=config.PG_USER,
        password=config.PG_PASSWORD,
        database=config.PG_DB_NAME,
        table_name=config.PG_TABLE_NAME,
        embed_dim=config.EMBED_DIM,
    )

    logger.info("所有存储服务初始化完成。")
    return doc_store, vec_store, cache