# components/models.py

import os
import nest_asyncio
from llama_index.llms.deepseek import DeepSeek
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core import Settings
from ..config import settings  # Import settings from our project config file


def initialize_models() -> None:
    """Initialize and register the LLM and embedding models globally.

    Reads credentials and model identifiers from the project's ``settings``
    object, constructs the DeepSeek LLM and the local embedding model, and
    installs both on LlamaIndex's global ``Settings`` so downstream code
    (indexing, query engines) picks them up automatically.

    Side effects:
        - Applies ``nest_asyncio`` (needed in environments with an already
          running event loop, e.g. Jupyter).
        - Exports ``DEEPSEEK_API_KEY`` and ``HUGGING_FACE_TOKEN`` into
          ``os.environ`` for libraries that read them from the environment.
        - Mutates ``llama_index.core.Settings`` (llm, embed_model).
    """
    print("🧠 [Models] Initializing LLM and Embedding models...")

    # Allow nested event loops (Jupyter/IPython already run one).
    nest_asyncio.apply()

    # Export credentials for libraries that read them from the environment.
    # NOTE: was previously exported under the misspelled key "DEESEEK_API_KEY",
    # which no consumer would find — fixed to the canonical spelling.
    os.environ["DEEPSEEK_API_KEY"] = settings.DEEPSEEK_API_KEY
    os.environ["HUGGING_FACE_TOKEN"] = settings.HUGGINGFACE_TOKEN

    # Remote LLM: DeepSeek, configured from project settings.
    llm = DeepSeek(
        model=settings.LLM_MODEL_NAME,
        api_key=settings.DEEPSEEK_API_KEY,
    )

    # Local embedding model resolved from a path/identifier
    # (e.g. "local:<model-path>") via LlamaIndex's resolver.
    embed_model = resolve_embed_model(settings.EMBED_MODEL_PATH)

    # Register both models globally so all LlamaIndex components use them.
    Settings.llm = llm
    Settings.embed_model = embed_model

    print("✅ [Models] LLM and Embedding models have been set in LlamaIndex Settings.")