import os

from llama_index.llms.deepseek import DeepSeek
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings


# ================== Model initialization ==================
from init_config import Config


def init_models(remote: bool):
    """Initialize the embedding model and the LLM, register them globally, and verify.

    Args:
        remote: If True, use the remote DeepSeek API as the LLM; otherwise
            load a local HuggingFace model from ``Config.LLM_MODEL_PATH``.

    Returns:
        tuple: ``(embed_model, llm)`` — the initialized embedding model and LLM.
    """
    # Embedding model: local HuggingFace checkpoint.
    embed_model = HuggingFaceEmbedding(model_name=Config.EMBED_MODEL_PATH)

    # LLM: remote DeepSeek API or a local HuggingFace model.
    if remote:
        llm = DeepSeek(model="deepseek-reasoner", api_key=Config.DEEP_SEEK_API_KEY)
    else:
        llm = HuggingFaceLLM(
            model_name=Config.LLM_MODEL_PATH,
            tokenizer_name=Config.LLM_MODEL_PATH,
            # trust_remote_code allows custom model/tokenizer code shipped
            # with the checkpoint to run — required by some local models.
            model_kwargs={"trust_remote_code": True},
            tokenizer_kwargs={"trust_remote_code": True},
            generate_kwargs={"temperature": 0.3},
        )

    # Register globally so llama_index components use these models by default.
    Settings.embed_model = embed_model
    Settings.llm = llm

    # Sanity check: embed a short text and report the vector dimension.
    test_embedding = embed_model.get_text_embedding("测试文本")
    print(f"Embedding维度验证：{len(test_embedding)}")

    return embed_model, llm