# llm_provider.py
import os
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv

# Load configuration (MODEL_TYPE, MODEL, BASE_URL, DEEPSEEK_API_KEY) from the
# project-local .env file at import time.
load_dotenv(dotenv_path=".env")

# Cached singleton LLM client; populated lazily by get_llm().
_llm_instance = None

def get_llm():
    """
    Return a process-wide singleton chat-model client.

    The backend is selected by the ``MODEL_TYPE`` environment variable
    (case-insensitive):

    - ``"ollama"`` -> :class:`ChatOllama`
    - ``"vllm"`` or anything else -> :class:`ChatOpenAI`
      (OpenAI-compatible API, which vLLM also serves)

    Environment variables read:
        MODEL_TYPE: backend selector; defaults to "" (OpenAI-compatible path).
        MODEL: model name passed to the client.
        BASE_URL: endpoint base URL.
        DEEPSEEK_API_KEY: API key for OpenAI-compatible backends; falls back
            to "dummy_key" for servers that do not check it (e.g. local vLLM).

    Returns:
        The cached client instance; subsequent calls return the same object.
    """
    global _llm_instance
    if _llm_instance is not None:
        return _llm_instance

    # Default to "" so a missing MODEL_TYPE falls through to the
    # OpenAI-compatible branch instead of raising AttributeError on .lower().
    model_type = os.getenv("MODEL_TYPE", "").lower()
    model = os.getenv("MODEL")
    base_url = os.getenv("BASE_URL")
    api_key = os.getenv("DEEPSEEK_API_KEY", "dummy_key").strip()

    if model_type == "ollama":
        _llm_instance = ChatOllama(
            model=model,
            base_url=base_url,
            temperature=0.0,
            # Tunable: num_predict limits generated tokens if needed.
        )
    else:
        # OpenAI-compatible API — covers "vllm" and any other value.
        # Tunable: for vLLM, extra_body={"chat_template_kwargs":
        # {"enable_thinking": False}} disables "thinking" output.
        _llm_instance = ChatOpenAI(
            model=model,
            base_url=base_url,
            api_key=api_key,
            temperature=0.0,
        )
    return _llm_instance
