import os
from langchain_community.llms import Ollama
from langchain_openai import ChatOpenAI
from app.core.logger import get_logger

logger = get_logger(__name__)

# Provider table: CHAT_SERVICE value -> (api-key env var, base-url env var,
# default base URL, model env var, default model, human-readable label).
# Adding a new OpenAI-compatible provider only requires a new row here.
_OPENAI_COMPATIBLE_PROVIDERS = {
    "glm": (
        "ZHIPU_API_KEY", "ZHIPU_BASE_URL", "https://open.bigmodel.cn/api/paas/v4/",
        "ZHIPU_MODEL", "glm-4.5", "Zhipu (GLM)",
    ),
    "kimi": (
        "MOONSHOT_API_KEY", "MOONSHOT_BASE_URL", "https://api.moonshot.cn/v1",
        "MOONSHOT_MODEL", "moonshot-v1-8k", "Moonshot (Kimi)",
    ),
    "qwen": (
        "DASHSCOPE_API_KEY", "DASHSCOPE_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "DASHSCOPE_MODEL", "qwen-plus", "DashScope (Qwen)",
    ),
    "deepseek": (
        "DEEPSEEK_API_KEY", "DEEPSEEK_BASE_URL", "https://api.deepseek.com/",
        "DEEPSEEK_MODEL", "deepseek-chat", "DeepSeek",
    ),
}


def get_llm():
    """Build and return a chat LLM instance selected via environment variables.

    The ``CHAT_SERVICE`` env var (default ``"ollama"``, case-insensitive)
    picks the provider. For the OpenAI-compatible providers (glm / kimi /
    qwen / deepseek) a :class:`ChatOpenAI` client is created from the
    provider's API key, base URL and model env vars. If the provider's API
    key is missing — or ``CHAT_SERVICE`` names no known provider — the
    function falls back to a local :class:`Ollama` instance using
    ``OLLAMA_CHAT_MODEL`` (default ``"qwen3:1.7b"``).

    Returns:
        A LangChain LLM instance (``ChatOpenAI`` or ``Ollama``).

    Raises:
        Exception: Only if even the final Ollama fallback cannot be
            constructed; the original error is logged and re-raised.
    """
    try:
        chat_service = os.getenv("CHAT_SERVICE", "ollama").lower()

        provider = _OPENAI_COMPATIBLE_PROVIDERS.get(chat_service)
        if provider is not None:
            key_env, url_env, default_url, model_env, default_model, label = provider
            api_key = os.getenv(key_env)
            if api_key:
                model = os.getenv(model_env, default_model)
                logger.info(f"{label} LLM initialized with model: {model}")
                return ChatOpenAI(
                    model=model,
                    openai_api_key=api_key,
                    openai_api_base=os.getenv(url_env, default_url),
                    temperature=0.7,
                )
            # Missing credentials: warn and fall through to the Ollama default.
            logger.warning(f"{key_env} not found, falling back to Ollama")

        # Default path: local Ollama model.
        ollama_model = os.getenv("OLLAMA_CHAT_MODEL", "qwen3:1.7b")
        logger.info(f"Using Ollama LLM with model: {ollama_model}")
        return Ollama(model=ollama_model)
    except Exception as e:
        logger.error(f"Failed to initialize LLM: {e}")
        # Last-resort fallback: try a bare Ollama instance before giving up.
        try:
            fallback_model = os.getenv("OLLAMA_CHAT_MODEL", "qwen3:1.7b")
            logger.info(f"Falling back to Ollama with model: {fallback_model}")
            return Ollama(model=fallback_model)
        except Exception:
            logger.error("Failed to initialize fallback LLM")
            raise