import os
from langchain_community.chat_models import ChatZhipuAI
from langchain_deepseek import ChatDeepSeek
from langchain_openai import ChatOpenAI
import httpx
# Best-effort smoke test of basic connectivity to the DeepSeek API at import
# time. Guarded so a transient network failure cannot make this module
# unimportable — the probe result is intentionally ignored.
try:
    httpx.get("https://api.deepseek.com", timeout=5)
except httpx.HTTPError:
    pass  # non-fatal: connectivity problems surface later at request time

# SiliconFlow API key, read from the environment.
# SECURITY: a previous revision embedded a live-looking secret directly in
# source control — that key should be rotated. Supply the replacement via the
# SILICONFLOW_API_KEY environment variable; defaults to "" when unset.
openai_api_key = os.environ.get("SILICONFLOW_API_KEY", "")


def create_llm(type: str, temperature: float, request_timeout: int, async_mode: bool = False):
    match type:
        case "glm":
            model_args = {
                "model": "glm-4-plus",
                "temperature": temperature,
                "request_timeout": request_timeout
            }
            if async_mode:
                model_args["http_client"] = httpx.AsyncClient(timeout=30.0)
            return ChatZhipuAI(**model_args)
        case "deepseek":
            return ChatDeepSeek(
                model="deepseek-chat",  # 使用官方指定的模型名称
                temperature=temperature,
                request_timeout = request_timeout,
            )
        case "siliconflow":
            ChatOpenAI(
                openai_api_key=openai_api_key,  # 替换为硅基流动 API 密钥
                base_url="https://api.siliconflow.cn/v1",  # 硅基流动 API 地址
                model="deepseek-ai/DeepSeek-R1",  # 指定模型名称，如 deepseek-chat
                temperature=temperature,
                request_timeout = request_timeout,
            )