from langchain_community.chat_models import ChatOpenAI

import env_utils

# Large model deployed privately on-premises (local inference endpoint)
# llm = ChatOpenAI(
#     model='qwen3-8b',
#     temperature=0.8,
#     api_key=env_utils.LOCAL_API_KEY,
#     base_url=env_utils.LOCAL_BASE_URL,
# )

# llm = ChatOpenAI(
#     model='qwen3-8b',
#     temperature=0.8,
#     api_key=env_utils.OPENAI_API_KEY,
#     base_url=env_utils.OPENAI_BASE_URL,
# )

# Default chat LLM: DeepSeek's reasoning model, reached through an
# OpenAI-compatible API (credentials/endpoint come from env_utils).
# NOTE(review): `langchain_community.chat_models.ChatOpenAI` is deprecated
# upstream in favor of `langchain_openai.ChatOpenAI` — confirm before migrating.
_deepseek_kwargs = {
    "model": "deepseek-reasoner",
    "temperature": 0.8,
    "api_key": env_utils.DEEPSEEK_API_KEY,
    "base_url": env_utils.DEEPSEEK_BASE_URL,
    # "model_kwargs": {"response_format": {"type": "json_object"}},
}
llm = ChatOpenAI(**_deepseek_kwargs)

# Multimodal chat model (Qwen Omni Turbo) served via an OpenAI-compatible
# endpoint configured in env_utils.
# NOTE(review): name is camelCase; PEP 8 would prefer `multi_model_llm`,
# but renaming would break existing importers, so it is kept as-is.
multiModel_llm = ChatOpenAI(
    base_url=env_utils.OPENAI_BASE_URL,
    api_key=env_utils.OPENAI_API_KEY,
    model="qwen-omni-turbo",
    temperature=0.8,
    # model_kwargs={'stream':True,'modalities': ["text", "audio"],   "stream_options":{"include_usage": True}}
)
