import os
from dotenv import load_dotenv
from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model
import pprint



# Pull API keys and endpoints from a local .env file into the environment.
load_dotenv()

# Every provider below speaks the OpenAI-compatible chat API, so a single
# batch registration with identical settings covers them all.
_PROVIDER_NAMES = ["dashscope", "deepseek", "zhipu"]
batch_register_model_provider(
    [{"provider": name, "chat_model": "openai-compatible"} for name in _PROVIDER_NAMES]
)

def get_chat_model(model="deepseek-chat", provider="deepseek", is_thinking=False):
    """Load a chat model for a registered provider.

    Args:
        model: Model name understood by the provider.
        provider: Registered provider key (e.g. ``"deepseek"``, ``"dashscope"``).
        is_thinking: When True and ``provider`` is ``"dashscope"``, request the
            model's thinking mode and enable streaming.

    Returns:
        The chat model instance produced by ``load_chat_model``.
    """
    spec = f"{provider}:{model}"

    # Thinking mode is only wired up for dashscope; every other combination
    # falls through to a plain load.
    if not (is_thinking and provider == "dashscope"):
        return load_chat_model(spec)

    # Dashscope takes the thinking switch through the OpenAI-compatible
    # extra_body payload; streaming is turned on alongside it.
    return load_chat_model(
        spec,
        extra_body={"enable_thinking": True},
        streaming=True,
    )

# Example usage:
# model = get_chat_model(model="deepseek-chat", provider="deepseek")
# print(model.invoke("你好"))
