# Alternative backend (kept for reference): a local model served by Ollama
# instead of the remote Zhipu endpoint configured below.
# from langchain_ollama import ChatOllama
#
# llm = ChatOllama(
#     model="deepseek-r1:8b",
#     base_url="http://localhost:9420",
# )

import os
import langchain
from langchain_openai import ChatOpenAI
# Compatibility shims: older integrations read these module-level flags,
# which newer langchain releases no longer define. Only fill in the ones
# that are actually missing, so an existing configuration is never clobbered.
_COMPAT_DEFAULTS = {"verbose": True, "debug": False, "llm_cache": None}
for _name, _default in _COMPAT_DEFAULTS.items():
    if not hasattr(langchain, _name):
        setattr(langchain, _name, _default)
# Create the Zhipu AI LLM instance via the OpenAI-compatible endpoint.
# SECURITY: read the API key from the environment rather than source code.
# Set ZHIPUAI_API_KEY before running; the hard-coded fallback below exists
# only for backward compatibility and should be revoked and removed.
llm = ChatOpenAI(
    temperature=0.6,
    model="glm-z1-flash",
    openai_api_key=os.environ.get(
        "ZHIPUAI_API_KEY",
        # TODO(security): revoke this leaked key and drop the fallback.
        "fb5dd4d84aa444cda7e7cad9240533af.R9dN2QVRHt6V5c2B",
    ),
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
)




# Smoke-test the model with a single prompt and show the raw response.
response = llm.invoke("请总结一下LangChain的作用")

# The return type is langchain_core.messages.ai.AIMessage;
# .content holds the model's answer text.
print(type(response))
print(response.content)