from langchain_openai import ChatOpenAI
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv

load_dotenv() # Load environment variables from a local .env file, e.g. OPENAI_API_KEY="aaa.bbb"

# Chat model client targeting ZhipuAI's GLM-4 via its OpenAI-compatible API.
# NOTE: the base URL must be HTTPS — the original used plain http://, which would
# send the API key (read from OPENAI_API_KEY) in cleartext over the network.
llm = ChatOpenAI(
    temperature=0.95,          # high temperature -> more varied / creative output
    model="glm-4",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4",  # ZhipuAI OpenAI-compatible endpoint
    streaming=True,            # required so .stream() yields incremental chunks
    callbacks=[],
    verbose=False,
    request_timeout=120,       # seconds before an individual request times out
    max_retries=3,
    max_tokens=2048,
    n=1,                       # single completion per request
    frequency_penalty=0,
    presence_penalty=0,
    top_p=1,                   # no nucleus-sampling truncation
    stop=None,
    logit_bias=None,
    user=None,
    model_kwargs={},
)

# Conversation payload: a system prompt establishing the assistant's role,
# followed by the user's question about Saturn.
# BUG FIX: the original had a trailing comma after the closing bracket
# (`messages=[...],`), which made `messages` a 1-element *tuple* wrapping the
# list rather than the list of message dicts that llm.stream() expects.
messages = [
    {"role": "system", "content": "你是一个乐于回答各种问题的小助手，你的任务是提供专业、准确、有洞察力的建议。"},
    {"role": "user", "content": "我对太阳系的行星非常感兴趣，尤其是土星。请提供关于土星的基本信息，包括它的大小、组成、环系统以及任何独特的天文现象。"},
]

# Stream the model's reply incrementally; each `chunk` is a partial message.
for chunk in llm.stream(messages):
    print(chunk)