import os

from langchain.chains import LLMChain
from langchain_community.chat_models import ChatTongyi
from langchain_core.prompts import ChatPromptTemplate

# Security: never hard-code API keys in source control. Read the key from the
# environment instead, and fail fast with an actionable message if it is missing.
if "DASHSCOPE_API_KEY" not in os.environ:
    raise RuntimeError(
        "DASHSCOPE_API_KEY is not set; export it before running this script."
    )

# Tongyi (Qwen) chat model; streaming=True asks the backend to emit tokens
# incrementally rather than returning one complete message.
llm = ChatTongyi(streaming=True)

# Prompt template: a fixed system role plus the user's query slotted into {query}.
prompt = ChatPromptTemplate.from_messages(
    [("system", "你是一个专业的AI助手。"), ("human", "{query}")]
)

# LCEL pipe syntax; equivalent to (and replaces) the legacy
# LLMChain(llm=llm, prompt=prompt) wrapper.
llm_chain = prompt | llm

# Streaming output.
# NOTE: the default Runnable.stream() implementation is not truly streaming —
# it just calls invoke() once and wraps the result in a single-element
# iterator. Token-by-token output depends on the model supporting streaming
# (enabled via streaming=True above).
ret = llm_chain.stream({"query": "python和Java的区别是什么，请详细解释"})
print(f"ret:{ret}")
for token in ret:
    print(token.content, end="", flush=True)
