"""Demo: stream a DeepSeek chat completion through a LangChain LCEL chain.

Requires the environment variables DEEPSEEK_API_KEY and DEEPSEEK_BASE_URL.
"""
import os

from langchain_core.output_parsers import StrOutputParser
# PromptTemplate lives in langchain_core.prompts; importing it from the
# top-level `langchain` package is a deprecated legacy path.
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

# DeepSeek exposes an OpenAI-compatible API, so ChatOpenAI serves as the
# chat model by pointing base_url at the DeepSeek endpoint.
model = ChatOpenAI(
    model="deepseek-chat",
    api_key=os.environ.get("DEEPSEEK_API_KEY"),
    base_url=os.environ.get("DEEPSEEK_BASE_URL"),
)

# Two equivalent prompts: a chat-style prompt and a plain string prompt.
prompt = ChatPromptTemplate.from_template("请你介绍一下什么是 {topic}")
# from_template infers input_variables=["topic"] from the template string.
prompt1 = PromptTemplate.from_template("请你介绍一下什么是 {topic}")

# LCEL pipeline: map the raw invoke()/stream() argument onto the "topic"
# variable, format the prompt, call the model, parse the reply to a string.
chain = {"topic": RunnablePassthrough()} | prompt1 | model | StrOutputParser()

if __name__ == "__main__":
    # Inspect the prompt's expected input shape. langchain_core uses
    # Pydantic v2, where .schema() is deprecated in favor of
    # .model_json_schema().
    print(prompt1.input_schema.model_json_schema())

    # Stream the model's answer chunk-by-chunk.
    for chunk in chain.stream("人工智能"):
        print(chunk, end="", flush=True)