from langchain.chat_models import init_chat_model
import os

# Fail fast if the key is missing. The original `str(os.getenv(...))` would
# turn an unset variable (None) into the literal string "None", which gets
# silently passed to the client and only fails later with a cryptic auth error.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")

# Chat model routed through an OpenAI-compatible proxy endpoint.
llm = init_chat_model(
    model="gpt-4o-mini",
    model_provider="openai",
    base_url="https://api.zetatechs.com/v1",
    api_key=api_key,
)
# chunks1 = []
# for chunk in llm.stream("what color is the sky?"):
#     chunks1.append(chunk)
#     print(chunk.content, end="|", flush=True)

# chunks2 = []
# async for chunk in llm.astream("what color is the sky?"):
#     chunks2.append(chunk)
#     print(chunk.content, end="|", flush=True)
# NOTE: the async version (astream) still seems to have issues
# print(chunks1[0])
# print((chunks1[0] + chunks1[1] + chunks1[2] + chunks1[3] + chunks1[4]))

import asyncio

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Pipeline: fill the joke template, invoke the model, parse to a plain string.
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
parser = StrOutputParser()
chain = prompt | llm | parser


async def main():
    """Stream the joke chain's output, printing each piece as it arrives."""
    stream = chain.astream({"topic": "parrot"})
    async for piece in stream:
        # "|" separators make the chunk boundaries visible while streaming.
        print(piece, end="|", flush=True)


# Guard the entry point so importing this module does not immediately
# fire off an API call; the stream only runs when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())
