import asyncio

from langchain.chat_models import init_chat_model
import os

# Read the API key from the environment and fail fast if it is missing.
# BUG FIX: the original wrapped the value in str(), which converts a missing
# key (None) into the literal string "None" — a silently-bogus credential
# that would only fail later with a confusing authentication error.
key = os.getenv("OPENAI_API_KEY")
if key is None:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")
api_key = key

# Build the chat model once at module level so both the sync and async
# streaming examples can share it. `model` is init_chat_model's first
# parameter, so it is passed positionally here.
llm = init_chat_model(
    "gpt-4o-mini",
    model_provider="openai",
    base_url="https://api.zetatechs.com/v1",  # OpenAI-compatible proxy endpoint
    api_key=api_key,
)


# Synchronous alternative — uncomment to stream without asyncio:
# for chunk in llm.stream("Write me a 1 verse song about goldfish on the moon"):
#     print(chunk.content, end="|", flush=True)

async def main() -> None:
    """Asynchronously stream the model's reply, printing each chunk.

    Chunks are separated by '|' and flushed immediately so the stream
    is visible as it arrives.
    """
    prompt = "Write me a 1 verse song about goldfish on the moon"
    async for piece in llm.astream(prompt):
        print(piece.content, end="|", flush=True)


asyncio.run(main())
