from langchain_community.llms.openai import OpenAI
from langchain_community.chat_models.openai import ChatOpenAI
# import os
# os.environ["OPENAI_API_BASE"] = "http://192.168.2.45:7892/v1"
# os.environ["OPENAI_API_KEY"] = "xxx"
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# NOTE(review): removed a stray `ChatOpenAI().invoke("你好")` smoke test here —
# it ran a blocking network request (and required OPENAI_API_KEY) at import
# time, so merely importing this module could hang or crash.
class ChatLLM(ChatOpenAI):
    """ChatOpenAI preconfigured for deterministic (temperature=0) streaming chat.

    Defaults may be overridden by the caller: ``streaming`` and ``max_tokens``
    via the named parameters, ``temperature`` (and anything else ChatOpenAI
    accepts) via ``**kwargs``.
    """

    def __init__(self, streaming: bool = True, max_tokens: int = 1000, *args, **kwargs):
        # Use setdefault so a caller passing temperature=... does not hit
        # "got multiple values for keyword argument 'temperature'".
        kwargs.setdefault("temperature", 0)
        # Positional *args are forwarded first, before the keywords, which is
        # the conventional (and unambiguous) call order; the original passed
        # *args after the keyword arguments.
        super().__init__(*args, streaming=streaming, max_tokens=max_tokens, **kwargs)

if __name__ == '__main__':
    import asyncio

    from langchain.prompts import PromptTemplate

    async def main() -> None:
        """Stream one prompt through ChatLLM, printing each chunk and the full text.

        Demonstrates LCEL composition (``prompt | llm``) with async streaming.
        Requires OPENAI_API_KEY (or the commented OPENAI_API_BASE override at
        the top of the file) to reach a live endpoint.
        """
        llm = ChatLLM(callbacks=[])
        print(llm)

        prompt = PromptTemplate(template='{question}', input_variables=['question'])
        chain = prompt | llm

        res = ''
        # A chat model streams AIMessageChunk objects, not plain strings, so
        # accumulate the chunk's .content text; the original `res += token`
        # raised TypeError (str + AIMessageChunk).
        async for token in chain.astream({"question": "hello\nai\n"}):
            print(token)
            res += token.content
        print(res)

    asyncio.run(main())