from typing import Dict, Any, List, Iterator

from langchain.chains import LLMChain
from langchain_community.chat_models import ChatTongyi
from langchain_core.callbacks import BaseCallbackHandler
import os

from langchain_core.outputs import LLMResult
from langchain_core.prompts import PromptTemplate

# SECURITY: a real API key was hard-coded here; it is now exposed in source
# control and must be revoked/rotated. Prefer supplying DASHSCOPE_API_KEY
# via the environment. setdefault keeps the script runnable out of the box
# while letting an externally configured key take precedence.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-9d8f1914800e497f8717144e860f99bc")


class StreamingIteratorCallbackHandler(BaseCallbackHandler):
    """Callback handler that records streamed LLM tokens and exposes them
    as an iterator.

    Tokens are printed to stdout as they arrive (real-time feedback) and
    buffered so they can be replayed after the run via iteration. Iteration
    is one-shot: it consumes from an internal position that is not reset.
    """

    def __init__(self):
        # Buffered tokens in arrival order; consumed position for iteration.
        self.tokens = []
        self.index = 0

    def __iter__(self) -> Iterator[str]:
        # The handler is its own iterator (stateful, single pass).
        return self

    def __next__(self) -> str:
        # Guard clause: nothing left to replay.
        if self.index >= len(self.tokens):
            raise StopIteration()
        current = self.tokens[self.index]
        self.index += 1
        return current

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Buffer each newly generated token and echo it immediately."""
        self.tokens.append(token)
        # Print without newline so the stream renders as continuous text.
        print(token, end='', flush=True)


if __name__ == "__main__":
    # Attach the streaming handler so tokens are printed live and buffered.
    handler = StreamingIteratorCallbackHandler()
    llm = ChatTongyi(
        streaming=True,
        callbacks=[handler],
    )
    template = "Question: {question}"
    prompt = PromptTemplate(template=template, input_variables=["question"])
    # NOTE(review): LLMChain is deprecated in newer langchain releases in
    # favor of `prompt | llm` runnables — confirm the pinned version.
    chain = LLMChain(llm=llm, prompt=prompt)

    response = chain.invoke({"question": "请详细描述java和python的区别"})
    # Provides a way to access or replay the streamed output after the fact,
    # rather than only printing it in real time.
    for token in handler:
        print(f"Received token: {token}")