from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from transformers.generation.streamers import BaseStreamer
import torch

from config.transformers_config import get_model_tokenizer


class TokenHandler(BaseStreamer):
    """Streamer that collects generated tokens and exposes per-token hooks.

    ``model.generate`` (and therefore ``pipeline``) calls ``put()`` once with
    the full prompt ids, then once per newly generated token, and finally
    ``end()`` when generation finishes.

    Args:
        tokenizer: tokenizer used to decode token ids into text.
        skip_prompt: when True (default), the initial prompt ids passed by
            ``generate`` are NOT recorded as generated output. Previously the
            prompt was decoded and prepended to ``generated_text`` as if it
            had been generated — that was a bug.
    """

    def __init__(self, tokenizer, skip_prompt=True):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self._next_is_prompt = True  # generate() sends the prompt ids first
        self.tokens = []             # decoded text of each streamed token
        self.generated_text = ""     # concatenation of all decoded tokens

    def put(self, token_ids):
        """Receive token ids from ``generate()``, decode and accumulate them.

        NOTE(review): decoding token-by-token can split multi-byte/BPE
        sequences; acceptable for display streaming, but the stock
        ``TextStreamer`` buffers and re-decodes for exact text.
        """
        if self._next_is_prompt:
            # First call carries the prompt ids, not generated output.
            self._next_is_prompt = False
            if self.skip_prompt:
                return
        # token_ids is a tensor; [0] selects the first batch element
        # (the single new token, or the prompt row on the first call).
        current_token = self.tokenizer.decode(token_ids[0], skip_special_tokens=True)
        self.tokens.append(current_token)
        self.generated_text += current_token
        # Custom per-token processing hook.
        self.on_token_generated(current_token)

    def end(self):
        """Called automatically when generation finishes."""
        self._next_is_prompt = True  # allow the handler to be reused
        self.on_generation_end()

    def on_token_generated(self, token):
        """Overridable hook invoked for each decoded token."""
        pass

    def on_generation_end(self):
        """Overridable hook invoked once when generation ends."""
        pass


def generate_stream_text(
        model,
        tokenizer,
        prompt,
        max_new_tokens=100,
        temperature=0.7,
        custom_handler=None
):
    """Run streaming text generation through a text-generation pipeline.

    Args:
        model: causal LM to generate with.
        tokenizer: tokenizer matching *model*.
        prompt: input prompt string.
        max_new_tokens: generation budget (default 100).
        temperature: sampling temperature (default 0.7).
        custom_handler: optional ``TokenHandler`` (sub)instance to receive the
            stream; a plain ``TokenHandler`` is created when omitted.

    Returns:
        The handler that received the streamed tokens; its ``generated_text``
        attribute holds the accumulated output.
    """
    handler = custom_handler or TokenHandler(tokenizer)

    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )

    # The pipeline's return value is intentionally discarded: the streamed
    # tokens have already been accumulated on the handler.
    pipe(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        streamer=handler
    )

    return handler


if __name__ == '__main__':

    model, tokenizer = get_model_tokenizer()

    # Example: subclass TokenHandler to process tokens as they arrive —
    # e.g. append each token to a file, push it over a WebSocket, or (here)
    # echo it to the console.
    class MyHandler(TokenHandler):
        def on_token_generated(self, token):
            # flush=True so each token appears immediately; without it the
            # output sits in the stdout buffer until generation ends,
            # defeating the purpose of streaming.
            print(token, end="", flush=True)

    custom_handler = MyHandler(tokenizer)
    generate_stream_text(
        model,
        tokenizer,
        prompt="Python编程技巧",
        custom_handler=custom_handler
    )