from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TextStreamer
import torch
from config.transformers_config import get_model_tokenizer

# Load the shared model/tokenizer pair from the project config helper.
model, tokenizer = get_model_tokenizer()


# Streamer that prints decoded tokens to stdout as they are produced.
token_streamer = TextStreamer(
    tokenizer,
    skip_prompt=True,          # don't echo the input prompt back
    skip_special_tokens=True,  # strip special tokens from the printed text
)

# Text-generation pipeline built on the preloaded model/tokenizer.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Run one streaming generation: the streamer prints tokens live to the
# console, and the complete generation is also returned in `result`.
prompt = "如何学习机器学习？"
result = generator(
    prompt,
    max_new_tokens=100,
    do_sample=True,
    streamer=token_streamer,  # key: passing the streamer enables live output
)
