from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
import config
from config.transformers_config import get_model_tokenizer

# Demo: stream a model's reply to a fixed Chinese prompt to stdout, then
# decode and print only the newly generated continuation.
model, tokenizer = get_model_tokenizer()

# TextStreamer prints decoded tokens to stdout as they are produced.
# NOTE(review): without skip_prompt=True it also echoes the prompt — confirm
# this is the intended console output.
streamer = TextStreamer(tokenizer)

# Prompt (kept verbatim): "Write an email to a girl expressing affection."
input_text = "编写一封给女生表达爱意的邮件."
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

# Streaming generation: `streamer` prints tokens incrementally while
# `generate` runs; `outputs` still holds the complete token sequences
# (prompt + continuation) when the call returns.
outputs = model.generate(
    **inputs,
    max_new_tokens=500,   # upper bound on newly generated tokens
    streamer=streamer,    # enable incremental stdout streaming
    do_sample=True,       # sample instead of greedy decoding
    temperature=0.7,      # soften the distribution for variety
)
print(outputs)  # raw token-id tensor, printed for inspection

# Strip the prompt tokens from each returned sequence so that only the
# newly generated continuation is decoded below.
generated_ids = [
    output_ids[len(input_ids):]
    for input_ids, output_ids in zip(inputs.input_ids, outputs)
]

# Decode the first (and only) sequence, dropping special tokens.
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
