import threading
import time

import torch
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM

import config
from config.transformers_config import get_model_tokenizer

model, tokenizer = get_model_tokenizer()

prompt = "编写一封给女生表达爱意的邮件."

messages = [
    {"role": "user", "content": prompt}
]

# Render the chat template into a plain prompt string; add_generation_prompt
# appends the assistant-turn marker so the model starts answering.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

# Streamer yields decoded text chunks as tokens are produced; skip_prompt
# drops the echoed input. The timeout makes the consumer loop raise instead
# of blocking forever if the generation thread dies (the original comment
# claimed a timeout was set, but none was passed).
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=60.0)
inputs = tokenizer([text], return_tensors="pt").to(model.device)

# Generation parameters: sampled decoding, capped at 200 new tokens.
generation_kwargs = dict(
    inputs,
    streamer=streamer,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7,
)

# model.generate blocks until finished, so run it on a worker thread while
# the main thread consumes the streamer.
thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

start = time.time()
# Print each decoded chunk as soon as it arrives (token-by-token streaming);
# flush so partial output is visible immediately.
for new_text in streamer:
    print(new_text, end='', flush=True)
thread.join()  # ensure generation has fully finished before timing

print()
# BUG FIX: the original printed `ll - time.time()`, i.e. a negative number;
# elapsed seconds is end-time minus start-time.
print(time.time() - start)

