import builtins
import json
import time

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.streamers import TextStreamer

# Wall-clock reference point; the streamer reports total elapsed time
# against this when generation finishes.
start = time.perf_counter()
# Saved reference to the builtin print, used for the final timing report.
# NOTE(review): print is never rebound in the visible code — presumably
# kept as a safeguard; confirm whether this indirection is still needed.
_original_print = builtins.print


# Alternative local checkpoint kept for quick switching during testing.
#model_name = "D:\\models\\deepseek-ai\\DeepSeek-R1-Distill-Qwen-1.5B"
model_name = "D:\\models\\Qwen\\Qwen3-1.7B"


# Load the model and tokenizer (forcing the transformers library;
# trust_remote_code allows the checkpoint's custom code to run).
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",  # let accelerate place layers on available devices
    trust_remote_code=True
)

def get_after_substring(s, substring):
    """Return the part of ``s`` that follows the first occurrence of
    ``substring``; return ``s`` unchanged when ``substring`` is absent."""
    position = s.find(substring)
    if position < 0:
        return s
    return s[position + len(substring):]

# Streaming output handler: echoes tokens live and parses the final JSON.
class JsonStreamer(TextStreamer):
    """TextStreamer subclass that prints each finalized fragment as it
    arrives and, at end of stream, strips the model's thinking block and
    end-of-turn markers before parsing the remainder as JSON.
    """

    def __init__(self, tokenizer, skip_prompt=True, **kwargs):
        super().__init__(tokenizer, skip_prompt, **kwargs)
        # Accumulates every finalized fragment for end-of-stream parsing.
        self.partial_json = ""

    def on_finalized_text(self, text: str, stream_end: bool = False):
        # Echo the fragment immediately so the user sees live output.
        print(text, end="", flush=True)
        # Collect the fragment for the final JSON parse.
        self.partial_json += text
        if stream_end:
            # Drop the reasoning prefix (everything up to "</think>") and
            # up to two end-of-turn markers before parsing.
            result = get_after_substring(self.partial_json, "</think>")
            result = result.replace("<|im_end|>", "", 2)
            # Keep the try body minimal: only the parse can legitimately
            # fail here. The original bare `except:` would also swallow
            # KeyboardInterrupt/SystemExit and any bug in the print/timing
            # code below.
            try:
                parsed = json.loads(result)
            except json.JSONDecodeError:
                print("\nJSON解析失败，原始输出:", self.partial_json)
            else:
                print("\n解析后的JSON:", parsed)
                # Report total wall-clock time since module import.
                elapsed = time.perf_counter() - start
                _original_print(f"[耗时] print执行时间: {elapsed:.6f}秒")


# 准备输入
prompt = ("你是待办提醒助手,帮我识别用户输入是否是一个待办或提醒,只要有事件无论是否有time,code都为true,以"
          "{\"code\": \"true或false\",\"response\": {\"desc\": \"输入的原文\",\"time\": \"提醒时间\"}},"
          "模板样例{\"code\": \"true\",\"response\": {\"desc\": \"明天晚上8点给客户打电话\",\"time\": \"明天晚上8点\"}},"
          "返回严格的json格式,不要有其它多余字符。以下是用户输入:开会")
messages = [
    {"role": "system", "content": "你是智能助手！"},
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(messages,
                                     tokenize=False,
                                     add_generation_prompt=True,
                                     enable_thinking=False
                                     )

# 创建流式处理器
streamer = JsonStreamer(tokenizer)

# 生成文本（开启流式）
generated_ids = model.generate(
    **tokenizer([text], return_tensors="pt").to(model.device),
    max_new_tokens=512,
    streamer=streamer,
    temperature=0.3,          # 控制生成随机性
    repetition_penalty=1.1,   # 防止重复
)