import os
from swift.llm import PtEngine, RequestConfig, InferRequest


# Load the inference engine (single GPU, merged LoRA checkpoint).
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
model = 'output/v4-20250330-125148/checkpoint-100-merged'
engine = PtEngine(model, max_batch_size=1)
# stream=False: the loop below consumes the result as a complete message
# (response[0].choices[0].message.content), which is the non-streaming
# response shape. With stream=True, engine.infer returns per-request chunk
# iterators instead, and that attribute access would fail.
request_config = RequestConfig(max_tokens=512, temperature=0, stream=False)


# Multi-turn chat loop: keeps the full conversation history so every
# inference request carries the complete context.
history = []

print("对话已启动，输入'clear'清空对话历史，'exit'或'quit'退出")
try:
    import re

    # Sanitizer: keep word characters and CJK ideographs, drop everything
    # else (punctuation, whitespace). Compiled once, hoisted out of the loop.
    # NOTE(review): under Unicode matching \w already covers CJK, so the
    # explicit \u4e00-\u9fa5 range is redundant but harmless.
    sanitize_pattern = re.compile(r'[^\w\u4e00-\u9fa5]')

    while True:
        # Read one line of user input; re-prompt on undecodable bytes
        # instead of crashing.
        try:
            user_input = input("你: ")
        except UnicodeDecodeError:
            print("输入包含无法解码的字符，请重新输入。")
            continue
        user_input = sanitize_pattern.sub('', user_input.strip())

        # Skip inputs that sanitized down to nothing — sending an empty
        # user turn to the model is never useful.
        if not user_input:
            continue

        # Exit commands end the session.
        if user_input.lower() in ("exit", "quit"):
            print("结束对话")
            break

        # 'clear' resets the conversation history.
        if user_input.lower() == "clear":
            history = []
            print("对话历史已清空")
            continue

        # Append the user turn and run inference over the whole history.
        history.append({'role': 'user', 'content': user_input})
        infer_request = InferRequest(messages=history)
        response = engine.infer([infer_request], request_config)

        # NOTE(review): this indexing assumes a complete (non-streaming)
        # response — confirm request_config is created with stream=False.
        model_response = response[0].choices[0].message.content

        # Record the assistant turn so later requests see full context.
        history.append({'role': 'assistant', 'content': model_response})
        print(f"AI: {model_response}")

except KeyboardInterrupt:
    print("\n对话被中断")