import os
import sys
import re
import asyncio
from swift.llm import PtEngine, RequestConfig, InferRequest

# Pin the process to GPU 0; must be set before any CUDA initialization happens.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Path to the merged fine-tuned checkpoint to serve.
model = 'output/v4-20250330-125148/checkpoint-100-merged'

# Load the inference engine.
# BUGFIX: the original passed `true` (not a Python name) and raised NameError;
# the Python boolean literal is `True`.
engine = PtEngine(model, max_batch_size=1, model_kwargs={'return_legacy_cache': True})
# Greedy decoding (temperature=0), streamed output, capped at 512 new tokens.
request_config = RequestConfig(max_tokens=512, temperature=0, stream=True)


async def process_response(response_generator_list):
    """Consume a streaming inference response, printing it as it arrives.

    Args:
        response_generator_list: list of response generators as returned by
            ``engine.infer`` with ``stream=True``; only the first entry is read.

    Returns:
        str: the full concatenated model reply; empty string when the list is
        empty or an error occurred before any text was produced.
    """
    model_response = ""
    print("AI: ", end="", flush=True)  # announce the start of the model output
    try:
        if not response_generator_list:
            return model_response

        response_generator = response_generator_list[0]
        # The engine yields plain (non-async) chunks, so a regular for loop is used.
        for response in response_generator:

            # Surface any error reported on the chunk and stop streaming.
            if hasattr(response, 'error') and response.error:
                print(f"\nError: {response.error}")
                break

            # Only chunks that carry choices contain text.
            if hasattr(response, 'choices') and response.choices:
                choice = response.choices[0]
                if hasattr(choice, 'delta') and choice.delta:
                    chunk = choice.delta.content
                    # BUGFIX: delta.content can be None on keep-alive/final
                    # chunks; the original crashed here (`+= None` raises
                    # TypeError, swallowed by the broad except below, losing
                    # the text accumulated so far).
                    if chunk:
                        model_response += chunk
                        print(chunk, end="", flush=True)  # stream incrementally
    except Exception as e:
        print(f"\nError: {str(e)}")
    return model_response

# Conversation history shared across turns (OpenAI-style message dicts).
history = []

print("对话已启动，输入'clear'清空对话历史，'exit'或'quit'退出")
try:
    while True:
        # Read one line of user input; re-prompt on undecodable bytes.
        try:
            user_input = input("你: ")
        except UnicodeDecodeError:
            print("输入包含无法解码的字符，请重新输入。")
            continue
        # Keep only word characters and CJK ideographs (drops punctuation
        # and internal whitespace — intentional input sanitization).
        user_input = re.sub(r'[^\w\u4e00-\u9fa5]', '', user_input.strip())

        # Exit commands.
        if user_input.lower() in ("exit", "quit"):
            print("结束对话")
            break

        # Reset the conversation history.
        if user_input.lower() == "clear":
            history = []
            print("对话历史已清空")
            continue

        # ROBUSTNESS: skip empty turns (bare Enter or punctuation-only input
        # stripped to nothing) instead of sending a blank message to the model.
        if not user_input:
            continue

        # Record the user message and run streaming inference on the history.
        history.append({'role': 'user', 'content': user_input})
        infer_request = InferRequest(messages=history)
        response_generator_list = engine.infer([infer_request], request_config)

        # Stream the reply to stdout and collect the full text.
        model_response = asyncio.run(process_response(response_generator_list))

        # Persist the assistant reply so the next turn has full context.
        if model_response:
            history.append({'role': 'assistant', 'content': model_response})

        # Blank line before the next prompt.
        print()

except KeyboardInterrupt:
    print("\n对话被中断")