import os
# Restrict which GPUs are visible. This must be set before torch/CUDA is
# first imported (importing swift below pulls in torch), or it has no effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'
import re

def strip_think_tags(text: str) -> str:
    """Remove every ``<think>...</think>`` section from *text*.

    Reasoning models (such as Baichuan-M2) prefix their answer with a
    chain-of-thought wrapped in ``<think>`` tags; this strips those spans
    so only the final model output remains.

    Args:
        text: Raw model output, possibly containing ``<think>...</think>``.

    Returns:
        The input with all ``<think>...</think>`` spans removed and
        leading/trailing whitespace stripped.
    """
    # DOTALL lets '.' cross newlines, since the thought block is multi-line.
    cleaned = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
    return cleaned.strip()
from swift.llm import PtEngine, RequestConfig, InferRequest
# Local path of the model checkpoint to serve.
model = '/mnt/ssd/jsj/models/models/baichuan-inc/Baichuan-M2-32B'

# Load the inference engine.
#engine = PtEngine(model, max_batch_size=2)
# Pass model_kwargs when constructing PtEngine to disable Flash Attention.
# NOTE(review): `use_flash_attention_2` is a deprecated transformers flag;
# recent versions expect `attn_implementation` instead — verify this kwarg
# actually takes effect with the installed transformers version.
engine = PtEngine(
    model, 
    max_batch_size=2,
    model_kwargs={"use_flash_attention_2": False}  # key point: disable Flash Attention
)
# Greedy decoding (temperature=0), capped at 512 generated tokens.
request_config = RequestConfig(max_tokens=512, temperature=0)

# Two requests are batched together to demonstrate batch inference:
# a single-turn conversation and a multi-turn one.
single_turn = [{'role': 'user', 'content': 'who are you?'}]
multi_turn = [
    {'role': 'user', 'content': '浙江的省会在哪？'},
    {'role': 'assistant', 'content': '浙江省的省会是杭州。'},
    {'role': 'user', 'content': '这里有什么好玩的地方'},
]
infer_requests = [
    InferRequest(messages=single_turn),
    InferRequest(messages=multi_turn),
]


# Run batched inference over both requests and print each result.
resp_list = engine.infer(infer_requests, request_config)
# Echo the first query so the printed transcript is self-contained.
query0 = infer_requests[0].messages[0]['content']
print(f'query0: {query0}')
print(f'response0: {resp_list[0].choices[0].message.content}')
# The batch holds two requests; print the second response as well.
print(f'response1: {resp_list[1].choices[0].message.content}')
