#!/usr/bin/env python3
"""
vLLM工具调用完整demo - 使用Qwen3-4B-Thinking-2507模型
实现：工具定义 -> LLM调用 -> 工具解析 -> 工具执行 -> 结果返回LLM -> 最终输出
特别适配Qwen3-4B-Thinking-2507的thinking机制
优化版本：正确配置双GPU部署(GPU 0,1)
"""

import json
import logging
import math
import os
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import psutil
import requests
import torch
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

# Environment setup - explicitly restrict vLLM to GPU 0 and GPU 1.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# Force the multiprocessing worker backend instead of Ray.
os.environ["VLLM_USE_RAY_SPMD_WORKER"] = "0"
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"

# Logging configuration (timestamped, module-scoped logger below).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

@dataclass
class ToolCall:
    """A single tool invocation parsed from the model's output."""
    id: str  # call identifier (model-provided or synthesized as "call_N")
    name: str  # registered tool name to dispatch to
    arguments: Dict[str, Any]  # keyword arguments forwarded to the tool function

@dataclass
class ToolResult:
    """Outcome of executing one ToolCall."""
    tool_call_id: str  # echoes ToolCall.id so results can be matched to calls
    name: str  # name of the tool that produced this result
    content: str  # textual result (or error message) fed back to the LLM
    success: bool = True  # False when the tool was missing or raised


class SystemMonitor:
    """System monitor — logs GPU and host-memory usage."""

    @staticmethod
    def get_gpu_info():
        """Log every visible CUDA device and its memory usage.

        Returns:
            int: the number of CUDA devices; 0 when CUDA is unavailable or
            when probing fails.
        """
        try:
            # Guard clause: no CUDA means nothing to report.
            if not torch.cuda.is_available():
                logger.warning("未检测到可用的CUDA设备")
                return 0

            gpu_count = torch.cuda.device_count()
            logger.info(f"检测到 {gpu_count} 个可用GPU")

            for i in range(gpu_count):
                gpu_name = torch.cuda.get_device_name(i)
                memory_total = torch.cuda.get_device_properties(i).total_memory / (1024**3)
                logger.info(f"GPU {i}: {gpu_name}, 总内存: {memory_total:.1f}GB")

                # Query per-device stats via the explicit device index. The
                # previous code also called torch.cuda.set_device(i) here,
                # which changed the process-wide current device as a side
                # effect of a read-only probe; the index argument makes that
                # unnecessary.
                memory_allocated = torch.cuda.memory_allocated(i) / (1024**3)
                memory_reserved = torch.cuda.memory_reserved(i) / (1024**3)
                logger.info(f"  已分配: {memory_allocated:.1f}GB, 已保留: {memory_reserved:.1f}GB")

            return gpu_count
        except Exception as e:
            # Best-effort diagnostics: never let monitoring crash the caller.
            logger.error(f"获取GPU信息失败: {e}")
            return 0

    @staticmethod
    def get_memory_info():
        """Log total/available host memory and the utilization percentage."""
        memory = psutil.virtual_memory()
        logger.info(f"系统内存: 总计 {memory.total / (1024**3):.1f}GB, "
                   f"可用 {memory.available / (1024**3):.1f}GB, "
                   f"使用率 {memory.percent:.1f}%")


class ToolRegistry:
    """Registry owning tool implementations and their JSON-schema definitions."""

    def __init__(self):
        # name -> python callable implementing the tool
        self.tools = {}
        # OpenAI-style {"type": "function", ...} definitions, in registration order
        self.tool_definitions = []

    def register_tool(self, name: str, func: Callable[..., Any], description: str, parameters: Dict):
        """Register a tool implementation together with its schema.

        Args:
            name: tool name the model will reference.
            func: callable invoked with the parsed arguments as kwargs.
                  (Annotated with typing.Callable; the old annotation used the
                  builtin ``callable`` predicate, which is not a type.)
            description: human/model-readable description of the tool.
            parameters: JSON-schema dict describing the accepted arguments.
        """
        self.tools[name] = func
        tool_def = {
            "type": "function",
            "function": {
                "name": name,
                "description": description,
                "parameters": parameters
            }
        }
        self.tool_definitions.append(tool_def)
        logger.info(f"注册工具: {name}")

    def execute_tool(self, tool_call: ToolCall) -> ToolResult:
        """Execute one parsed tool call.

        Never raises: unknown tools and tool exceptions are reported as a
        ToolResult with ``success=False`` so the LLM can read the error text.
        """
        try:
            if tool_call.name not in self.tools:
                return ToolResult(
                    tool_call_id=tool_call.id,
                    name=tool_call.name,
                    content=f"错误: 工具 {tool_call.name} 不存在",
                    success=False
                )

            func = self.tools[tool_call.name]
            result = func(**tool_call.arguments)

            # Tool output is fed back to the LLM as text, so coerce to str.
            if not isinstance(result, str):
                result = str(result)

            return ToolResult(
                tool_call_id=tool_call.id,
                name=tool_call.name,
                content=result,
                success=True
            )

        except Exception as e:
            # Any failure inside the tool becomes an error result, not a crash.
            return ToolResult(
                tool_call_id=tool_call.id,
                name=tool_call.name,
                content=f"工具执行失败: {str(e)}",
                success=False
            )


# 定义具体工具函数
def calculator(operation: str, a: float, b: float = 0) -> str:
    """Basic calculator tool: add/subtract/multiply/divide/power/sqrt.

    Errors are reported as strings rather than raised, so the LLM can read
    them back as tool output.
    """
    supported = ("add", "subtract", "multiply", "divide", "power", "sqrt")
    if operation not in supported:
        return f"错误: 不支持的运算 {operation}"

    try:
        x = float(a)
        y = float(b)
        if operation == "add":
            value = x + y
        elif operation == "subtract":
            value = x - y
        elif operation == "multiply":
            value = x * y
        elif operation == "divide":
            if y == 0:
                # Report division by zero instead of raising.
                return "错误: 除数不能为0"
            value = x / y
        elif operation == "power":
            value = x ** y
        else:  # sqrt — the second operand is ignored
            value = math.sqrt(x)
        return f"{a} {operation} {b} = {value}"
    except Exception as e:
        return f"计算错误: {str(e)}"


def get_weather(location: str, unit: str = "celsius") -> str:
    """Mock weather lookup for a small set of Chinese cities.

    ``unit`` may be "celsius" (default) or "fahrenheit"; any other value
    skips conversion but labels the reading °F, matching the original logic.
    """
    # Canned weather table standing in for a real API.
    known = {
        "北京": {"temp": 25, "desc": "晴朗"},
        "上海": {"temp": 28, "desc": "多云"},
        "广州": {"temp": 32, "desc": "雨"},
        "深圳": {"temp": 30, "desc": "晴朗"},
        "杭州": {"temp": 26, "desc": "阴"},
        "成都": {"temp": 24, "desc": "多云"},
    }

    city = location.strip()
    entry = known.get(city)
    if entry is None:
        return f"抱歉，暂时无法获取{city}的天气信息"

    reading = entry["temp"]
    if unit == "fahrenheit":
        reading = reading * 9 / 5 + 32
    suffix = "C" if unit == "celsius" else "F"
    return f"{city}当前天气：{entry['desc']}，温度：{reading}°{suffix}"


def web_search(query: str, max_results: int = 3) -> str:
    """Mock web-search tool: returns canned results keyed by query text."""
    # Canned results per known query; "默认" is the fallback template.
    canned = {
        "人工智能": [
            "AI技术在2024年取得重大突破，大模型能力显著提升",
            "机器学习算法在医疗诊断领域应用广泛",
            "自动驾驶技术进入商业化应用阶段"
        ],
        "Python": [
            "Python 3.12版本发布，性能优化显著",
            "FastAPI成为最受欢迎的Web框架之一",
            "数据科学领域Python库生态持续发展"
        ],
        "vLLM": [
            "vLLM 2024年性能优化显著，支持更多模型架构",
            "Tensor并行和Pipeline并行技术在vLLM中的应用",
            "vLLM在生产环境中的最佳实践和部署策略"
        ],
        "默认": [
            f"关于'{query}'的最新资讯和发展动态",
            f"{query}相关的技术文档和教程资源",
            f"{query}在实际应用中的案例分析"
        ]
    }

    # Unknown queries fall back to the generic template; then truncate.
    hits = canned.get(query, canned["默认"])[:max_results]
    numbered = "\n".join(f"{pos}. {hit}" for pos, hit in enumerate(hits, 1))
    return f"搜索'{query}'的结果：\n" + numbered


def get_system_info() -> str:
    """Report CPU, memory and GPU facts as one formatted string.

    All failures are folded into an error string so the tool never raises.
    """
    try:
        # CPU: core count plus a 1-second utilization sample.
        core_count = psutil.cpu_count()
        cpu_usage = psutil.cpu_percent(interval=1)

        # Host memory.
        vm = psutil.virtual_memory()
        mem_text = f"总计: {vm.total / (1024**3):.1f}GB, 可用: {vm.available / (1024**3):.1f}GB"

        # GPUs, if CUDA is present.
        gpu_text = "无GPU"
        if torch.cuda.is_available():
            n = torch.cuda.device_count()
            names = [torch.cuda.get_device_name(idx) for idx in range(n)]
            gpu_text = f"{n}个GPU: {', '.join(names)}"

        return f"系统信息：\nCPU: {core_count}核心, 使用率: {cpu_usage}%\n内存: {mem_text}\nGPU: {gpu_text}"

    except Exception as e:
        return f"获取系统信息失败: {str(e)}"


class ThinkingParser:
    """Splits Qwen3 "thinking" model output into reasoning and final answer."""

    @staticmethod
    def parse_thinking_and_content(
        output_ids: List[int],
        tokenizer,
        think_end_token_id: int = 151668,  # id of "</think>" in the Qwen3 vocab
    ) -> Tuple[str, str]:
        """Split generated token ids at the last end-of-thinking marker.

        Args:
            output_ids: token ids produced by the model.
            tokenizer: any object exposing ``decode(ids, skip_special_tokens=...)``.
            think_end_token_id: id of the ``</think>`` marker. Defaults to
                151668 (the Qwen3 vocabulary); parameterized so the parser can
                be reused with other vocabularies. Existing callers are
                unaffected.

        Returns:
            ``(thinking_content, final_content)``, both stripped of leading
            and trailing newlines.
        """
        try:
            # Index just past the LAST occurrence of the marker.
            index = len(output_ids) - output_ids[::-1].index(think_end_token_id)
        except ValueError:
            # Marker absent: per the upstream Qwen3 recipe, no split happens —
            # thinking is empty and everything is decoded as final content.
            # (The old comment claimed the opposite of what the code does.)
            index = 0

        thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
        content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")

        return thinking_content, content


class ToolCallParser:
    """Parses tool calls out of raw LLM output text.

    Three strategies are tried in order, stopping at the first one that
    yields any calls:
      1. ``<tool_call>{...}</tool_call>`` tags (the format the system prompt requests)
      2. fenced ```json blocks containing "name"/"arguments"
      3. bare JSON object literals (fallback)
    """

    @staticmethod
    def parse_tool_calls(text: str) -> List[ToolCall]:
        """Return every ToolCall found in ``text`` (possibly an empty list)."""
        tool_calls = []
        call_id_counter = 1

        # Strategy 1: explicit <tool_call> ... </tool_call> tags.
        tool_call_pattern = r'<tool_call>\s*(\{.*?\})\s*</tool_call>'
        matches = re.findall(tool_call_pattern, text, re.DOTALL)

        for match in matches:
            try:
                call_data = json.loads(match)
                if "name" in call_data and "arguments" in call_data:
                    tool_calls.append(ToolCall(
                        # Prefer a model-provided id, else synthesize one.
                        id=call_data.get("id", f"call_{call_id_counter}"),
                        name=call_data["name"],
                        arguments=call_data["arguments"]
                    ))
                    call_id_counter += 1
            except json.JSONDecodeError as e:
                logger.warning(f"解析工具调用JSON失败: {e}")
                continue

        # Strategy 2: ```json fenced blocks containing a function call object.
        if not tool_calls:
            func_pattern = r'```json\s*(\{.*?"name".*?"arguments".*?\})\s*```'
            matches = re.findall(func_pattern, text, re.DOTALL)

            for match in matches:
                try:
                    call_data = json.loads(match)
                    if "name" in call_data and "arguments" in call_data:
                        tool_calls.append(ToolCall(
                            id=f"call_{call_id_counter}",
                            name=call_data["name"],
                            arguments=call_data["arguments"]
                        ))
                        call_id_counter += 1
                except json.JSONDecodeError:
                    continue

        # Strategy 3: bare JSON literal scan. The "arguments" value is itself
        # a JSON object, so the pattern must allow one nested {...} — the
        # previous pattern used only [^{}] and therefore could never match a
        # call whose arguments were a dict.
        if not tool_calls:
            json_blocks = re.findall(
                r'\{[^{}]*"name"[^{}]*"arguments"\s*:\s*\{[^{}]*\}[^{}]*\}',
                text,
            )
            for block in json_blocks:
                try:
                    call_data = json.loads(block)
                    if "name" in call_data and "arguments" in call_data:
                        tool_calls.append(ToolCall(
                            id=f"call_{call_id_counter}",
                            name=call_data["name"],
                            arguments=call_data["arguments"]
                        ))
                        call_id_counter += 1
                except json.JSONDecodeError:
                    continue

        return tool_calls


class Qwen3ThinkingToolCaller:
    """Tool-calling pipeline for Qwen3-4B-Thinking-2507 on a dual-GPU vLLM deployment.

    Flow: register tools -> prompt the model -> parse tool calls from the
    output -> execute tools -> feed results back -> produce the final answer.
    """

    def __init__(self, model_path: str, target_gpus: Optional[List[int]] = None):
        """
        Args:
            model_path: local path (or hub id) of the Qwen3 thinking model.
            target_gpus: GPU indices to shard across; defaults to [0, 1].
                (Default moved into the body — the old signature used a
                mutable list as a default argument.)
        """
        self.model_path = model_path
        self.target_gpus = [0, 1] if target_gpus is None else target_gpus

        # Validate the host/GPU environment before any heavy initialization.
        self._validate_environment()

        # Tool registry with the built-in demo tools.
        self.tool_registry = ToolRegistry()
        self._register_default_tools()

        # Output parsers.
        self.tool_parser = ToolCallParser()
        self.thinking_parser = ThinkingParser()

        # Spin up the vLLM engine (slow: loads weights onto the GPUs).
        self._init_vllm()

    def _validate_environment(self):
        """Check GPUs, memory and CUDA availability; raise if CUDA is missing."""
        logger.info("验证部署环境...")

        # GPU availability (warn-only: a smaller deployment may still work).
        gpu_count = SystemMonitor.get_gpu_info()
        if gpu_count < len(self.target_gpus):
            logger.warning(f"警告: 要求使用{len(self.target_gpus)}个GPU，但只检测到{gpu_count}个GPU")

        # Host memory.
        SystemMonitor.get_memory_info()

        # CUDA/PyTorch versions; CUDA is mandatory for this deployment.
        if torch.cuda.is_available():
            logger.info(f"CUDA版本: {torch.version.cuda}")
            logger.info(f"PyTorch版本: {torch.__version__}")
        else:
            logger.error("CUDA不可用，无法使用GPU加速")
            raise RuntimeError("CUDA不可用")

        logger.info("环境验证完成")

    def _register_default_tools(self):
        """Register the four built-in demo tools with their JSON schemas."""
        # Calculator tool.
        self.tool_registry.register_tool(
            name="calculator",
            func=calculator,
            description="执行基础数学运算，包括加减乘除、乘方、开方",
            parameters={
                "type": "object",
                "properties": {
                    "operation": {
                        "type": "string",
                        "enum": ["add", "subtract", "multiply", "divide", "power", "sqrt"],
                        "description": "要执行的数学运算类型"
                    },
                    "a": {
                        "type": "number",
                        "description": "第一个操作数"
                    },
                    "b": {
                        "type": "number", 
                        "description": "第二个操作数（对于sqrt操作可以忽略）",
                        "default": 0
                    }
                },
                "required": ["operation", "a"],
                "additionalProperties": False
            }
        )

        # Weather tool.
        self.tool_registry.register_tool(
            name="get_weather",
            func=get_weather,
            description="获取指定城市的当前天气信息",
            parameters={
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "要查询天气的城市名称，如：北京、上海、广州等"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "温度单位，摄氏度或华氏度",
                        "default": "celsius"
                    }
                },
                "required": ["location"],
                "additionalProperties": False
            }
        )

        # Web-search tool.
        self.tool_registry.register_tool(
            name="web_search",
            func=web_search,
            description="搜索网络信息，获取相关资讯和内容",
            parameters={
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "搜索关键词或问题"
                    },
                    "max_results": {
                        "type": "integer",
                        "description": "返回的最大结果数量",
                        "default": 3,
                        "minimum": 1,
                        "maximum": 10
                    }
                },
                "required": ["query"],
                "additionalProperties": False
            }
        )

        # System-info tool (takes no arguments).
        self.tool_registry.register_tool(
            name="get_system_info",
            func=get_system_info,
            description="获取当前系统的CPU、内存、GPU等硬件信息",
            parameters={
                "type": "object",
                "properties": {},
                "required": [],
                "additionalProperties": False
            }
        )

    def _init_vllm(self):
        """Initialize tokenizer, the vLLM engine (with a fallback config) and sampling params."""
        logger.info(f"初始化vLLM，使用模型: {self.model_path}")
        logger.info(f"目标GPU设备: {self.target_gpus}")

        # Tokenizer.
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path, 
            trust_remote_code=True
        )

        # Make sure a pad token exists (some chat models ship without one).
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # One tensor-parallel shard per target GPU.
        tensor_parallel_size = len(self.target_gpus)
        logger.info(f"使用tensor_parallel_size: {tensor_parallel_size}")

        # Primary engine configuration, tuned for the dual-GPU setup.
        try:
            self.llm = LLM(
                model=self.model_path,
                tensor_parallel_size=tensor_parallel_size,  # shard across both GPUs
                dtype="auto",  # pick the best dtype automatically
                trust_remote_code=True,
                enforce_eager=True,  # skip CUDA-graph capture overhead
                gpu_memory_utilization=0.85,  # aggressive memory budget
                max_model_len=65536,  # long context for thinking traces
                distributed_executor_backend="mp",  # multiprocessing, not Ray
                disable_log_stats=False,  # keep engine stats logging on
                # Extra tuning knobs:
                enable_prefix_caching=True,  # reuse shared prompt prefixes
                use_v2_block_manager=True,   # NOTE(review): removed in newer vLLM; the fallback path below covers that
                swap_space=4,  # 4 GB of CPU swap space
                block_size=16,  # KV-cache block size
            )

            logger.info("vLLM初始化成功")

        except Exception as e:
            # Fall back to a more conservative configuration.
            logger.error(f"vLLM初始化失败: {e}")
            logger.info("尝试降级配置...")

            self.llm = LLM(
                model=self.model_path,
                tensor_parallel_size=tensor_parallel_size,
                dtype="auto",
                trust_remote_code=True,
                enforce_eager=True,
                gpu_memory_utilization=0.75,  # lower memory budget
                max_model_len=32768,  # shorter context
                distributed_executor_backend="mp",
            )

            logger.info("使用降级配置初始化vLLM成功")

        # Default sampling parameters for thinking + answer generation.
        self.sampling_params = SamplingParams(
            temperature=0.7,      # slightly creative
            top_p=0.9,           # nucleus sampling
            top_k=40,            # top-k sampling
            min_p=0.05,          # minimum probability threshold
            max_tokens=16384,    # room for thinking plus the answer
            stop=["<|im_end|>", "<|endoftext|>"],
            presence_penalty=0.1, # mild presence penalty
            frequency_penalty=0.1, # mild frequency penalty
        )

        logger.info("vLLM初始化完成")

        # Warm up so later timing measurements are representative.
        self._warmup_model()

    def _warmup_model(self):
        """Run one tiny generation to warm caches; failures are non-fatal."""
        logger.info("开始模型预热...")
        try:
            warmup_prompt = "你好，这是一个测试消息。"
            warmup_messages = [{"role": "user", "content": warmup_prompt}]

            prompt = self.tokenizer.apply_chat_template(
                warmup_messages,
                tokenize=False,
                add_generation_prompt=True
            )

            # Short, near-deterministic sampling for the warmup pass.
            warmup_params = SamplingParams(
                temperature=0.1,
                max_tokens=50,
                stop=["<|im_end|>", "<|endoftext|>"]
            )

            outputs = self.llm.generate([prompt], warmup_params)
            logger.info("模型预热完成")

        except Exception as e:
            logger.warning(f"模型预热失败，但不影响正常使用: {e}")

    def _format_messages_with_tools(self, user_query: str, conversation_history: List[Dict] = None) -> List[Dict]:
        """Build the chat message list: system prompt (tool instructions) + history + user query.

        Note: conversation_history is read-only here, so the None default is safe.
        """
        messages = []

        # System prompt describing the available tools and the expected
        # <tool_call> output format (runtime string — must stay as-is).
        system_content = """你是一个智能助手，可以调用工具来帮助用户解决问题。

你有以下工具可以使用：
1. calculator: 执行基础数学运算
2. get_weather: 获取天气信息
3. web_search: 搜索网络信息
4. get_system_info: 获取系统硬件信息

当需要调用工具时，请使用以下格式：
<tool_call>
{"name": "工具名称", "arguments": {"参数名": "参数值"}}
</tool_call>

请根据用户的问题，判断是否需要使用工具。如果需要，就调用相应的工具获取信息，然后基于工具结果回答用户的问题。"""

        messages.append({"role": "system", "content": system_content})

        # Prior turns (thinking content is never stored in history).
        if conversation_history:
            messages.extend(conversation_history)

        # Current user query.
        messages.append({"role": "user", "content": user_query})

        return messages

    def _format_tool_response_messages(self, 
                                     original_messages: List[Dict], 
                                     assistant_response: str,
                                     tool_calls: List[ToolCall], 
                                     tool_results: List[ToolResult]) -> List[Dict]:
        """Append the assistant turn and one "tool" message per result.

        ``tool_calls`` is accepted for interface symmetry but not read here —
        the results already carry the matching ids.
        """
        messages = original_messages.copy()

        # Assistant turn: final content only, thinking excluded.
        messages.append({"role": "assistant", "content": assistant_response})

        # One tool message per executed call.
        for result in tool_results:
            tool_message = {
                "role": "tool",
                "name": result.name,
                "content": result.content,
                "tool_call_id": result.tool_call_id
            }
            messages.append(tool_message)

        return messages

    def generate_response(self, messages: List[Dict], use_tools: bool = True) -> Tuple[str, str]:
        """Generate with vLLM and split the output.

        Returns:
            (thinking_content, final_content).

        Raises:
            Re-raises any generation error after logging it.
        """
        try:
            # Render the prompt; include tool schemas only when requested.
            if use_tools:
                prompt = self.tokenizer.apply_chat_template(
                    messages,
                    tools=self.tool_registry.tool_definitions,
                    tokenize=False,
                    add_generation_prompt=True
                )
            else:
                prompt = self.tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True
                )

            logger.info(f"生成的prompt长度: {len(prompt)}")

            # Single-prompt generation with the default sampling params.
            outputs = self.llm.generate([prompt], self.sampling_params)

            # Token ids of the first (only) completion.
            generated_ids = outputs[0].outputs[0].token_ids

            # Split thinking trace from the final answer.
            thinking_content, final_content = self.thinking_parser.parse_thinking_and_content(
                generated_ids, self.tokenizer
            )

            logger.info(f"Thinking内容长度: {len(thinking_content)}")
            logger.info(f"最终响应内容长度: {len(final_content)}")

            return thinking_content, final_content

        except Exception as e:
            logger.error(f"生成响应时出错: {e}")
            raise

    def process_query(self, user_query: str, show_thinking: bool = False) -> str:
        """Full round-trip: prompt -> parse tool calls -> execute -> final answer.

        Returns the model's final answer, or an apologetic error string if
        anything in the pipeline fails.
        """
        logger.info(f"开始处理用户查询: {user_query}")

        try:
            # 1) Build the tool-aware message list.
            initial_messages = self._format_messages_with_tools(user_query)

            # 2) First generation: thinking plus (possibly) tool calls.
            thinking_content, initial_response = self.generate_response(initial_messages, use_tools=True)

            if show_thinking:
                print(f"\n🧠 Thinking过程:\n{thinking_content}\n")
                print("-" * 80)

            # 3) Parse tool calls from both the thinking trace and the answer
            #    (the model sometimes emits the call inside its reasoning).
            tool_calls_from_thinking = self.tool_parser.parse_tool_calls(thinking_content)
            tool_calls_from_response = self.tool_parser.parse_tool_calls(initial_response)

            all_tool_calls = tool_calls_from_thinking + tool_calls_from_response

            # No tools requested: the first answer is the final answer.
            if not all_tool_calls:
                logger.info("未检测到工具调用，直接返回响应")
                return initial_response

            logger.info(f"检测到 {len(all_tool_calls)} 个工具调用")

            # 4) Execute every parsed call; failures become error results.
            tool_results = []
            for tool_call in all_tool_calls:
                logger.info(f"执行工具: {tool_call.name}，参数: {tool_call.arguments}")
                result = self.tool_registry.execute_tool(tool_call)
                tool_results.append(result)

                if result.success:
                    logger.info(f"工具执行成功: {result.content[:100]}...")
                else:
                    logger.error(f"工具执行失败: {result.content}")

            # 5) Rebuild the conversation including the tool outputs.
            tool_response_messages = self._format_tool_response_messages(
                initial_messages, initial_response, all_tool_calls, tool_results
            )

            # Explicitly ask for a final synthesis over the tool results.
            tool_response_messages.append({
                "role": "user", 
                "content": "请基于以上工具执行结果，为我提供完整详细的回答。"
            })

            # 6) Second generation: the final answer grounded in tool output.
            final_thinking, final_response = self.generate_response(tool_response_messages, use_tools=False)

            if show_thinking:
                print(f"\n🧠 最终思考过程:\n{final_thinking}\n")

            logger.info("查询处理完成")
            return final_response

        except Exception as e:
            logger.error(f"处理查询时出错: {e}")
            return f"抱歉，处理您的请求时遇到了错误: {str(e)}"

    def get_model_stats(self) -> Dict[str, Any]:
        """Collect per-GPU and host memory stats into a flat dict (GB / percent).

        Returns an empty dict on failure — monitoring never raises.
        """
        try:
            stats = {}

            # Per-GPU memory for each configured device that actually exists.
            if torch.cuda.is_available():
                for i in self.target_gpus:
                    if i < torch.cuda.device_count():
                        stats[f'gpu_{i}_memory_allocated'] = torch.cuda.memory_allocated(i) / (1024**3)
                        stats[f'gpu_{i}_memory_reserved'] = torch.cuda.memory_reserved(i) / (1024**3)
                        stats[f'gpu_{i}_memory_total'] = torch.cuda.get_device_properties(i).total_memory / (1024**3)

            # Host memory.
            memory = psutil.virtual_memory()
            stats['system_memory_used'] = (memory.total - memory.available) / (1024**3)
            stats['system_memory_total'] = memory.total / (1024**3)
            stats['system_memory_percent'] = memory.percent

            return stats
        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {}


def benchmark_model(tool_caller: Qwen3ThinkingToolCaller, queries: List[str], iterations: int = 1):
    """Run every query through the tool caller, logging timing and memory stats.

    Returns a list of per-run dicts; failed runs carry an 'error' key.
    """
    import time

    logger.info(f"开始性能基准测试，查询数量: {len(queries)}, 迭代次数: {iterations}")

    results = []

    for round_no in range(1, iterations + 1):
        logger.info(f"开始第 {round_no} 轮测试")

        for idx, query in enumerate(queries, 1):
            started = time.time()

            try:
                answer = tool_caller.process_query(query, show_thinking=False)
            except Exception as e:
                # Record the failure and move on to the next query.
                logger.error(f"查询 {idx} 失败: {e}")
                results.append({
                    'iteration': round_no,
                    'query_index': idx,
                    'query': query,
                    'error': str(e),
                    'duration_seconds': 0
                })
                continue

            elapsed = time.time() - started
            snapshot = tool_caller.get_model_stats()

            results.append({
                'iteration': round_no,
                'query_index': idx,
                'query': query,
                'response_length': len(answer),
                'duration_seconds': elapsed,
                # crude whitespace-token throughput estimate
                'tokens_per_second': len(answer.split()) / elapsed if elapsed > 0 else 0,
                'gpu_memory_usage': {k: v for k, v in snapshot.items() if 'gpu' in k},
                'system_memory_usage': snapshot.get('system_memory_percent', 0)
            })
            logger.info(f"查询 {idx} 完成，耗时: {elapsed:.2f}s, 响应长度: {len(answer)}")

    # Summarize only the successful runs.
    ok = [r for r in results if 'error' not in r]
    if ok:
        avg_duration = sum(r['duration_seconds'] for r in ok) / len(ok)
        avg_tokens_per_sec = sum(r['tokens_per_second'] for r in ok) / len(ok)

        logger.info(f"基准测试完成:")
        logger.info(f"  成功查询: {len(ok)}/{len(results)}")
        logger.info(f"  平均响应时间: {avg_duration:.2f}秒")
        logger.info(f"  平均生成速度: {avg_tokens_per_sec:.2f} tokens/秒")

    return results


def main():
    """Demo entry point: initialize the tool caller, run scripted tests,
    a benchmark pass, then an interactive REPL."""

    # Configuration
    model_path = "/home/yangcx24/Jayx/Models/Qwen3-4B-Thinking-2507"  # Thinking variant of the model
    target_gpus = [0, 1]  # explicitly use GPU 0 and GPU 1

    print("="*80)
    print("vLLM工具调用Demo - Qwen3-4B-Thinking-2507版本 (双GPU优化)")
    print("="*80)

    # Initialize the tool-calling system (loads the model — slow).
    print("正在初始化Qwen3-4B-Thinking-2507工具调用系统...")
    try:
        tool_caller = Qwen3ThinkingToolCaller(model_path, target_gpus)
        print("✅ 初始化完成！")
        print(f"📋 已注册 {len(tool_caller.tool_registry.tool_definitions)} 个工具:")
        for tool_def in tool_caller.tool_registry.tool_definitions:
            func_info = tool_def["function"]
            print(f"   - {func_info['name']}: {func_info['description']}")
        
        # Show GPU memory state right after model load.
        stats = tool_caller.get_model_stats()
        print(f"\n💾 GPU内存使用状态:")
        for key, value in stats.items():
            if 'gpu' in key:
                print(f"   - {key}: {value:.2f}GB")
        
    except Exception as e:
        # Initialization failure is fatal for the demo.
        print(f"❌ 初始化失败: {e}")
        import traceback
        traceback.print_exc()
        return
    
    # Scripted test cases covering each tool plus a plain-chat case.
    test_queries = [
        "帮我计算 25 乘以 4 的结果，再请告诉我北京今天的天气如何",
        "请告诉我北京今天的天气如何",
        "搜索一下人工智能的最新发展",
        "计算 16 的平方根是多少", 
        "上海和深圳的天气情况对比一下",
        "计算 2 的 10 次方是多少",
        "获取一下当前系统的硬件信息",
        "你好，请介绍一下你自己",  # plain conversation, no tool call expected
        "帮我计算 (12 + 8) × 3 - 15 的结果",  # multi-step calculation
        "搜索vLLM的最新优化技术"
    ]
    
    print("\n🧪 开始测试工具调用功能:")
    print("="*80)
    
    for i, query in enumerate(test_queries, 1):
        print(f"\n🔍 测试 {i}: {query}")
        print("-"*60)
        
        try:
            # show_thinking=True prints the model's reasoning trace too.
            response = tool_caller.process_query(query, show_thinking=True)
            print(f"\n💬 最终回答: {response}")
            
            # Print a one-line memory snapshot after each query.
            stats = tool_caller.get_model_stats()
            memory_info = f"GPU内存: {stats.get('gpu_0_memory_allocated', 0):.1f}GB/{stats.get('gpu_0_memory_total', 0):.1f}GB"
            if len(target_gpus) > 1:
                memory_info += f", {stats.get('gpu_1_memory_allocated', 0):.1f}GB/{stats.get('gpu_1_memory_total', 0):.1f}GB"
            print(f"📊 {memory_info}, 系统内存: {stats.get('system_memory_percent', 0):.1f}%")
            
        except Exception as e:
            print(f"❌ 处理失败: {e}")
            import traceback
            traceback.print_exc()
        
        print("-"*60)
    
    # Benchmark pass over a small fixed query set.
    print("\n🚀 开始性能基准测试:")
    print("="*80)
    
    benchmark_queries = [
        "计算 100 的平方根",
        "查询广州的天气",
        "搜索Python最新版本",
        "获取系统信息"
    ]
    
    try:
        # NOTE(review): benchmark_results is collected but never written
        # anywhere; the "saved" message below is aspirational.
        benchmark_results = benchmark_model(tool_caller, benchmark_queries, iterations=2)
        print("基准测试结果已保存")
    except Exception as e:
        print(f"基准测试失败: {e}")
    
    # Interactive REPL with a few meta commands.
    print("\n🎯 进入交互模式:")
    print("命令说明:")
    print("  - 输入 'quit' 或 'exit' 退出")
    print("  - 输入 'thinking' 切换思考过程显示")
    print("  - 输入 'stats' 查看系统状态")
    print("  - 输入 'benchmark' 运行快速基准测试")
    print("="*80)
    
    show_thinking = False
    while True:
        try:
            user_input = input(f"\n🙋 请输入您的问题 {'(显示思考)' if show_thinking else '(隐藏思考)'}: ").strip()
            
            # Exit commands.
            if user_input.lower() in ['quit', 'exit', '退出', 'q']:
                print("👋 再见！")
                break
            
            # Toggle display of the thinking trace.
            if user_input.lower() in ['thinking', 'think', '思考']:
                show_thinking = not show_thinking
                print(f"💭 {'开启' if show_thinking else '关闭'}思考过程显示")
                continue
            
            # Dump current memory/GPU stats.
            if user_input.lower() in ['stats', 'status', '状态']:
                stats = tool_caller.get_model_stats()
                print("📊 当前系统状态:")
                for key, value in stats.items():
                    if isinstance(value, float):
                        print(f"   - {key}: {value:.2f}")
                    else:
                        print(f"   - {key}: {value}")
                continue
            
            # Quick two-query benchmark.
            if user_input.lower() in ['benchmark', 'bench', '基准']:
                print("🚀 运行快速基准测试...")
                quick_queries = ["计算 50 + 50", "查询北京天气"]
                benchmark_model(tool_caller, quick_queries, iterations=1)
                continue
            
            # Ignore empty input.
            if not user_input:
                continue
            
            print("\n🤖 Qwen3正在思考和处理...")
            response = tool_caller.process_query(user_input, show_thinking=show_thinking)
            print(f"\n💬 回答: {response}")
            
            # Brief per-turn resource summary.
            stats = tool_caller.get_model_stats()
            gpu_usage = f"{stats.get('gpu_0_memory_allocated', 0):.1f}GB"
            if len(target_gpus) > 1:
                gpu_usage += f"/{stats.get('gpu_1_memory_allocated', 0):.1f}GB"
            print(f"📈 GPU使用: {gpu_usage}, 系统内存: {stats.get('system_memory_percent', 0):.1f}%")
            
        except KeyboardInterrupt:
            # Ctrl-C exits the REPL cleanly.
            print("\n\n👋 再见！")
            break
        except Exception as e:
            print(f"❌ 处理失败: {e}")
            import traceback
            traceback.print_exc()


# Script entry point.
if __name__ == "__main__":
    main()