"""
LLM 智能体的上下文管理器。
作者: Kangrui Wang, Zihan Wang
日期: 2025-03-30
"""

from dis import pretty_flags
from itertools import zip_longest

import torch
import numpy as np
from typing import List, Dict, Any, Optional, Tuple, Union
from dataclasses import dataclass
import re
from verl import DataProto
from verl.utils.dataset.rl_dataset import collate_fn
from transformers import AutoTokenizer
import hydra
from ragen.utils import register_resolvers
from ragen.env import REGISTERED_ENV_CONFIGS
from tensordict import TensorDict
import json
from json_repair import repair_json  # 导入JSON修复模块
from dataclasses import asdict
from omegaconf import OmegaConf, DictConfig, ListConfig

register_resolvers()

# Case Query Tool metric accumulators — mirrors the STATE_METRICS pattern in
# es_manager.py. The recall_at_k entries accumulate raw sums; the avg_* and
# composite_recall entries are derived from them in
# ContextManager.get_case_query_stats().
CASE_QUERY_METRICS = {
    "total_queries": 0,
    "recall_at_1": 0.0,
    "recall_at_2": 0.0,
    "recall_at_3": 0.0,
    "recall_at_5": 0.0,
    "avg_recall_at_1": 0.0,
    "avg_recall_at_2": 0.0,
    "avg_recall_at_3": 0.0,
    "avg_recall_at_5": 0.0,
    "composite_recall": 0.0,
    "case_query_calls": 0,
}

# Weights for combining recall@k values into composite_recall (they sum to 1.0).
# Insertion order matters: the values are zipped positionally against the
# recall_at_{1,2,3,5} metrics in get_case_query_stats().
RECALL_WEIGHTS = {
    "recall_at_1": 0.4,
    "recall_at_2": 0.3,
    "recall_at_3": 0.2,
    "recall_at_5": 0.1
}


@dataclass
class ToolCall:
    """One parsed tool invocation in the OpenAI function-calling shape."""
    # Call identifier; ToolCallParser generates "call_<n>" when the payload has none.
    id: str
    # Name of the function/tool to invoke.
    name: str
    # Parsed arguments for the call (may be {"raw_content": ...} when unparseable).
    arguments: Dict[str, Any]


class ThinkingParser:
    """Parser for <think>...</think> segments emitted by Qwen3 "thinking" models.

    The regex is compiled once at class level so repeated calls avoid
    recompilation overhead.
    """

    # Compiled once; DOTALL lets the thinking block span multiple lines.
    _THINK_PATTERN = re.compile(r"<think>(.*?)</think>(.*)", re.DOTALL)

    @staticmethod
    def parse_thinking_and_content_from_tokens(output_ids: List[int], tokenizer) -> Tuple[str, str]:
        """Split token IDs at the last </think> token (id 151668, Qwen3-specific).

        Returns (thinking_text, content_text); when the marker is absent the
        whole sequence is treated as content.
        """
        split_at = 0
        # Scan backwards so the *last* </think> marker wins.
        for pos in range(len(output_ids) - 1, -1, -1):
            if output_ids[pos] == 151668:
                split_at = pos + 1
                break

        head = tokenizer.decode(output_ids[:split_at], skip_special_tokens=True)
        tail = tokenizer.decode(output_ids[split_at:], skip_special_tokens=True)
        return head.strip("\n"), tail.strip("\n")

    @staticmethod
    def parse_thinking_and_content_from_text(text: str) -> Tuple[str, str]:
        """Extract the first <think>...</think> span from `text`.

        Returns (thinking, remainder), both stripped. When no complete span is
        found, thinking is "" and the original text is returned unchanged.
        """
        found = ThinkingParser._THINK_PATTERN.search(text)
        if not found:
            return "", text
        return found.group(1).strip(), found.group(2).strip()


class ToolCallParser:
    """Parses OpenAI-style <tool_call> payloads out of model output text.

    Regexes are compiled once at class level, and JSON parsing uses a tiered
    strategy: plain json.loads first, a light cleanup second, and the
    json_repair library as a last resort.
    """

    # Compiled once (class level) and reused on every call.
    _TOOL_CALL_PATTERN = re.compile(r'<tool_call>\s*(\{.*?\})\s*</tool_call>', re.DOTALL)
    _NAME_PATTERN = re.compile(r'"name":\s*"([^"]+)"')
    _ARGS_PATTERN = re.compile(r'"arguments":\s*(\{.*\})', re.DOTALL)

    @staticmethod
    def _parse_json_fast(json_text: str) -> Optional[Dict]:
        """Parse JSON with a multi-level fallback strategy.

        Tries plain json.loads, then a whitespace-normalized variant, then a
        json_repair pass; returns None when every attempt fails.
        """
        # Fast paths: raw text first, then a lightly cleaned copy.
        for candidate in (json_text, json_text.strip().replace('\n', ' ')):
            try:
                return json.loads(candidate)
            except json.JSONDecodeError:
                continue

        # Slow path: repair the payload before one final parse attempt.
        try:
            return json.loads(repair_json(json_text))
        except Exception:
            return None

    @staticmethod
    def parse_tool_calls(text: str) -> List[ToolCall]:
        """Extract every <tool_call> block in `text` as a ToolCall.

        Falls back to regex-based field extraction when a payload cannot be
        parsed as JSON; entries still missing "name"/"arguments" afterwards
        are dropped with a log line.
        """
        calls: List[ToolCall] = []
        next_call_id = 1

        for raw_payload in ToolCallParser._TOOL_CALL_PATTERN.findall(text):
            payload = ToolCallParser._parse_json_fast(raw_payload)

            if payload is None:
                print("快速JSON解析失败，尝试手动提取")
                try:
                    name_match = ToolCallParser._NAME_PATTERN.search(raw_payload)
                    if name_match:
                        fn_name = name_match.group(1)
                        args_match = ToolCallParser._ARGS_PATTERN.search(raw_payload)
                        if args_match:
                            raw_args = args_match.group(1)
                            parsed_args = ToolCallParser._parse_json_fast(raw_args)
                            if parsed_args is None:
                                # Arguments stay unparseable: keep the raw text.
                                parsed_args = {"raw_content": raw_args}
                            payload = {"name": fn_name, "arguments": parsed_args}
                            print(f"手动解析成功: {fn_name}")
                except Exception as manual_error:
                    print(f"手动解析失败: {manual_error}")
                    continue

            # Require both mandatory fields before accepting the call.
            if payload and "name" in payload and "arguments" in payload:
                calls.append(ToolCall(
                    id=payload.get("id", f"call_{next_call_id}"),
                    name=payload["name"],
                    arguments=payload["arguments"],
                ))
                next_call_id += 1
            else:
                print("工具调用缺少必需字段或解析完全失败")

        return calls


def get_special_tokens(tokenizer: AutoTokenizer):
    """Return (turn-start token id, turn-end token id) for the model family.

    Qwen ids are looked up through the tokenizer; Llama-3 ids are hard-coded.
    Raises ValueError for any other model family.
    """
    model_name = tokenizer.name_or_path.lower()
    if "qwen" in model_name:
        return tokenizer.encode("<|im_start|>")[0], tokenizer.encode("<|im_end|>")[0]
    if "llama-3" in model_name:
        return 128006, 128009
    raise ValueError(f"Unsupported model: {tokenizer.name_or_path}")


def get_masks_and_scores(
    input_ids: torch.Tensor,
    tokenizer: AutoTokenizer,
    all_scores: List[List[float]] = None,
    all_process_scores: List[List[float]] = None,
    all_result_scores: List[List[float]] = None,
    use_turn_scores: bool = False,
    enable_response_mask: bool = False,
):
    """
    input_ids: shape (bsz, seq_len)
    Get loss mask that only learns between <|im_start|>assistant and <|im_end|>. Currently only supports qwen.
    NOTE: important! This assumes that the input_ids starts with system and then user & assistant in alternative ways

    Returns (all tensors shifted to length seq_len - 1):
      - separated mode (both all_process_scores and all_result_scores given):
        (process_score_tensor, result_score_tensor, loss_mask, response_mask)
      - legacy mode (all_scores given):
        (score_tensor, loss_mask, response_mask)
    """
    special_token, reward_token = get_special_tokens(tokenizer)

    # Cumulative count of <|im_start|> tokens labels each turn:
    # 0: pad, 1: system, even: user, odd (>1): assistant.
    turn_starts = torch.where(input_ids == special_token, 1, 0)
    turn_indicators = torch.cumsum(turn_starts, dim=-1)
    if enable_response_mask:
        loss_mask = (turn_indicators % 2 == 1) & (
            turn_indicators > 1
        )  # only learns all assistant turns
    else:
        loss_mask = turn_indicators > 1  # learns everything after system prompt
    response_mask = ((turn_indicators % 2 == 1) & (turn_indicators > 1))

    # Supports separated (process/result) rewards or the legacy single reward.
    if all_process_scores is not None and all_result_scores is not None:
        # Separated-reward path.
        process_score_tensor = torch.zeros_like(input_ids, dtype=torch.float32)
        result_score_tensor = torch.zeros_like(input_ids, dtype=torch.float32)
        
        if use_turn_scores:
            # Per-turn scores: place each turn's reward on that turn's
            # <|im_end|> token (zip_longest pads ragged turn lists with 0).
            for idx, (process_scores, result_scores) in enumerate(zip(zip_longest(*all_process_scores, fillvalue=0), 
                                                                     zip_longest(*all_result_scores, fillvalue=0))):
                process_scores = torch.tensor(process_scores, dtype=torch.float32)
                result_scores = torch.tensor(result_scores, dtype=torch.float32)
                turn_indicator = (
                    idx * 2 + 3
                )  # 0: pad. 1: system. 2+2n: user. 3+2n: assistant
                reward_position = (input_ids == reward_token) & (
                    turn_indicators == turn_indicator
                )
                # Set the last token of the rows where all positions are False to True
                reward_position[~reward_position.any(dim=-1), -1] = True
                process_score_tensor[reward_position] = process_scores
                result_score_tensor[reward_position] = result_scores
            if "qwen" in tokenizer.name_or_path.lower():
                # for Qwen, there is a "\n" between special token and reward token, so we shift this to make sure reward is assigned to the last token of a turn
                process_score_tensor = process_score_tensor.roll(shifts=1, dims=-1)
                result_score_tensor = result_score_tensor.roll(shifts=1, dims=-1)
        else:
            # Trajectory-level scores: sum per sample, place on the last token.
            process_scores = [sum(i) for i in all_process_scores]
            result_scores = [sum(i) for i in all_result_scores]
            process_score_tensor[:, -1] = torch.tensor(process_scores, dtype=torch.float32)
            result_score_tensor[:, -1] = torch.tensor(result_scores, dtype=torch.float32)
        
        # Align with next-token-prediction targets: scores drop the first
        # position, masks drop the last.
        process_score_tensor = process_score_tensor[:, 1:]  # remove the first token
        result_score_tensor = result_score_tensor[:, 1:]  # remove the first token
        loss_mask = loss_mask[:, :-1]  # remove the last token
        response_mask = response_mask[:, :-1]  # remove the last token
        
        return process_score_tensor, result_score_tensor, loss_mask, response_mask
    else:
        # Legacy single-reward path (kept for compatibility).
        # NOTE(review): this branch assumes all_scores is not None — confirm callers.
        score_tensor = torch.zeros_like(input_ids, dtype=torch.float32)
        if use_turn_scores:
            for idx, scores in enumerate(zip_longest(*all_scores, fillvalue=0)):
                scores = torch.tensor(scores, dtype=torch.float32)
                turn_indicator = (
                    idx * 2 + 3
                )  # 0: pad. 1: system. 2+2n: user. 3+2n: assistant
                reward_position = (input_ids == reward_token) & (
                    turn_indicators == turn_indicator
                )
                # Set the last token of the rows where all positions are False to True
                reward_position[~reward_position.any(dim=-1), -1] = True
                score_tensor[reward_position] = scores
            if "qwen" in tokenizer.name_or_path.lower():
                # for Qwen, there is a "\n" between special token and reward token, so we shift this to make sure reward is assigned to the last token of a turn
                score_tensor = score_tensor.roll(shifts=1, dims=-1)
        else:
            scores = [sum(i) for i in all_scores]
            score_tensor[:, -1] = torch.tensor(scores, dtype=torch.float32)
        score_tensor = score_tensor[:, 1:]  # remove the first token
        loss_mask = loss_mask[:, :-1]  # remove the last token
        response_mask = response_mask[:, :-1]  # remove the last token

        return score_tensor, loss_mask, response_mask


class ContextManager:
    """
    Manages the context of LLM <-> environment interaction.
    Translates environment outputs into LLM inputs and vice versa (bidirectional).
    """

    def __init__(
        self,
        config,
        tokenizer,
        processor=None,
        mode: str = "train",
    ):
        """Build a ContextManager.

        Args:
            config: experiment configuration (hydra/omegaconf style).
            tokenizer: tokenizer used to render chat templates.
            processor: optional processor for image data.
            mode: which es_manager section to read ("train" or "val").
        """
        self.config = config
        self.tokenizer = tokenizer
        self.processor = processor
        self.action_sep = config.agent_proxy.action_sep
        # Markers that require special handling in model responses.
        self.special_token_list = ["<think>", "</think>", "<|im_start|>", "<|im_end|>"]

        self.es_cfg = config.es_manager[mode]
        # Total environment count per tag = groups * group size.
        env_counts = {}
        for n_group, env_tag in zip(self.es_cfg.env_configs.n_groups, self.es_cfg.env_configs.tags):
            env_counts[env_tag] = n_group * self.es_cfg.group_size
        self.env_nums = env_counts

        # Qwen3-specific parsers for <think> blocks and <tool_call> payloads.
        self.thinking_parser = ThinkingParser()
        self.tool_parser = ToolCallParser()

        # Running statistics for the case-query tool.
        self.case_query_stats = CASE_QUERY_METRICS.copy()

        self._init_prefix_lookup()

    def _check_env_installed(self, env_type: str):
        """Raise ValueError when `env_type` has no registered environment config."""
        if env_type in REGISTERED_ENV_CONFIGS:
            return
        raise ValueError(
            f"Environment {env_type} is not installed. Please install it using the scripts/setup_{env_type}.sh script."
        )
    def _format_tools_for_prompt(self, tools_config: Dict, stage_tools_list: List[str]) -> str:
        """Render the tool definitions available in a stage as a prompt section.

        Args:
            tools_config: full tool-definition mapping (may be a DictConfig).
            stage_tools_list: tool names enabled for the stage (may be a ListConfig).

        Returns:
            The formatted "# Tools" prompt section, or "" when no tool applies.
        """
        if not stage_tools_list or not tools_config:
            return ""

        # Normalize OmegaConf containers to plain Python objects.
        if isinstance(stage_tools_list, ListConfig):
            stage_tools_list = OmegaConf.to_container(stage_tools_list, resolve=True)
        if isinstance(tools_config, DictConfig):
            tools_config = OmegaConf.to_container(tools_config, resolve=True)

        # Keep only well-formed function tools enabled for this stage.
        selected_tools = [
            {"type": "function", "function": tools_config[name]["function"]}
            for name in stage_tools_list
            if name in tools_config
            and tools_config[name].get("type") == "function"
            and "function" in tools_config[name]
        ]
        if not selected_tools:
            return ""

        # Assemble the prompt section line by line.
        segments = [
            "",
            "# Tools",
            "",
            "You may call one or more functions to assist with the user query.",
            "",
            "You are provided with function signatures within <tools></tools> XML tags:",
            "<tools>",
        ]
        segments.extend(json.dumps(tool, ensure_ascii=False) for tool in selected_tools)
        segments.extend([
            "</tools>",
            "",
            "For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:",
            "<tool_call>",
            '{"name": <function-name>, "arguments": <args-json-object>}',
            "</tool_call>",
        ])
        return "\n".join(segments)
    
    def _init_prefix_lookup(self):
        """Build prompt-prefix and tool lookup tables for every environment.

        Populates:
          - self.prefix_lookup: env_id -> system prompt, plus "<env_id>_mid" /
            "<env_id>_final" string keys for turn-specific prompts;
          - self.env_config_lookup: generation settings per env;
          - self.tools_lookup / self.stage_tools_lookup: tool definitions and
            per-stage tool-name lists per env tag.
        Also substitutes {TOOLS_PLACEHOLDER} inside the instructions and writes
        the resolved text back into self.config.custom_envs.
        """
        prefix_lookup = {}
        prefixes = {}
        env_config_lookup = {}
        tools_lookup = {}  # tool definitions per env tag
        stage_tools_lookup = {}  # per-stage tool-name lists per env tag
        env_config = {}
        
        for env_tag, env_config in self.config.custom_envs.items():
            # [bug fix] Process every env config, including base templates such
            # as "_aiops_base" that are not listed in the active tags.
            skip_this_env = env_tag not in self.es_cfg.env_configs.tags
            
            if skip_this_env:
                # Base configs (leading underscore) are still processed below,
                # but the install check is skipped for them.
                if not env_tag.startswith("_"):  # skip non-base configs entirely
                    continue

            # Only check installation for environments that are actually used.
            if not skip_this_env:
                self._check_env_installed(env_config.env_type)
            env_config_new = asdict(REGISTERED_ENV_CONFIGS[env_config.env_type]())
            for k, v in env_config.items():
                env_config_new[k] = v
            env_instruction = env_config_new.get("env_instruction", "")

            # Optional: append the grid vocabulary and action list to the prompt.
            if env_config_new.get("grid_vocab", False):
                grid_vocab_str = (
                    "\nThe meaning of each symbol in the state is:\n"
                    + ", ".join(
                        [f"{k}: {v}" for k, v in env_config_new["grid_vocab"].items()]
                    )
                )
                env_instruction += grid_vocab_str
            if env_config_new.get("action_lookup", False):
                action_lookup_str = "\nYour available actions are:\n" + ", ".join(
                    [f"{v}" for k, v in env_config_new["action_lookup"].items()]
                )
                action_lookup_str += (
                    f"\nYou can make up to {env_config_new['max_actions_per_traj']} actions, separated by the action separator \" "
                    + self.action_sep
                    + ' "\n'
                )
                env_instruction += action_lookup_str
            prefixes[env_tag] = env_instruction
            
            # Mid-turn and final-turn instructions, when configured.
            mid_turn_inst = ""
            final_turn_inst = ""
            if "mid_turn_instruction" in env_config_new:
                mid_turn_inst = env_config_new["mid_turn_instruction"]
                prefixes[f"{env_tag}_mid"] = mid_turn_inst
            if "final_turn_instruction" in env_config_new:
                final_turn_inst = env_config_new["final_turn_instruction"]
                prefixes[f"{env_tag}_final"] = final_turn_inst

            # Extract tool definitions and the per-stage tool configuration.
            if "tools" in env_config_new:
                tools_lookup[env_tag] = env_config_new["tools"]
            else:
                tools_lookup[env_tag] = {}
            
            if "stage_tools" in env_config_new:
                stage_tools_lookup[env_tag] = env_config_new["stage_tools"]
            else:
                # No stage config: default to all tools at every stage.
                stage_tools_lookup[env_tag] = {
                    "initial": list(tools_lookup[env_tag].keys()) if tools_lookup[env_tag] else [],
                    "middle": list(tools_lookup[env_tag].keys()) if tools_lookup[env_tag] else [],
                    "final": list(tools_lookup[env_tag].keys()) if tools_lookup[env_tag] else []
                }
            
            # [bug fix, key step] Substitute the tool placeholder for every env
            # config (including base templates such as _aiops_base).
            tools_config = tools_lookup.get(env_tag, {})
            stage_tools = stage_tools_lookup.get(env_tag, {})
            
            # Render tool descriptions for each stage.
            initial_tools_prompt = self._format_tools_for_prompt(
                tools_config, stage_tools.get("initial", [])
            )
            middle_tools_prompt = self._format_tools_for_prompt(
                tools_config, stage_tools.get("middle", [])
            )
            final_tools_prompt = self._format_tools_for_prompt(
                tools_config, stage_tools.get("final", [])
            )
            
            # Replace the placeholder in each instruction variant.
            env_instruction_replaced = env_instruction.replace("{TOOLS_PLACEHOLDER}", initial_tools_prompt)
            mid_turn_inst_replaced = mid_turn_inst.replace("{TOOLS_PLACEHOLDER}", middle_tools_prompt) if mid_turn_inst else ""
            final_turn_inst_replaced = final_turn_inst.replace("{TOOLS_PLACEHOLDER}", final_tools_prompt) if final_turn_inst else ""
            
            # Update prefixes (used below to build prefix_lookup).
            prefixes[env_tag] = env_instruction_replaced
            if mid_turn_inst_replaced:
                prefixes[f"{env_tag}_mid"] = mid_turn_inst_replaced
            if final_turn_inst_replaced:
                prefixes[f"{env_tag}_final"] = final_turn_inst_replaced
            
            # [bug fix] Write the resolved instructions back into the env config;
            # temporarily disable OmegaConf struct mode to allow the mutation.
            env_cfg = self.config.custom_envs[env_tag]
            struct_mode = OmegaConf.is_struct(env_cfg)
            if struct_mode:
                OmegaConf.set_struct(env_cfg, False)
            
            try:
                # Update the instruction fields.
                env_cfg.env_instruction = env_instruction_replaced
                if final_turn_inst:
                    env_cfg.final_turn_instruction = final_turn_inst_replaced
                if mid_turn_inst:
                    env_cfg.mid_turn_instruction = mid_turn_inst_replaced
            finally:
                # Restore struct mode.
                if struct_mode:
                    OmegaConf.set_struct(env_cfg, True)

            # Only environments listed in the active tags get generation settings.
            if not skip_this_env:
                env_config_lookup[env_tag] = {
                    "max_tokens": env_config.get(
                        "max_tokens", self.config.actor_rollout_ref.rollout.response_length
                    )
                }

        tags = self.es_cfg.env_configs.tags
        n_groups = self.es_cfg.env_configs.n_groups
        group_size = self.es_cfg.group_size

        cur_group = 0
        for env_tag, n_group in zip(tags, n_groups):
            # Instructions here already had their placeholders substituted above.
            env_instruction = prefixes[env_tag]
            final_turn_instruction = prefixes.get(f"{env_tag}_final", env_instruction)
            mid_turn_instruction = prefixes.get(f"{env_tag}_mid", "")
            
            start_idx = cur_group * group_size
            end_idx = (cur_group + n_group) * group_size

            for i in range(start_idx, end_idx):
                prefix_lookup[i] = env_instruction
                # NOTE(review): env_config_lookup mixes tag keys and int env-id
                # keys in the same dict — confirm this is intended.
                env_config_lookup[i] = env_config_lookup[env_tag]
                # Store final-turn and mid-turn prefixes under string keys.
                prefix_lookup[f"{i}_final"] = final_turn_instruction
                if mid_turn_instruction:
                    prefix_lookup[f"{i}_mid"] = mid_turn_instruction
            
            cur_group += n_group

        self.prefix_lookup = prefix_lookup
        self.env_config_lookup = env_config_lookup
        self.tools_lookup = tools_lookup  # tool definitions per env tag
        self.stage_tools_lookup = stage_tools_lookup  # per-stage tool lists per env tag
        
        
        
        
    def _parse_response(self, response: str) -> Tuple[str, List[str], str, str, bool]:
        """
        解析LLM响应 - 严格按照OpenAI格式：<think>...</think> <tool_call>...</tool_call>
        """
        # 初始化变量
        think_content = ""
        actions = []
        is_format_valid = True
        
        # 第一步：解析thinking内容（如果启用）
        if self.config.agent_proxy.enable_think:
            think_content, remaining_after_think = self.thinking_parser.parse_thinking_and_content_from_text(response)
        else:
            think_content = ""
            remaining_after_think = response
        
        # 第二步：
        parsed_tool_calls = self.tool_parser.parse_tool_calls(remaining_after_think)
        
        # 转换为环境期望的格式
        for tool_call in parsed_tool_calls:
            action_obj = {
                "tool_call": tool_call.name,
                "args": tool_call.arguments
            }
            actions.append(json.dumps(action_obj, ensure_ascii=False))
        
        # 限制动作数量
        max_actions = self.config.agent_proxy.max_actions_per_turn
        if len(actions) > max_actions:
            actions = actions[:max_actions]
        if self.config.agent_proxy.enable_think:
            response = f"\n<think>\n{think_content}\n</think>\n" + remaining_after_think
        else:
            response = remaining_after_think

        return response, actions, think_content, is_format_valid

    def _normalize_score_tensor(
        self, score_tensor: torch.Tensor = None, env_outputs: List[Dict] = None,
        process_score_tensor: torch.Tensor = None, result_score_tensor: torch.Tensor = None
    ):
        """
        Normalize the score tensor to be between 0 and 1.
        Supports both single tensor (legacy) and dual tensor (process + result) modes.
        NOTE: only support score at the last token for now
        """
        assert (
            self.config.agent_proxy.use_turn_scores == False
        ), "Reward normalization is not supported for use_turn_scores == True"

        if process_score_tensor is not None and result_score_tensor is not None:
            return self._normalize_dual_tensors(process_score_tensor, result_score_tensor, env_outputs)
        elif score_tensor is not None:
            return self._normalize_single_tensor(score_tensor, env_outputs)
        else:
            raise ValueError("Must provide either score_tensor or both process_score_tensor and result_score_tensor")

    def _normalize_single_tensor(self, score_tensor: torch.Tensor, env_outputs: List[Dict]) -> torch.Tensor:
        """标准化单一奖励张量（兼容模式）"""
        norm_func = self._get_normalization_function()
        group2index = self._get_group_indices(env_outputs)
        
        # apply penalty pre-normalization
        acc_scores = score_tensor[:, -1]
        normalized_acc_scores = acc_scores.clone()
        penalty = torch.tensor(
            [env_output.get("penalty", 0) for env_output in env_outputs],
            dtype=torch.float32,
        )
        normalized_acc_scores = normalized_acc_scores + penalty

        if len(group2index) < acc_scores.shape[0]:  # the group size > 1
            for group, index in group2index.items():
                normalized_acc_scores[index] = norm_func(normalized_acc_scores[index])

        score_tensor[:, -1] = normalized_acc_scores
        return score_tensor

    def _normalize_dual_tensors(self, process_tensor: torch.Tensor, result_tensor: torch.Tensor, env_outputs: List[Dict]):
        """标准化分离奖励张量"""
        norm_func = self._get_normalization_function()
        group2index = self._get_group_indices(env_outputs)
        
        # 分别标准化过程奖励和结果奖励
        process_scores = process_tensor[:, -1]
        result_scores = result_tensor[:, -1]
        
        normalized_process_scores = process_scores.clone()
        normalized_result_scores = result_scores.clone()
        
        # apply penalty pre-normalization to process scores only
        penalty = torch.tensor(
            [env_output.get("penalty", 0) for env_output in env_outputs],
            dtype=torch.float32,
        )
        normalized_process_scores = normalized_process_scores + penalty

        if len(group2index) < process_scores.shape[0]:  # the group size > 1
            for group, index in group2index.items():
                normalized_process_scores[index] = norm_func(normalized_process_scores[index])
                normalized_result_scores[index] = norm_func(normalized_result_scores[index])

        process_tensor[:, -1] = normalized_process_scores
        result_tensor[:, -1] = normalized_result_scores
        
        return process_tensor, result_tensor

    def _get_normalization_function(self):
        """获取标准化函数"""
        rn_cfg = self.config.agent_proxy.reward_normalization
        method = rn_cfg.method
        
        if method == "mean_std":
            return lambda x: (
                (x - x.mean(dim=-1, keepdim=True))
                / (x.std(dim=-1, keepdim=True) + 1e-6)
                if x.std(dim=-1, keepdim=True).abs().max() > 1e-6
                else torch.zeros_like(x)
            )  # stable to bf16 than x.std()
        elif method == "mean":
            return lambda x: (x - x.mean(dim=-1, keepdim=True))
        elif method == "asym_clip":
            return lambda x: (
                (x - x.mean(dim=-1, keepdim=True))
                / (x.std(dim=-1, keepdim=True) + 1e-6)
                if x.std(dim=-1, keepdim=True).abs().max() > 1e-6
                else torch.zeros_like(x)
            ).clamp(min=-1, max=3)
        elif method == "identity":
            return lambda x: x
        else:
            raise ValueError(f"Invalid normalization method: {method}")

    def _get_group_indices(self, env_outputs: List[Dict]):
        """获取分组索引"""
        rn_cfg = self.config.agent_proxy.reward_normalization
        grouping = rn_cfg.grouping
        
        if grouping == "state":
            group_tags = [env_output["group_id"] for env_output in env_outputs]
        elif grouping == "inductive":
            group_tags = [env_output["tag"] for env_output in env_outputs]
        elif grouping == "batch":
            group_tags = [1] * len(env_outputs)
        else:
            raise ValueError(f"Invalid grouping: {grouping}")

        # apply groupwise normalization
        group2index = {}
        for i, env_tag in enumerate(group_tags):
            if env_tag not in group2index:
                group2index[env_tag] = []
            group2index[env_tag].append(i)
        group2index = {k: torch.tensor(v) for k, v in group2index.items()}
        return group2index

    def _get_env_tag_by_id(self, env_id: int) -> Optional[str]:
        """根据环境ID获取环境标签 - 新增辅助方法"""
        tags = self.es_cfg.env_configs.tags
        n_groups = self.es_cfg.env_configs.n_groups
        group_size = self.es_cfg.group_size

        cur_group = 0
        for env_tag, n_group in zip(tags, n_groups):
            start_idx = cur_group * group_size
            end_idx = (cur_group + n_group) * group_size

            if start_idx <= env_id < end_idx:
                return env_tag

            cur_group += n_group

        return None

    def _detect_case_query_calls(self, actions: List[str]) -> int:
        """
        检测 actions 中 case_query_tool 的调用次数

        Args:
            actions: 解析后的动作列表

        Returns:
            case_query_tool 调用次数
        """
        case_query_calls = 0
        for action in actions:
            try:
                action_data = json.loads(action)
                tool_name = action_data.get("tool_call", "")
                if "case_query_tool" in tool_name.lower():
                    case_query_calls += 1
            except (json.JSONDecodeError, AttributeError):
                continue
        return case_query_calls

    def _update_case_query_stats(self, actions: List[str], env_id: int = None):
        """
        更新 case_query_tool 统计信息

        Args:
            actions: 解析后的动作列表
            env_id: 环境ID（可选，用于细粒度统计）
        """
        case_query_calls = self._detect_case_query_calls(actions)
        if case_query_calls > 0:
            self.case_query_stats["case_query_calls"] += case_query_calls
            # 这里可以扩展添加更详细的统计逻辑
            # 例如：按环境ID分组统计、记录调用时间等

    def get_case_query_stats(self) -> Dict[str, Any]:
        """Return a snapshot of the case-query statistics with derived metrics.

        avg_recall_at_k = cumulative recall_at_k / case_query_calls, with a
        division-by-zero guard (0.0 when there were no calls).
        composite_recall is the RECALL_WEIGHTS-weighted sum of the cumulative
        recall_at_k values.
        NOTE(review): composite_recall uses the raw cumulative sums rather
        than the avg_* values — confirm that is intended.
        """
        snapshot = self.case_query_stats.copy()
        calls = snapshot["case_query_calls"]

        recall_keys = ("recall_at_1", "recall_at_2", "recall_at_3", "recall_at_5")
        for key in recall_keys:
            snapshot[f"avg_{key}"] = snapshot[key] / calls if calls > 0 else 0.0

        snapshot["composite_recall"] = sum(
            weight * snapshot[key]
            for key, weight in zip(recall_keys, RECALL_WEIGHTS.values())
        )

        return snapshot

    def reset_case_query_stats(self):
        """Reset all case-query statistics to their zero baseline."""
        self.case_query_stats = dict(CASE_QUERY_METRICS)

    def get_lm_inputs(
        self,
        env_outputs: List[Dict],
        prepare_for_update: bool,
        meta_info: Dict = None,
    ) -> DataProto:
        """
        Build tokenized LLM inputs (and, for update steps, reward tensors)
        from per-environment rollout histories.

        env_outputs - see the example below
        [
            {"env_id": 1, "history": [{"state": "###\n#x_#", "llm_response": "Response 1", "reward": 0.5}, {"state": "###\n#x_#"}]},
            {"env_id": 2, "history": [{"state": "###\n#x_#"}]},
            ...
        ]
        prefix_lookup - mapping from env_id to the initial (system) prompt

        Args:
            env_outputs: per-environment dicts with "env_id", "group_id",
                "history" and (when preparing an update) "metrics".
            prepare_for_update: when True, also compute loss masks and
                reward tensors needed for the policy update; when False,
                a generation prompt is appended instead.
            meta_info: optional dict; may carry "control_parameter_u",
                the weight used to blend process and result rewards.

        Returns:
            DataProto with tokenized batch tensors and non-tensor metadata.
        """
        llm_input_texts = []
        messages_list = []  # kept alongside the texts for API-style calling

        for env_output in env_outputs:
            # Optionally keep only the last `max_context_window` turns.
            max_k = getattr(self.config.agent_proxy, "max_context_window", None)
            if max_k is not None and isinstance(max_k, int) and max_k > 0:
                env_output["history"] = env_output["history"][-max_k:]

            messages = [
                {"role": "system", "content": self.prefix_lookup[env_output["env_id"]]},
            ]

            for content in env_output["history"]:
                if "state" in content:
                    messages.append({"role": "user", "content": content["state"]})

                if "llm_response" in content:
                    messages.append(
                        {"role": "assistant", "content": content["llm_response"]}
                    )

            # NOTE: this invariant is important for loss mask computation:
            # every history entry carrying "llm_response" must produce
            # exactly one assistant message.
            assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]
            expected_assistant_count = len([content for content in env_output["history"] if "llm_response" in content])
            assert len(assistant_messages) == expected_assistant_count, f"Assistant消息数量不匹配: 期望{expected_assistant_count}, 实际{len(assistant_messages)}"

            text = self.tokenizer.apply_chat_template(
                messages,
                add_generation_prompt=(not prepare_for_update),
                tokenize=False,
                enable_thinking=True
            )

            llm_input_texts.append(text)
            messages_list.append(messages)

        # Left-pad without truncation; any length handling happens downstream.
        inputs = self.tokenizer(
            llm_input_texts,
            return_tensors="pt",
            padding=True,
            padding_side="left",
            truncation=False,
        )
        input_ids, attention_mask = inputs.input_ids, inputs.attention_mask
        position_ids = (attention_mask.cumsum(dim=-1) - 1).clamp(min=0)

        if prepare_for_update:
            # Extract per-turn process rewards and result rewards separately.
            process_scores = [
                [i.get("process_reward", 0.0) for i in env_output["history"]]
                for env_output in env_outputs
            ]
            result_scores = [
                [i.get("result_reward", 0.0) for i in env_output["history"]]
                for env_output in env_outputs
            ]

            # Separated-reward interface: masks plus one tensor per reward kind.
            process_score_tensor, result_score_tensor, loss_mask, response_mask = get_masks_and_scores(
                input_ids,
                self.tokenizer,
                all_process_scores=process_scores,
                all_result_scores=result_scores,
                use_turn_scores=self.config.agent_proxy.use_turn_scores,
                enable_response_mask=self.config.enable_response_mask,
            )

            # Normalize each reward tensor independently (skipped when
            # per-turn scores are used).
            if not self.config.agent_proxy.use_turn_scores:
                normalized_process_tensor, normalized_result_tensor = self._normalize_score_tensor(
                    process_score_tensor=process_score_tensor,
                    result_score_tensor=result_score_tensor,
                    env_outputs=env_outputs
                )
            else:
                normalized_process_tensor = process_score_tensor
                normalized_result_tensor = result_score_tensor

            response_length = response_mask.sum(dim=-1).float().mean().item()

        llm_inputs = DataProto()
        llm_inputs.batch = TensorDict(
            {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "position_ids": position_ids,
                "responses": input_ids[:, 1:],  # remove the first token
            },
            batch_size=input_ids.shape[0],
        )

        if prepare_for_update:
            llm_inputs.batch["loss_mask"] = loss_mask
            llm_inputs.batch["process_rm_scores"] = normalized_process_tensor
            llm_inputs.batch["result_rm_scores"] = normalized_result_tensor
            # Raw (pre-normalization) score sum, kept for compatibility.
            llm_inputs.batch["original_rm_scores"] = process_score_tensor + result_score_tensor

            # Control parameter u blends process and result rewards.
            control_parameter_u = 1.0  # default: process reward only
            if meta_info is not None and "control_parameter_u" in meta_info:
                control_parameter_u = meta_info["control_parameter_u"]
                print(f"  💫 使用控制参数u = {control_parameter_u:.4f} 混合奖励")
                print(f"     - 过程奖励权重: {control_parameter_u:.4f}")
                print(f"     - 结果奖励权重: {1.0 - control_parameter_u:.4f}")

            # Blend rewards: rm_scores = process * u + result * (1 - u).
            # BUG FIX: previously `u` was computed (and printed) but never
            # applied — both tensors were always summed unweighted, which
            # contradicted the documented formula above.
            llm_inputs.batch["rm_scores"] = (
                normalized_process_tensor * control_parameter_u
                + normalized_result_tensor * (1.0 - control_parameter_u)
            )
        llm_inputs.non_tensor_batch = {
            "env_ids": np.array(
                [env_output["env_id"] for env_output in env_outputs], dtype=object
            ),
            "group_ids": np.array(
                [env_output["group_id"] for env_output in env_outputs], dtype=object
            ),
            "messages_list": np.array(messages_list, dtype=object),
            "env_infos": np.array(
                [env_output.get("env_info", {}) for env_output in env_outputs],
                dtype=object,
            ),
        }

        if prepare_for_update:
            # Per-sample metrics, stored for downstream data collection.
            sample_metrics_list = []
            for env_output in env_outputs:
                sample_metrics = env_output.get("metrics", {}).copy()
                sample_metrics_list.append(sample_metrics)

            llm_inputs.non_tensor_batch["sample_metrics"] = np.array(
                sample_metrics_list, dtype=object
            )

            # Aggregate metrics for meta_info: mean per environment type
            # (key prefix before "/" selects the env count used as divisor).
            metrics = {}
            for env_output in env_outputs:
                for key, value in env_output["metrics"].items():
                    if key not in metrics:
                        metrics[key] = []
                    metrics[key].append(value)
            mean_metrics = {
                key: np.sum(value) / self.env_nums[key.split("/")[0]]
                for key, value in metrics.items()
            }
            metrics = mean_metrics
            metrics["response_length"] = response_length
            llm_inputs.meta_info = {"metrics": metrics}
        return llm_inputs

    def get_env_inputs(self, lm_outputs: DataProto) -> List[Dict]:
        """Decode model outputs, parse actions, and package per-env inputs.

        Uses the token tensor under "responses" when present; otherwise
        falls back to pre-decoded texts in ``non_tensor_batch``.
        """
        if lm_outputs.batch is not None and "responses" in lm_outputs.batch.keys():
            responses = self.tokenizer.batch_decode(
                lm_outputs.batch["responses"], skip_special_tokens=True
            )
        else:
            # DataProto already carries textual responses.
            responses = lm_outputs.non_tensor_batch["response_texts"]

        env_inputs = []
        for env_id, raw_response in zip(
            lm_outputs.non_tensor_batch["env_ids"], responses
        ):
            parsed_response, parsed_actions, thinking, format_ok = (
                self._parse_response(raw_response)
            )

            # Record case_query_tool usage before snapshotting stats below,
            # so each env sees the counters including its own calls.
            self._update_case_query_stats(parsed_actions, env_id)

            env_inputs.append(
                {
                    "env_id": env_id,
                    "llm_raw_response": raw_response,
                    "llm_response": parsed_response,
                    "actions": parsed_actions,
                    "think_content": thinking,
                    "is_format_valid": format_ok,
                    # Optional running stats snapshot attached per env.
                    "case_query_stats": self.get_case_query_stats(),
                }
            )
        return env_inputs

    def formulate_rollouts(self, env_outputs: List[Dict], meta_info: Dict = None) -> DataProto:
        """Prepare update-ready LLM inputs from completed rollouts.

        Attaches the case_query_tool statistics snapshot to ``meta_info``
        when at least one case_query call was recorded.
        """
        meta_info = {} if meta_info is None else meta_info

        stats = self.get_case_query_stats()
        if stats["case_query_calls"] > 0:
            meta_info["case_query_stats"] = stats
            # Optionally reset the counters after each rollout:
            # self.reset_case_query_stats()

        return self.get_lm_inputs(env_outputs, prepare_for_update=True, meta_info=meta_info)
