#!/usr/bin/env python3
"""
生成verl SFT格式的数据集，支持合并多个parquet文件并按比例分割训练/测试集。

主要功能：
- 合并多个parquet文件（支持train.parquet、test.parquet或任意.parquet文件）
- 按split_ratio分割为训练集和测试集
- 数据预处理（可选，默认启用）：
  1. 工具调用格式验证：
     * 验证<tool_call>...</tool_call>块的JSON格式
     * 验证必需字段：'name'（字符串）、'arguments'（对象）
     * 根据envs.yaml中的工具定义验证参数完整性和类型
     * 删除包含无效工具调用的assistant消息及其后续user消息
  2. Metrics质量过滤（四项质量标准，支持任意前缀）：
     * 包含'success'的指标 必须为 1.0
     * 包含'top_5'的指标 必须为 1.0
     * 包含'action_is_effective'的指标 必须 > 0.9
     * 包含'num_actions'的指标 必须 >= 25
     * 不满足任一标准的样本将被过滤
     * 支持任意前缀（如 AIOps-22/, MyTask/ 等）
- 生成info.json统计信息及详细的数据过滤报告

数据格式要求：
- 输入parquet必须包含'messages'列（标准ChatML格式）
- 可选'metrics'列（训练指标，用于质量过滤）
- 可选'info'列（元数据信息）

使用示例：
  # 从指定文件夹合并，使用8:2分割（默认启用预处理）
  python merge_dataset.py --inputs /path/to/folder1 /path/to/folder2 \
                          --output /path/to/output \
                          --split-ratio 0.8

  # 从父目录合并所有子文件夹，使用9:1分割
  python merge_dataset.py --parent /path/to/dataset \
                          --output /path/to/output \
                          --split-ratio 0.9
                          
  # 禁用数据预处理（保留所有原始数据）
  python merge_dataset.py --parent /path/to/dataset \
                          --output /path/to/output \
                          --no-preprocess
"""

import os
import re
import sys
import json
import argparse
import logging
from datetime import datetime
from typing import List, Dict, Optional, Tuple, Any

import pandas as pd
import numpy as np
import yaml


def configure_logging(level: str = "INFO") -> None:
    """Reset the root logger and route all records to stdout.

    Clears any previously attached handlers so repeated calls (or library
    side effects) do not produce duplicate log lines.
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    root_logger.setLevel(logging.getLevelName(level.upper()))
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    )
    root_logger.addHandler(stream_handler)


def read_parquet_safe(path: str) -> pd.DataFrame:
    """Read a parquet file, returning an empty frame for missing paths.

    Tries the pyarrow engine first (better nested-data support); when that
    raises, the fastparquet engine is attempted and any error from it
    propagates to the caller.
    """
    if not os.path.isfile(path):
        return pd.DataFrame()
    try:
        return pd.read_parquet(path, engine="pyarrow")
    except Exception:
        pass
    return pd.read_parquet(path, engine="fastparquet")


def write_parquet_safe(df: pd.DataFrame, path: str) -> None:
    """Write *df* to parquet at *path* without the index.

    Prefers pyarrow for its nested-data support; falls back to the
    fastparquet engine when pyarrow raises.
    """
    try:
        df.to_parquet(path, index=False, engine="pyarrow")
        return
    except Exception:
        pass
    df.to_parquet(path, index=False, engine="fastparquet")


def collect_all_parquets(dirs: List[str]) -> pd.DataFrame:
    """
    Collect and concatenate every ``*.parquet`` file found in *dirs*.

    Handles train.parquet, test.parquet, and any other parquet file.

    Args:
        dirs: directories to scan (non-directories are skipped with a warning)

    Returns:
        One concatenated DataFrame, or an empty DataFrame when nothing loads.
    """
    frames: List[pd.DataFrame] = []

    for folder in dirs:
        if not os.path.isdir(folder):
            logging.warning(f"跳过非目录路径: {folder}")
            continue

        # Discover every parquet file directly inside this directory
        candidates = [
            (name, os.path.join(folder, name))
            for name in os.listdir(folder)
            if name.endswith('.parquet')
        ]

        if not candidates:
            logging.info(f"目录中未找到parquet文件: {folder}")
            continue

        for name, full_path in candidates:
            try:
                frame = read_parquet_safe(full_path)
            except Exception as exc:
                logging.error(f"读取文件失败 {full_path}: {exc}")
                continue
            if frame.empty:
                logging.warning(f"文件为空: {full_path}")
            else:
                frames.append(frame)
                logging.info(f"✓ 加载文件: {full_path} ({len(frame)} 行)")

    if not frames:
        logging.warning("未找到任何有效的parquet文件")
        return pd.DataFrame()

    combined = pd.concat(frames, ignore_index=True)
    logging.info(f"📊 合并完成: 共 {len(combined)} 行数据（来自 {len(frames)} 个文件）")
    return combined


def load_info(path: str) -> Optional[Dict]:
    """Load ``info.json`` from directory *path*.

    Returns the parsed dict, or None when the file is absent or unreadable
    (missing path, bad JSON, permission error — all swallowed deliberately).
    """
    try:
        with open(os.path.join(path, "info.json"), "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        return None


def weighted_average_infos(infos: List[Dict]) -> Dict:
    """Merge several info dicts into one.

    Sample counts are summed; each metric is averaged weighted by the
    contributing info's ``total_samples``. Non-numeric metric values are
    skipped. Falsy entries in *infos* are ignored.
    """
    totals = {"total": 0, "train": 0, "test": 0}
    weighted_sums: Dict[str, float] = {}
    weight_totals: Dict[str, float] = {}

    for entry in infos:
        if not entry:
            continue
        sample_weight = float(entry.get("total_samples", 0.0) or 0.0)
        totals["train"] += int(entry.get("train_samples", 0) or 0)
        totals["test"] += int(entry.get("test_samples", 0) or 0)
        totals["total"] += int(sample_weight)

        for key, raw in (entry.get("metrics", {}) or {}).items():
            try:
                numeric = float(raw)
            except Exception:
                # Non-numeric metrics are ignored
                continue
            # Each metric is weighted by this entry's total_samples
            weighted_sums[key] = weighted_sums.get(key, 0.0) + numeric * sample_weight
            weight_totals[key] = weight_totals.get(key, 0.0) + sample_weight

    averaged = {
        key: (float(total / weight_totals[key]) if weight_totals.get(key, 0.0) > 0 else 0.0)
        for key, total in weighted_sums.items()
    }

    return {
        "timestamp": datetime.now().strftime("%Y%m%d_%H%M%S"),
        "total_samples": int(totals["total"]),
        "train_samples": int(totals["train"]),
        "test_samples": int(totals["test"]),
        "metrics": averaged,
    }


# Matches one <tool_call>...</tool_call> block (case-insensitive, spanning
# newlines); group 1 captures the JSON payload between the tags.
TOOL_CALL_PATTERN = re.compile(r"<tool_call>(.*?)</tool_call>", re.IGNORECASE | re.DOTALL)


def extract_tool_call_json_from_content(
    content: str,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Extract and validate the first tool call in an assistant message.

    Returns:
        (parsed_json, error_reason)
        - (None, None): no tool call present — valid pure-text response
        - (dict, None): a well-formed tool call was found and parsed
        - (None, error_msg): a tool call was present but malformed
    """
    if not content or not isinstance(content, str):
        return None, "assistant content is empty or non-string"

    found = TOOL_CALL_PATTERN.search(content)
    if found is None:
        # Absence of a tool call is legitimate (plain-text answer)
        return None, None

    payload = found.group(1).strip()
    try:
        parsed = json.loads(payload)
    except Exception as e:
        return None, f"invalid JSON in <tool_call>: {e}"

    if not isinstance(parsed, dict):
        return None, "parsed <tool_call> is not a JSON object"

    # 'name' is mandatory and must be a string
    if "name" not in parsed:
        return None, "tool_call must have 'name' field"
    if not isinstance(parsed["name"], str):
        return None, "tool_call 'name' field must be a string"

    # 'arguments' is mandatory and must be an object
    if "arguments" not in parsed:
        return None, "tool_call must have 'arguments' field"
    if not isinstance(parsed["arguments"], dict):
        return None, "tool_call 'arguments' field must be a JSON object"

    return parsed, None


class ToolDefinitionValidator:
    """Validates parsed tool calls against the tool schemas from envs.yaml."""

    def __init__(self, tools_config: Dict[str, Any]):
        """
        Initialize the validator.

        Args:
            tools_config: the 'tools' mapping loaded from envs.yaml
                (tool name -> {"type": "function", "function": {...}})
        """
        self.tools_config = tools_config
        self.valid_tools = set(tools_config.keys())
        logging.info(f"✅ 初始化工具验证器，支持工具: {self.valid_tools}")

    def validate_tool_call(self, tool_call: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
        """
        Check a parsed tool call against its declared schema.

        Args:
            tool_call: parsed tool call dict {"name": "xxx", "arguments": {...}}

        Returns:
            (is_valid, error_message); error_message is None when valid.
        """
        # 1. Basic structure
        if not isinstance(tool_call, dict):
            return False, "工具调用不是字典类型"

        if "name" not in tool_call:
            return False, "缺少'name'字段"

        if "arguments" not in tool_call:
            return False, "缺少'arguments'字段"

        tool_name = tool_call["name"]
        arguments = tool_call["arguments"]

        # 2. 'name' must be a string
        if not isinstance(tool_name, str):
            return False, f"'name'字段必须是字符串，实际类型: {type(tool_name)}"

        # 3. 'arguments' must be an object
        if not isinstance(arguments, dict):
            return False, f"'arguments'字段必须是字典，实际类型: {type(arguments)}"

        # 4. Tool must be one of the configured tools
        if tool_name not in self.valid_tools:
            return False, f"未知的工具名称: {tool_name}"

        # 5. Sanity-check the schema shape for this tool
        tool_def = self.tools_config[tool_name]
        if tool_def.get("type") != "function":
            return False, f"工具{tool_name}类型错误"

        function_def = tool_def.get("function", {})
        parameters = function_def.get("parameters", {})

        # 6. All required parameters must be present
        required_params = parameters.get("required", [])
        for param_name in required_params:
            if param_name not in arguments:
                return False, f"工具{tool_name}缺少必需参数: {param_name}"

        # 7. Shallow type check for each provided argument
        properties = parameters.get("properties", {})
        for arg_name, arg_value in arguments.items():
            if arg_name not in properties:
                # Unknown argument: allowed unless additionalProperties is False
                if not parameters.get("additionalProperties", True):
                    return False, f"工具{tool_name}不支持参数: {arg_name}"
                continue

            param_type = properties[arg_name].get("type")
            if param_type:
                if not self._validate_type(arg_value, param_type):
                    return False, f"工具{tool_name}参数{arg_name}类型错误，期望{param_type}"

        return True, None

    def _validate_type(self, value: Any, expected_type: str) -> bool:
        """Shallow JSON Schema type check for a single value."""
        # bool is a subclass of int in Python, but JSON Schema treats
        # booleans as distinct from numbers/integers — reject explicitly so
        # that e.g. true does not satisfy an "integer" parameter.
        if isinstance(value, bool) and expected_type in ("number", "integer"):
            return False
        type_mapping = {
            "string": str,
            "number": (int, float),
            "integer": int,
            "boolean": bool,
            "object": dict,
            "array": list,
        }

        expected_python_type = type_mapping.get(expected_type)
        if expected_python_type is None:
            return True  # Unknown schema type: skip validation

        return isinstance(value, expected_python_type)


def load_tools_config(config_path: str = "config/envs.yaml") -> Optional[Dict[str, Any]]:
    """
    Load tool definitions from envs.yaml.

    Args:
        config_path: path to the YAML config file

    Returns:
        The tools mapping from custom_envs._aiops_base.tools, or None when
        the file is missing/unparsable or the section is absent/empty.
    """
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)

        # yaml.safe_load returns None for an empty file and may return a
        # non-dict for malformed content; treat both as a missing section
        # instead of raising TypeError on the membership test below.
        if not isinstance(config, dict):
            logging.warning("配置文件中未找到custom_envs._aiops_base")
            return None

        # Extract the tools config under custom_envs._aiops_base
        if "custom_envs" not in config or "_aiops_base" not in config["custom_envs"]:
            logging.warning("配置文件中未找到custom_envs._aiops_base")
            return None

        tools = config["custom_envs"]["_aiops_base"].get("tools", {})

        if not tools:
            logging.warning("未找到工具配置")
            return None

        logging.info(f"✅ 加载了 {len(tools)} 个工具定义: {list(tools.keys())}")
        return tools
    except Exception as e:
        logging.warning(f"加载工具配置失败: {e}")
        return None


def check_metrics_quality(sample_dict: Dict) -> Tuple[bool, Optional[str]]:
    """
    Check metrics quality and decide whether a sample should be kept.

    Filtering rules (keyword match, any key prefix allowed):
    1. a key containing 'success' must equal 1.0
    2. a key containing 'top_5' must equal 1.0
    3. a key containing 'action_is_effective' must be > 0.9
    4. a key containing 'num_actions' must be >= 25

    Metrics are looked up at sample_dict['metrics'] first, then at
    sample_dict['info']['metrics']. Missing or non-numeric metric values
    cause the sample to be filtered (with a reason) instead of raising
    TypeError from the comparison operators.

    Args:
        sample_dict: training sample dict possibly carrying metrics

    Returns:
        (is_valid, reason): is_valid=False means the sample should be
        filtered; reason explains why (None when valid).
    """
    # Metrics may live at the top level or nested under 'info'
    metrics = sample_dict.get("metrics", {})
    if not metrics or not isinstance(metrics, dict):
        info = sample_dict.get("info", {})
        if isinstance(info, dict):
            metrics = info.get("metrics", {})

    # Without metrics the quality cannot be verified — filter
    if not metrics or not isinstance(metrics, dict):
        return False, "缺少metrics字段"

    def find_metric_key(keyword: str) -> Optional[str]:
        # First key containing the keyword (case-insensitive substring)
        for key in metrics:
            if keyword in key.lower():
                return key
        return None

    def as_float(raw: Any) -> Optional[float]:
        # Coerce a metric value to float; None when missing/non-numeric
        try:
            return float(raw)
        except (TypeError, ValueError):
            return None

    # (keyword, predicate on the numeric value, human-readable requirement)
    criteria = (
        ("success", lambda v: v == 1.0, "必须为1.0"),
        ("top_5", lambda v: v == 1.0, "必须为1.0"),
        ("action_is_effective", lambda v: v > 0.9, "必须>0.9"),
        ("num_actions", lambda v: v >= 25, "必须>=25"),
    )

    for keyword, predicate, requirement in criteria:
        key = find_metric_key(keyword)
        if key is None:
            return False, f"缺少包含'{keyword}'的指标"
        raw = metrics.get(key)
        value = as_float(raw)
        if value is None or not predicate(value):
            return False, f"{key}={raw} ({requirement})"

    return True, None


def clean_messages(
    messages: Any, 
    sample_ctx: str, 
    counters: Optional[Dict[str, int]] = None,
    validator: Optional[ToolDefinitionValidator] = None
) -> Any:
    """
    Clean a messages list (or 1-D numpy object array).

    Removes assistant turns whose <tool_call> block is malformed and, when a
    validator is supplied, assistant turns whose tool call violates the tool
    schema. In both cases the user message immediately following the removed
    assistant turn is removed as well. Removals are logged with *sample_ctx*
    and tallied into *counters*.

    Returns the same container type as the input (list or numpy.ndarray);
    unrecognized inputs are returned unchanged.

    Args:
        messages: message list or numpy object array of ChatML-style dicts
        sample_ctx: context string used in log lines
        counters: optional dict updated in place with removal statistics
        validator: optional schema validator for tool-call arguments
    """
    was_ndarray = isinstance(messages, np.ndarray)

    # Normalize to a Python list for processing
    if was_ndarray:
        try:
            if messages.ndim != 1:
                logging.warning(
                    "[preprocess] messages is not 1-D array; skipping clean: %s",
                    sample_ctx,
                )
                return messages
            # Ensure object dtype for safe element access
            if messages.dtype != object:
                messages = messages.astype(object)
            working = messages.tolist()
        except Exception as e:
            logging.exception(
                "[preprocess] failed to convert ndarray messages to list: %s | %s",
                sample_ctx,
                e,
            )
            return messages
    else:
        if not isinstance(messages, list):
            return messages
        working = messages

    if counters is None:
        counters = {}
    counters.setdefault("invalid_assistant", 0)
    counters.setdefault("removed_assistant", 0)
    counters.setdefault("removed_user", 0)

    length = len(working)

    def _role_of(candidate: Any) -> Optional[str]:
        # Defensive: entries may not be dicts (or may misbehave on .get)
        try:
            return candidate.get("role") if isinstance(candidate, dict) else None
        except Exception:
            return None

    def _skip_following_user(pos: int) -> int:
        # After dropping an assistant turn, also drop the user turn that
        # immediately follows it (if any); returns the next index to visit.
        if pos < length and _role_of(working[pos]) == "user":
            counters["removed_user"] += 1
            logging.error(
                "[preprocess] also removing immediate next user message: %s",
                sample_ctx,
            )
            pos += 1
        return pos

    cleaned: List[Any] = []
    i = 0
    while i < length:
        msg = working[i]
        if _role_of(msg) != "assistant":
            # Non-assistant (and unrecognized) messages are kept as-is
            cleaned.append(msg)
            i += 1
            continue

        content = msg.get("content") if isinstance(msg, dict) else None
        tool_call_obj, err = extract_tool_call_json_from_content(content)

        # Step 1: basic <tool_call> format validation
        if err is not None:
            counters["invalid_assistant"] += 1
            counters["removed_assistant"] += 1
            logging.error(
                "[preprocess] removing invalid assistant message: %s | reason=%s",
                sample_ctx,
                err,
            )
            i = _skip_following_user(i + 1)
            continue

        # Step 2: schema validation against the tool definitions (only when
        # a validator was supplied and the message actually has a tool call)
        if validator is not None and tool_call_obj is not None:
            is_valid, validation_err = validator.validate_tool_call(tool_call_obj)
            if not is_valid:
                counters["invalid_assistant"] += 1
                counters["removed_assistant"] += 1
                counters.setdefault("removed_by_tool_validation", 0)
                counters["removed_by_tool_validation"] += 1
                logging.error(
                    "[preprocess] removing assistant with invalid tool call: %s | reason=%s",
                    sample_ctx,
                    validation_err,
                )
                i = _skip_following_user(i + 1)
                continue

        # Valid assistant message, keep
        cleaned.append(msg)
        i += 1

    # Convert back to the original container type
    if was_ndarray:
        try:
            return np.array(cleaned, dtype=object)
        except Exception as e:
            logging.exception(
                "[preprocess] failed to convert cleaned list back to ndarray: %s | %s",
                sample_ctx,
                e,
            )
            return cleaned
    return cleaned


def preprocess_dataframe(
    df: pd.DataFrame, 
    tag: str, 
    validator: Optional[ToolDefinitionValidator] = None,
    filter_low_quality: bool = True
) -> Tuple[pd.DataFrame, Dict[str, int]]:
    """
    Preprocess a DataFrame: clean invalid messages and filter low-quality samples.

    Pipeline:
      1. Run clean_messages over each row's 'messages' column (removes
         assistant turns with invalid tool calls plus the immediately
         following user turn).
      2. Drop rows whose cleaned 'messages' has fewer than 2 entries.
      3. If filter_low_quality is True and a 'metrics' or 'info' column
         exists, drop rows failing check_metrics_quality.

    Args:
        df: input DataFrame (needs a 'messages' column to be processed)
        tag: dataset label used in log messages
        validator: optional tool-definition validator for tool-call arguments
        filter_low_quality: whether to filter samples by metrics quality

    Returns:
        (cleaned DataFrame, counters dict with removal statistics; the
        'filter_reasons' entry is a nested dict of reason -> count)
    """
    if df is None or df.empty:
        logging.info("[preprocess] '%s' is empty; skipping", tag)
        return df, {}

    # Determine available context columns for better logs
    context_cols = [c for c in ["id", "trace_id", "scenario_id"] if c in df.columns]

    counters: Dict[str, int] = {}
    counters["removed_by_metrics"] = 0
    counters["original_samples"] = len(df)

    if "messages" not in df.columns:
        logging.info("[preprocess] '%s' has no 'messages' column; skipping", tag)
        return df, counters

    df = df.copy()
    
    # Step 1: strip invalid tool calls out of each row's messages
    for idx, row in df.iterrows():
        try:
            messages = row.get("messages") if isinstance(row, pd.Series) else None
            if messages is None:
                continue
            # Build a per-row context string for log lines
            ctx_parts = [f"{c}={row[c]}" for c in context_cols]
            ctx = f"{tag}|index={idx}"
            if ctx_parts:
                ctx += "|" + ",".join(ctx_parts)
            cleaned = clean_messages(messages, ctx, counters, validator)
            df.at[idx, "messages"] = cleaned
        except Exception as e:
            logging.exception(
                "[preprocess] row '%s|index=%s' failed: %s", tag, idx, e
            )
    
    # Step 1 follow-up: drop samples whose messages are empty or too short
    original_len = len(df)
    # Keep only samples with a usable conversation (at least one user-assistant pair)
    valid_message_indices = []
    for idx, row in df.iterrows():
        messages = row.get("messages")
        if messages is None:
            continue
        # Normalize ndarray messages to a list for the length check
        if hasattr(messages, 'tolist'):
            messages = messages.tolist()
        elif not isinstance(messages, list):
            continue
        # A valid conversation needs at least 2 messages
        if len(messages) >= 2:
            valid_message_indices.append(idx)
        else:
            counters.setdefault("removed_by_empty_messages", 0)
            counters["removed_by_empty_messages"] += 1
    
    df = df.loc[valid_message_indices].reset_index(drop=True)
    filtered_by_messages = original_len - len(df)
    if filtered_by_messages > 0:
        logging.info(
            "[preprocess] '%s' filtered %d samples with empty/short messages",
            tag,
            filtered_by_messages
        )
    
    # Step 2: filter low-quality samples based on metrics.
    # Metrics may live in the top-level 'metrics' column or nested in 'info'.
    has_metrics_column = "metrics" in df.columns
    has_info_column = "info" in df.columns
    
    if filter_low_quality and (has_metrics_column or has_info_column):
        original_len = len(df)
        valid_indices = []
        
        # Tally of filter reasons for the summary report
        filter_reasons: Dict[str, int] = {}
        
        logging.info(
            "[preprocess] '%s' starting metrics quality filtering (has_metrics_column=%s, has_info_column=%s)",
            tag, has_metrics_column, has_info_column
        )
        
        for idx, row in df.iterrows():
            row_dict = row.to_dict()
            is_valid, reason = check_metrics_quality(row_dict)
            
            if is_valid:
                valid_indices.append(idx)
            else:
                counters["removed_by_metrics"] += 1
                
                # Count this filter reason
                if reason:
                    filter_reasons[reason] = filter_reasons.get(reason, 0) + 1
                
                # Log only the first 10 filtered samples (debug aid)
                if counters["removed_by_metrics"] <= 10:
                    # Fetch the metrics (top-level or nested) for the log line
                    metrics = row_dict.get("metrics", {})
                    if not metrics and "info" in row_dict:
                        info = row_dict.get("info", {})
                        if isinstance(info, dict):
                            metrics = info.get("metrics", {})
                    
                    logging.warning(
                        "[preprocess] filtering sample: %s | reason=%s | metrics=%s",
                        f"{tag}|index={idx}",
                        reason,
                        metrics
                    )
        
        df = df.loc[valid_indices].reset_index(drop=True)
        filtered_count = original_len - len(df)
        if filtered_count > 0:
            logging.info(
                "[preprocess] '%s' filtered %d samples by metrics quality (%.2f%%)",
                tag,
                filtered_count,
                100 * filtered_count / original_len
            )
            # Detailed per-reason breakdown, most frequent first
            logging.info("[preprocess] Filter reasons breakdown:")
            for reason, count in sorted(filter_reasons.items(), key=lambda x: x[1], reverse=True):
                logging.info("  - %s: %d samples (%.2f%%)", reason, count, 100 * count / original_len)
        
        # NOTE(review): stores a nested dict under a Dict[str, int]-typed
        # mapping; consumers must expect this key to hold a dict, not an int.
        counters["filter_reasons"] = filter_reasons
    elif filter_low_quality:
        # Filtering was requested but there is nothing to filter on — warn and skip
        logging.warning(
            "[preprocess] '%s' has no 'metrics' or 'info' column, skipping metrics quality filtering",
            tag
        )

    # Summary
    invalid = counters.get("invalid_assistant", 0)
    rem_a = counters.get("removed_assistant", 0)
    rem_u = counters.get("removed_user", 0)
    rem_tool = counters.get("removed_by_tool_validation", 0)
    rem_messages = counters.get("removed_by_empty_messages", 0)
    rem_metrics = counters.get("removed_by_metrics", 0)
    
    # Record the final sample count after all filtering passes
    counters["final_samples"] = len(df)
    
    logging.info(
        "[preprocess] '%s' summary: invalid_assistant=%d, removed_assistant=%d, removed_user=%d, removed_by_tool_validation=%d, removed_by_empty_messages=%d, removed_by_metrics=%d",
        tag,
        invalid,
        rem_a,
        rem_u,
        rem_tool,
        rem_messages,
        rem_metrics,
    )

    return df, counters


def split_dataset(
    df: pd.DataFrame, 
    split_ratio: float, 
    shuffle: bool = True, 
    random_state: int = 42
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Split a DataFrame into train/test partitions by ratio.

    Args:
        df: input DataFrame
        split_ratio: fraction of samples going to the train split (strictly
            between 0 and 1)
        shuffle: whether to shuffle rows before splitting
        random_state: RNG seed used when shuffling

    Returns:
        (train_df, test_df); both empty when *df* is None or empty.

    Raises:
        ValueError: if split_ratio is outside the open interval (0, 1).
    """
    if df is None or df.empty:
        return pd.DataFrame(), pd.DataFrame()

    if not 0 < split_ratio < 1:
        raise ValueError(f"split_ratio必须在0到1之间，当前值: {split_ratio}")

    total = len(df)
    boundary = int(total * split_ratio)

    # Optionally shuffle, then renumber rows so iloc slicing is positional
    ordered = (
        df.sample(frac=1, random_state=random_state) if shuffle else df
    ).reset_index(drop=True)

    train_part = ordered.iloc[:boundary]
    test_part = ordered.iloc[boundary:]

    logging.info(
        f"📊 数据集分割: 总样本={total}, "
        f"训练集={len(train_part)} ({len(train_part)/total*100:.1f}%), "
        f"测试集={len(test_part)} ({len(test_part)/total*100:.1f}%)"
    )

    return train_part, test_part


def compute_dataset_statistics(
    train_df: pd.DataFrame, 
    test_df: pd.DataFrame,
    merged_info: Optional[Dict] = None
) -> Dict:
    """
    Build the info.json statistics dict for the merged dataset.

    When *merged_info* already carries a 'metrics' dict, that same dict is
    reused (and extended in place with the messages-length statistics).

    Args:
        train_df: train split
        test_df: test split
        merged_info: previously merged info whose 'metrics' should be kept

    Returns:
        dict with timestamp, sample counts, and messages-length metrics.
    """
    stats = {
        "timestamp": datetime.now().strftime("%Y%m%d_%H%M%S"),
        "total_samples": len(train_df) + len(test_df),
        "train_samples": len(train_df),
        "test_samples": len(test_df),
    }

    # Reuse previously merged metrics when available
    if merged_info and "metrics" in merged_info:
        stats["metrics"] = merged_info["metrics"]
    else:
        stats["metrics"] = {}

    # messages-length statistics for each split
    for frame, suffix, label in ((train_df, "train", "训练集"), (test_df, "test", "测试集")):
        if frame.empty or "messages" not in frame.columns:
            continue
        try:
            lengths = frame["messages"].apply(
                lambda m: len(m) if isinstance(m, (list, np.ndarray)) else 0
            )
            stats["metrics"][f"avg_messages_length_{suffix}"] = float(lengths.mean())
            stats["metrics"][f"max_messages_length_{suffix}"] = int(lengths.max())
            stats["metrics"][f"min_messages_length_{suffix}"] = int(lengths.min())
        except Exception as e:
            logging.warning(f"计算{label}messages统计时出错: {e}")

    return stats


def list_candidate_dirs(parent: str) -> List[str]:
    """Return the sorted, unique immediate subdirectories of *parent*.

    Returns an empty list when *parent* is not a directory. All immediate
    subdirectories are kept; a stricter YYYYMMDD_HHMMSS name filter could be
    added here if ever needed.
    """
    if not os.path.isdir(parent):
        return []
    children = {
        os.path.join(parent, entry)
        for entry in os.listdir(parent)
        if os.path.isdir(os.path.join(parent, entry))
    }
    return sorted(children)


def merge_datasets(
    input_dirs: List[str], 
    output_dir: str, 
    split_ratio: float = 0.8,
    shuffle: bool = True,
    random_state: int = 42,
    enable_preprocess: bool = True,
    enable_thinking: Optional[bool] = None
) -> None:
    """
    Merge multiple datasets and split the result into train/test sets.

    Pipeline: collect parquet files -> (optional) preprocess -> (optional)
    force the ``enable_thinking`` column -> split -> save parquet outputs ->
    merge/compute statistics into ``info.json`` -> print a filtering report.

    Args:
        input_dirs: List of input directories containing parquet files.
        output_dir: Output directory for train.parquet / test.parquet / info.json.
        split_ratio: Fraction of samples assigned to the training set (0-1).
        shuffle: Whether to shuffle rows before splitting.
        random_state: Random seed for reproducible shuffling/splitting.
        enable_preprocess: Whether to run tool-call validation and
            metrics-based quality filtering before splitting.
        enable_thinking: Force the ``enable_thinking`` column for all samples
            (None = keep the data as-is).

    Raises:
        ValueError: If no valid parquet data is found, if the merged data
            lacks the required 'messages' column, or if preprocessing
            filters out every sample.
    """
    os.makedirs(output_dir, exist_ok=True)

    # 1) Collect and merge every parquet file found under the input dirs.
    logging.info("=" * 60)
    logging.info("步骤1: 收集并合并parquet文件")
    logging.info("=" * 60)
    
    merged_df = collect_all_parquets(input_dirs)
    
    if merged_df.empty:
        raise ValueError("未找到任何有效的parquet数据，请检查输入目录")
    
    # The verl SFT format requires a 'messages' column (ChatML-style turns).
    if "messages" not in merged_df.columns:
        raise ValueError("合并后的数据缺少'messages'列，这是verl SFT格式的必需列")
    
    logging.info(f"✓ 成功合并数据: {len(merged_df)} 个样本")
    logging.info(f"  列名: {merged_df.columns.tolist()}")
    
    # 1.1) Optional preprocessing: tool-call validation + metrics filtering.
    if enable_preprocess:
        logging.info("=" * 60)
        logging.info("步骤2: 数据预处理")
        logging.info("=" * 60)
        
        # Load the tool configuration and build a validator for tool-call
        # argument checking. NOTE(review): the path is relative to the CWD,
        # not to this script — confirm callers run from the repo root.
        validator = None
        tools_config = load_tools_config("config/envs.yaml")
        if tools_config:
            validator = ToolDefinitionValidator(tools_config)
        else:
            logging.warning("⚠️  未加载工具配置，跳过工具调用参数验证")
        
        # Preprocess (tool-call validation + metrics-based quality filter).
        # preprocess_stats is later read for the filtering report; presumably
        # it contains the keys used below (original_samples, removed_by_*...).
        merged_df, preprocess_stats = preprocess_dataframe(
            merged_df, 
            tag="merged", 
            validator=validator,
            filter_low_quality=True
        )
        
        if merged_df.empty:
            raise ValueError("预处理后所有数据都被过滤，请检查数据质量或禁用预处理")
    else:
        logging.info("跳过数据预处理（--no-preprocess）")
        # Minimal stats so the report section below can still read them.
        preprocess_stats = {
            "original_samples": len(merged_df),
            "final_samples": len(merged_df),
        }
    
    # 1.2) Optionally force the enable_thinking column for every sample.
    if enable_thinking is not None:
        if 'enable_thinking' in merged_df.columns:
            logging.warning("数据中已存在enable_thinking列，将被覆盖")
        merged_df['enable_thinking'] = enable_thinking
        logging.info(f"✓ 设置enable_thinking列为: {enable_thinking}")
    else:
        if 'enable_thinking' not in merged_df.columns:
            logging.info("数据中没有enable_thinking列，将使用默认值（None=禁用thinking模式）")
    
    # 2) Split into train/test sets by split_ratio.
    logging.info("=" * 60)
    logging.info(f"步骤3: 分割数据集 (训练集比例: {split_ratio:.1%})")
    logging.info("=" * 60)
    
    train_df, test_df = split_dataset(
        merged_df, 
        split_ratio=split_ratio,
        shuffle=shuffle,
        random_state=random_state
    )
    
    # 3) Save the train/test parquet files (skip empty splits).
    logging.info("=" * 60)
    logging.info("步骤4: 保存数据集")
    logging.info("=" * 60)
    
    if not train_df.empty:
        train_out = os.path.join(output_dir, "train.parquet")
        write_parquet_safe(train_df, train_out)
        logging.info(f"✓ 训练集已保存: {train_out} ({len(train_df)} 行)")
        print(f"✓ 训练集: {train_out} ({len(train_df)} 样本)")
    else:
        logging.warning("训练集为空，未保存")
        print("⚠️  训练集为空")

    if not test_df.empty:
        test_out = os.path.join(output_dir, "test.parquet")
        write_parquet_safe(test_df, test_out)
        logging.info(f"✓ 测试集已保存: {test_out} ({len(test_df)} 行)")
        print(f"✓ 测试集: {test_out} ({len(test_df)} 样本)")
    else:
        logging.warning("测试集为空，未保存")
        print("⚠️  测试集为空")

    # 4) Merge per-input info.json metadata and compute final statistics.
    logging.info("=" * 60)
    logging.info("步骤5: 生成统计信息")
    logging.info("=" * 60)
    
    infos: List[Dict] = []
    for directory in input_dirs:
        info = load_info(directory)
        if info:
            infos.append(info)

    # Combine any pre-existing info.json files into one weighted summary.
    merged_info = None
    if infos:
        merged_info = weighted_average_infos(infos)
        logging.info(f"✓ 合并了 {len(infos)} 个info.json文件")
    
    # Compute the final dataset statistics (sample counts, message lengths).
    final_info = compute_dataset_statistics(train_df, test_df, merged_info)
    
    info_out = os.path.join(output_dir, "info.json")
    with open(info_out, "w", encoding="utf-8") as f:
        json.dump(final_info, f, ensure_ascii=False, indent=2)
    
    logging.info(f"✓ 统计信息已保存: {info_out}")
    print(f"✓ 统计信息: {info_out}")
    
    # Print the summary. total_samples >= 1 here because an empty merged_df
    # raised earlier, so the percentage divisions are safe.
    logging.info("=" * 60)
    logging.info("完成! 数据集统计:")
    logging.info(f"  总样本数: {final_info['total_samples']}")
    logging.info(f"  训练集: {final_info['train_samples']} ({final_info['train_samples']/final_info['total_samples']*100:.1f}%)")
    logging.info(f"  测试集: {final_info['test_samples']} ({final_info['test_samples']/final_info['total_samples']*100:.1f}%)")
    logging.info("=" * 60)
    
    print("\n" + "=" * 60)
    print("📊 数据集生成完成!")
    print(f"  总样本数: {final_info['total_samples']}")
    print(f"  训练集: {final_info['train_samples']} ({final_info['train_samples']/final_info['total_samples']*100:.1f}%)")
    print(f"  测试集: {final_info['test_samples']} ({final_info['test_samples']/final_info['total_samples']*100:.1f}%)")
    print(f"  输出目录: {output_dir}")
    print("=" * 60)
    
    # Detailed filtering report (only meaningful when preprocessing ran).
    if enable_preprocess and preprocess_stats:
        print("\n" + "=" * 60)
        print("🔍 数据过滤统计报告")
        print("=" * 60)
        
        original = preprocess_stats.get("original_samples", 0)
        final = preprocess_stats.get("final_samples", 0)
        total_removed = original - final
        
        print(f"\n📌 原始样本数: {original}")
        print(f"✅ 最终保留样本数: {final}")
        print(f"❌ 总共过滤样本数: {total_removed} ({100 * total_removed / original:.2f}%)" if original > 0 else "❌ 总共过滤样本数: 0")
        
        if total_removed > 0:
            print("\n📋 过滤原因详细统计:")
            print("-" * 60)
            
            # 1. Tool-call format validation failures.
            invalid_assistant = preprocess_stats.get("invalid_assistant", 0)
            removed_assistant = preprocess_stats.get("removed_assistant", 0)
            removed_user = preprocess_stats.get("removed_user", 0)
            removed_tool = preprocess_stats.get("removed_by_tool_validation", 0)
            
            if invalid_assistant > 0:
                print(f"\n1️⃣  工具调用格式验证失败:")
                print(f"   - 无效的assistant消息: {invalid_assistant} 条")
                print(f"   - 被移除的assistant消息: {removed_assistant} 条")
                print(f"   - 连带移除的user消息: {removed_user} 条")
                if removed_tool > 0:
                    print(f"   - 其中工具参数验证失败: {removed_tool} 条")
                print(f"   说明: assistant消息中的<tool_call>格式不正确或工具参数不完整")
            
            # 2. Samples with empty or too-short message lists.
            removed_messages = preprocess_stats.get("removed_by_empty_messages", 0)
            if removed_messages > 0:
                print(f"\n2️⃣  消息为空或过短:")
                print(f"   - 被过滤的样本: {removed_messages} 个 ({100 * removed_messages / original:.2f}%)")
                print(f"   说明: 清理无效消息后，对话轮次少于2轮")
            
            # 3. Metrics-based quality filtering.
            removed_metrics = preprocess_stats.get("removed_by_metrics", 0)
            if removed_metrics > 0:
                print(f"\n3️⃣  Metrics质量过滤:")
                print(f"   - 被过滤的样本总数: {removed_metrics} 个 ({100 * removed_metrics / original:.2f}%)")
                print(f"   说明: 不满足以下任一质量标准的样本被过滤")
                print(f"   质量标准（支持任意前缀，使用关键字匹配）:")
                print(f"     • 包含'success'的指标 必须为 1.0")
                print(f"     • 包含'top_5'的指标 必须为 1.0")
                print(f"     • 包含'action_is_effective'的指标 必须 > 0.9")
                print(f"     • 包含'num_actions'的指标 必须 >= 25")
                
                # Show the per-reason breakdown, most frequent first.
                filter_reasons = preprocess_stats.get("filter_reasons", {})
                if filter_reasons:
                    print(f"\n   过滤原因详细分布:")
                    for reason, count in sorted(filter_reasons.items(), key=lambda x: x[1], reverse=True):
                        print(f"     • {reason}: {count} 个样本 ({100 * count / original:.2f}%)")
            
            print("\n" + "-" * 60)
            
            # Per-category percentages of the original sample count.
            if original > 0:
                print(f"\n📈 过滤占比分析:")
                if invalid_assistant > 0:
                    print(f"   - 工具调用问题: {100 * invalid_assistant / original:.2f}%")
                if removed_messages > 0:
                    print(f"   - 空/短消息: {100 * removed_messages / original:.2f}%")
                if removed_metrics > 0:
                    print(f"   - 低质量样本: {100 * removed_metrics / original:.2f}%")
        else:
            print("\n✨ 所有样本均通过验证，无数据被过滤！")
        
        print("=" * 60)
    elif not enable_preprocess:
        print("\n💡 提示: 未启用数据预处理，所有原始数据均被保留")

def parse_args() -> argparse.Namespace:
    """Parse and validate the command-line arguments.

    Returns:
        argparse.Namespace with attributes: parent, inputs, output,
        split_ratio, shuffle, random_state, enable_preprocess,
        enable_thinking (tri-state: True / False / None = keep as-is),
        and log_level.

    Raises:
        SystemExit: via parser.error() when --split-ratio is outside (0, 1),
            or on any argparse parsing failure.
    """
    parser = argparse.ArgumentParser(
        description="生成verl SFT格式数据集：合并parquet文件并按比例分割训练/测试集",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例用法:
  # 从指定文件夹合并，使用默认8:2分割
  python merge_dataset.py --inputs /path/to/folder1 /path/to/folder2 --output /path/to/output
  
  # 从父目录合并所有子文件夹，使用9:1分割
  python merge_dataset.py --parent /path/to/dataset --output /path/to/output --split-ratio 0.9
  
  # 启用thinking模式
  python merge_dataset.py --parent /path/to/dataset --output /path/to/output --enable-thinking
  
  # 禁用数据预处理和打乱
  python merge_dataset.py --parent /path/to/dataset --output /path/to/output --no-preprocess --no-shuffle

  # 默认
   python scripts/merge_dataset.py --preprocess --enable-thinking
        """
    )
    
    # Input source (mutually exclusive: explicit dirs vs. parent-dir scan).
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--parent",
        type=str,
        default="/home/yangcx24/Jayx/RAGEN/data/sft",
        help="父目录，包含多个时间戳子文件夹（默认: %(default)s）",
    )
    group.add_argument(
        "--inputs",
        type=str,
        nargs="+",
        help="显式指定要合并的文件夹路径列表",
    )
    
    # Output directory.
    parser.add_argument(
        "--output",
        type=str,
        default="/home/yangcx24/Jayx/RAGEN/data/sft/merged",
        help="输出目录，用于保存train.parquet、test.parquet和info.json（默认: %(default)s）",
    )
    
    # Train/test split.
    parser.add_argument(
        "--split-ratio",
        type=float,
        default=0.8,
        help="训练集比例，范围0-1（默认: 0.8表示80%%训练集，20%%测试集）",
    )
    
    parser.add_argument(
        "--shuffle",
        dest="shuffle",
        action="store_true",
        default=True,
        help="在分割前打乱数据（默认: True）",
    )
    
    parser.add_argument(
        "--no-shuffle",
        dest="shuffle",
        action="store_false",
        help="禁用数据打乱，保持原始顺序分割",
    )
    
    parser.add_argument(
        "--random-state",
        type=int,
        default=42,
        help="随机种子，用于可复现的数据打乱和分割（默认: 42）",
    )
    
    # Data preprocessing toggle.
    parser.add_argument(
        "--preprocess",
        dest="enable_preprocess",
        action="store_true",
        default=True,
        help="启用数据预处理，验证assistant消息中的tool_call格式（默认: True）",
    )
    
    parser.add_argument(
        "--no-preprocess",
        dest="enable_preprocess",
        action="store_false",
        help="禁用数据预处理，保留所有原始数据",
    )
    
    # Thinking mode (tri-state). BUG FIX: default must be None so that when
    # neither flag is given, downstream code sees "keep data as-is". Without
    # an explicit default, store_true's implicit default of False wins for
    # this dest (the first registered action's default is used), which made
    # the None branch unreachable and silently disabled thinking by default.
    parser.add_argument(
        "--enable-thinking",
        dest="enable_thinking",
        action="store_true",
        default=None,
        help="为所有样本启用thinking模式（添加enable_thinking=True列）",
    )
    
    parser.add_argument(
        "--disable-thinking",
        dest="enable_thinking",
        action="store_false",
        help="为所有样本禁用thinking模式（添加enable_thinking=False列）",
    )
    
    # Logging verbosity.
    parser.add_argument(
        "--log-level",
        type=str,
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
        help="日志级别（默认: INFO）",
    )
    
    args = parser.parse_args()
    
    # Validate the split ratio: must be strictly between 0 and 1.
    if not 0 < args.split_ratio < 1:
        parser.error(f"--split-ratio必须在0到1之间，当前值: {args.split_ratio}")
    
    return args


def main() -> None:
    """CLI entry point: resolve input directories, print the run
    configuration, then merge/split the datasets."""
    args = parse_args()
    configure_logging(args.log_level)

    # Determine input directories: explicit --inputs wins over --parent scan.
    if args.inputs:
        input_dirs = [path for path in args.inputs if os.path.isdir(path)]
        invalid_dirs = set(args.inputs) - set(input_dirs)
        if invalid_dirs:
            logging.warning(f"以下路径不是有效目录，已跳过: {invalid_dirs}")
    else:
        input_dirs = list_candidate_dirs(args.parent)

    if not input_dirs:
        raise SystemExit("❌ 错误: 未找到任何输入目录")

    # Echo the effective configuration before doing any work.
    banner = "=" * 60
    print(banner)
    print("📁 verl SFT数据集生成工具")
    print(banner)
    print(f"输入目录数量: {len(input_dirs)}")
    for idx, directory in enumerate(input_dirs, 1):
        print(f"  {idx}. {directory}")
    print(f"\n输出目录: {args.output}")
    print(f"训练/测试分割比例: {args.split_ratio:.1%} / {1-args.split_ratio:.1%}")
    print(f"数据打乱: {'是' if args.shuffle else '否'}")
    print(f"随机种子: {args.random_state}")
    print(f"数据预处理: {'启用' if args.enable_preprocess else '禁用'}")

    # Report the tri-state thinking setting (True / False / keep-as-is).
    if args.enable_thinking is True:
        print(f"Thinking模式: 启用（所有样本）")
    elif args.enable_thinking is False:
        print(f"Thinking模式: 禁用（所有样本）")
    else:
        print(f"Thinking模式: 保持原样（使用数据中的enable_thinking列，如果存在）")

    print(banner)
    print()

    # Run the merge/split pipeline; surface any failure as a clean exit.
    try:
        merge_datasets(
            input_dirs,
            args.output,
            split_ratio=args.split_ratio,
            shuffle=args.shuffle,
            random_state=args.random_state,
            enable_preprocess=args.enable_preprocess,
            enable_thinking=args.enable_thinking,
        )
    except Exception as e:
        logging.exception("处理数据集时发生错误")
        raise SystemExit(f"❌ 错误: {e}")


if __name__ == "__main__":
    main()
