#!/usr/bin/env python3
"""
Merge timestamped dataset folders (train/test parquet + weighted info.json).

- Concatenate train.parquet and test.parquet across input folders
- Merge info.json by computing weighted averages of metrics using total_samples as weights
- Preprocess messages: for assistant role, extract <answer>...</answer> JSON and validate:
  * JSON must parse
  * Top-level key must be exactly 'tool_calls'
  * tool_calls must be a non-empty array
  Invalid assistant messages are removed along with the immediately following user message. Errors are logged, and per-split summary is printed.

Usage examples:
  python merge_dataset.py --inputs /home/yangcx24/Jayx/RAGEN/dataset/20250814_150100 \
                          /home/yangcx24/Jayx/RAGEN/dataset/20250814_144541 \
                          /home/yangcx24/Jayx/RAGEN/dataset/20250814_155055 \
                          --output /home/yangcx24/Jayx/RAGEN/dataset/merged_agg

  python merge_dataset.py --parent /home/yangcx24/Jayx/RAGEN/dataset \
                          --output /home/yangcx24/Jayx/RAGEN/dataset/merged_agg
"""

import os
import re
import sys
import json
import argparse
import logging
from datetime import datetime
from typing import List, Dict, Optional, Tuple, Any

import pandas as pd
import numpy as np


def configure_logging(level: str = "INFO") -> None:
    """Reset the root logger and route formatted records to stdout.

    Args:
        level: Logging level name (e.g. "DEBUG", "INFO"); case-insensitive.
    """
    root_logger = logging.getLogger()
    # Drop any pre-existing handlers so repeated calls don't duplicate output.
    root_logger.handlers.clear()
    root_logger.setLevel(logging.getLevelName(level.upper()))
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    )
    root_logger.addHandler(stdout_handler)


def read_parquet_safe(path: str) -> pd.DataFrame:
    """Read *path* as parquet; a missing file yields an empty DataFrame.

    The pyarrow engine is attempted first; on any failure the read is
    retried with fastparquet (whose errors then propagate).
    """
    if not os.path.isfile(path):
        return pd.DataFrame()
    try:
        return pd.read_parquet(path, engine="pyarrow")
    except Exception:
        pass
    return pd.read_parquet(path, engine="fastparquet")


def write_parquet_safe(df: pd.DataFrame, path: str) -> None:
    """Persist *df* to parquet at *path* without the index column.

    pyarrow is preferred for its nested-data (list/dict column) support;
    fastparquet is the fallback when the pyarrow write fails.
    """
    try:
        df.to_parquet(path, index=False, engine="pyarrow")
        return
    except Exception:
        pass
    df.to_parquet(path, index=False, engine="fastparquet")


def concat_parquets(dirs: List[str], filename: str) -> pd.DataFrame:
    """Concatenate the *filename* parquet found in each of *dirs*.

    Directories missing the file and files that load as empty frames are
    skipped. Returns an empty DataFrame when nothing was loaded.
    """
    frames = [
        frame
        for frame in (
            read_parquet_safe(os.path.join(directory, filename))
            for directory in dirs
            if os.path.isfile(os.path.join(directory, filename))
        )
        if not frame.empty
    ]
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()


def load_info(path: str) -> Optional[Dict]:
    """Return the parsed ``info.json`` under directory *path*.

    Returns None when the file is absent or cannot be read/parsed.
    """
    info_file = os.path.join(path, "info.json")
    if not os.path.isfile(info_file):
        return None
    try:
        with open(info_file, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        # Best-effort loader: unreadable/corrupt info.json is treated as missing.
        return None


def weighted_average_infos(infos: List[Dict]) -> Dict:
    """Merge a list of info dicts into a single summary dict.

    Sample counts (total/train/test) are summed across entries. Each numeric
    metric becomes the average of the per-entry values weighted by that
    entry's ``total_samples``; non-numeric metric values are skipped.
    Falsy entries in *infos* are ignored.
    """
    totals = {"total": 0, "train": 0, "test": 0}
    weighted_sums: Dict[str, float] = {}
    weight_totals: Dict[str, float] = {}

    for entry in infos:
        if not entry:
            continue
        weight = float(entry.get("total_samples", 0.0) or 0.0)
        totals["train"] += int(entry.get("train_samples", 0) or 0)
        totals["test"] += int(entry.get("test_samples", 0) or 0)
        totals["total"] += int(weight)

        for name, raw_value in (entry.get("metrics", {}) or {}).items():
            try:
                numeric = float(raw_value)
            except Exception:
                # Skip metrics that are not numbers (e.g. free-form strings).
                continue
            # Each metric is weighted only by this entry's total_samples.
            weighted_sums[name] = weighted_sums.get(name, 0.0) + numeric * weight
            weight_totals[name] = weight_totals.get(name, 0.0) + weight

    averaged_metrics = {
        name: (float(numerator / weight_totals[name]) if weight_totals[name] > 0 else 0.0)
        for name, numerator in weighted_sums.items()
    }

    return {
        "timestamp": datetime.now().strftime("%Y%m%d_%H%M%S"),
        "total_samples": int(totals["total"]),
        "train_samples": int(totals["train"]),
        "test_samples": int(totals["test"]),
        "metrics": averaged_metrics,
    }


# Matches the first <answer>...</answer> block, case-insensitively, across newlines.
ANSWER_PATTERN = re.compile(r"<answer>(.*?)</answer>", re.IGNORECASE | re.DOTALL)


def extract_answer_json_from_content(
    content: str,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """Extract and validate the JSON payload of an assistant message.

    The content must contain an ``<answer>...</answer>`` block whose body
    parses as a JSON object with exactly one top-level key, ``tool_calls``,
    mapping to a non-empty array.

    Args:
        content: Raw assistant message text.

    Returns:
        (parsed_json, None) on success, or (None, error_reason) on failure.
    """
    if not isinstance(content, str) or not content:
        return None, "assistant content is empty or non-string"

    match = ANSWER_PATTERN.search(content)
    if not match:
        return None, "missing <answer>...</answer> block"

    block = match.group(1).strip()
    try:
        obj = json.loads(block)
    except Exception as e:
        return None, f"invalid JSON in <answer>: {e}"

    if not isinstance(obj, dict):
        return None, "parsed <answer> is not a JSON object"

    # The object must contain 'tool_calls' and nothing else.
    if set(obj.keys()) != {"tool_calls"}:
        return None, "top-level key is not exactly 'tool_calls'"

    # Key presence is guaranteed by the check above.
    tool_calls = obj["tool_calls"]

    if not isinstance(tool_calls, list):
        return None, "'tool_calls' must be an array"

    if len(tool_calls) == 0:
        return None, "'tool_calls' must be a non-empty array"

    return obj, None


def clean_messages(
    messages: Any, sample_ctx: str, counters: Optional[Dict[str, int]] = None
) -> Any:
    """
    Clean a messages list (or 1-D numpy object array) and return a cleaned structure with
    invalid assistant turns removed, along with the immediately following user turn if present.
    Logs errors with context when removals happen. Optionally updates counters.
    Returns the same container type as input (list or numpy.ndarray).

    Args:
        messages: A list (or 1-D numpy object array) of message dicts with
            'role'/'content' keys; any other type is returned unchanged.
        sample_ctx: Human-readable context string included in every log line.
        counters: Optional mutable dict; the keys 'invalid_assistant',
            'removed_assistant' and 'removed_user' are incremented in place.
    """
    was_ndarray = isinstance(messages, np.ndarray)

    # Normalize to a Python list for processing
    if was_ndarray:
        try:
            if messages.ndim != 1:
                # Multi-dimensional message arrays are unexpected; leave untouched.
                logging.warning(
                    "[preprocess] messages is not 1-D array; skipping clean: %s",
                    sample_ctx,
                )
                return messages
            # Ensure object dtype for safe element access
            if messages.dtype != object:
                messages = messages.astype(object)
            working = messages.tolist()
        except Exception as e:
            logging.exception(
                "[preprocess] failed to convert ndarray messages to list: %s | %s",
                sample_ctx,
                e,
            )
            return messages
    else:
        if not isinstance(messages, list):
            # Unknown container (e.g. None or a scalar): pass through untouched.
            return messages
        working = messages

    if counters is None:
        counters = {}
    counters.setdefault("invalid_assistant", 0)
    counters.setdefault("removed_assistant", 0)
    counters.setdefault("removed_user", 0)

    cleaned: List[Any] = []
    i = 0
    length = len(working)
    # Manual index loop so that removing an assistant turn can also skip
    # the immediately following user turn in the same pass.
    while i < length:
        msg = working[i]
        role = None
        content = None
        try:
            role = msg.get("role") if isinstance(msg, dict) else None
            content = msg.get("content") if isinstance(msg, dict) else None
        except Exception:
            # Keep unrecognized structures as-is
            cleaned.append(msg)
            i += 1
            continue

        if role == "assistant":
            # Validate the <answer> JSON payload; err is None when valid.
            _, err = extract_answer_json_from_content(content)
            if err is not None:
                counters["invalid_assistant"] += 1
                counters["removed_assistant"] += 1
                # Log and remove assistant + next user (if immediate next is user)
                logging.error(
                    "[preprocess] removing invalid assistant message: %s | reason=%s",
                    sample_ctx,
                    err,
                )
                # Skip assistant
                i += 1
                # Skip immediate next if it's a user
                if i < length:
                    next_msg = working[i]
                    try:
                        next_role = (
                            next_msg.get("role") if isinstance(next_msg, dict) else None
                        )
                    except Exception:
                        next_role = None
                    if next_role == "user":
                        counters["removed_user"] += 1
                        logging.error(
                            "[preprocess] also removing immediate next user message: %s",
                            sample_ctx,
                        )
                        i += 1
                # Continue without appending removed messages
                continue
            else:
                # Valid assistant message, keep
                cleaned.append(msg)
                i += 1
                continue
        else:
            # Non-assistant turns (system/user/tool) are always kept.
            cleaned.append(msg)
            i += 1

    # Convert back to original container type
    if was_ndarray:
        try:
            return np.array(cleaned, dtype=object)
        except Exception as e:
            logging.exception(
                "[preprocess] failed to convert cleaned list back to ndarray: %s | %s",
                sample_ctx,
                e,
            )
            # Fall back to the plain list if ndarray reconstruction fails.
            return cleaned
    return cleaned


def preprocess_dataframe(df: pd.DataFrame, tag: str) -> pd.DataFrame:
    """Run message cleaning over every row of *df* and log a summary.

    Args:
        df: DataFrame expected to carry a 'messages' column; rows whose
            cleaning fails are logged and left unchanged.
        tag: Split label ('train'/'test') used in log context.

    Returns:
        A copy of *df* with cleaned 'messages'; the input is not mutated.
        Empty frames and frames without a 'messages' column are returned
        as-is.
    """
    if df is None or df.empty:
        logging.info("[preprocess] '%s' is empty; skipping", tag)
        return df

    # Columns that add useful identity to per-row log lines, when present.
    id_columns = [c for c in ["id", "trace_id", "scenario_id"] if c in df.columns]

    stats: Dict[str, int] = {}

    if "messages" not in df.columns:
        logging.info("[preprocess] '%s' has no 'messages' column; skipping", tag)
        return df

    df = df.copy()
    for row_idx, row in df.iterrows():
        try:
            messages = row.get("messages") if isinstance(row, pd.Series) else None
            if messages is None:
                continue
            context = f"{tag}|index={row_idx}"
            id_parts = [f"{c}={row[c]}" for c in id_columns]
            if id_parts:
                context += "|" + ",".join(id_parts)
            df.at[row_idx, "messages"] = clean_messages(messages, context, stats)
        except Exception as e:
            logging.exception(
                "[preprocess] row '%s|index=%s' failed: %s", tag, row_idx, e
            )

    # Per-split summary of what the cleaning pass removed.
    logging.info(
        "[preprocess] '%s' summary: invalid_assistant=%d, removed_assistant=%d, removed_user=%d",
        tag,
        stats.get("invalid_assistant", 0),
        stats.get("removed_assistant", 0),
        stats.get("removed_user", 0),
    )

    return df


def list_candidate_dirs(parent: str) -> List[str]:
    """Return the sorted immediate subdirectories of *parent*.

    Every subdirectory is kept. If only timestamp folders should be
    merged, add a name filter here (pattern YYYYMMDD_HHMMSS). Returns an
    empty list when *parent* is not a directory.
    """
    if not os.path.isdir(parent):
        return []
    candidates = (os.path.join(parent, entry) for entry in os.listdir(parent))
    return sorted({path for path in candidates if os.path.isdir(path)})


def merge_datasets(input_dirs: List[str], output_dir: str) -> None:
    """Merge parquet splits and info.json from *input_dirs* into *output_dir*.

    Concatenates train/test parquet files, cleans their dialogues, writes
    the merged splits, then writes a weighted-average merge of the
    per-folder info.json files. Creates *output_dir* if needed.
    """
    os.makedirs(output_dir, exist_ok=True)

    # 1) Concatenate the per-folder splits, then clean their dialogues.
    raw_splits = {
        "train": concat_parquets(input_dirs, "train.parquet"),
        "test": concat_parquets(input_dirs, "test.parquet"),
    }
    splits = {
        name: preprocess_dataframe(frame, tag=name) if not frame.empty else frame
        for name, frame in raw_splits.items()
    }

    for name, frame in splits.items():
        if frame.empty:
            print(f"No {name}.parquet data found; nothing written for {name}")
            continue
        out_path = os.path.join(output_dir, f"{name}.parquet")
        write_parquet_safe(frame, out_path)
        print(f"Wrote {out_path} with {len(frame)} rows")

    # 2) Merge info.json with weighted averages (weights=total_samples per info)
    infos = [info for info in (load_info(d) for d in input_dirs) if info]
    if not infos:
        print("No info.json files found; nothing written for info.json")
        return

    info_out = os.path.join(output_dir, "info.json")
    with open(info_out, "w", encoding="utf-8") as f:
        json.dump(weighted_average_infos(infos), f, ensure_ascii=False, indent=2)
    print(f"Wrote {info_out}")


def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface.

    --parent and --inputs are mutually exclusive ways to select input
    folders; --output and --log-level configure the merge run.
    """
    parser = argparse.ArgumentParser(
        description="Merge timestamped dataset folders (train/test parquet + weighted info.json + preprocessing)"
    )
    source = parser.add_mutually_exclusive_group(required=False)
    source.add_argument(
        "--parent",
        default="/home/yangcx24/Jayx/RAGEN/dataset",
        help="Parent directory containing timestamp subfolders",
    )
    source.add_argument(
        "--inputs",
        nargs="+",
        help="Explicit list of timestamp folder paths to merge",
    )
    parser.add_argument(
        "--output",
        default="/home/yangcx24/Jayx/RAGEN/dataset/merged",
        help="Output directory for merged train/test/info",
    )
    parser.add_argument(
        "--log-level",
        default="INFO",
        help="Logging level (DEBUG, INFO, WARNING, ERROR)",
    )
    return parser.parse_args()


def main() -> None:
    """CLI entry point: resolve input folders and run the merge.

    Raises:
        SystemExit: when no usable input directories are found.
    """
    args = parse_args()
    configure_logging(args.log_level)

    # Explicit --inputs wins over scanning --parent for subfolders.
    if args.inputs:
        input_dirs = [path for path in args.inputs if os.path.isdir(path)]
    else:
        input_dirs = list_candidate_dirs(args.parent)

    if not input_dirs:
        raise SystemExit("No input directories found")

    print("Merging from directories:")
    for directory in input_dirs:
        print(f"- {directory}")

    merge_datasets(input_dirs, args.output)


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
