# Copyright (c) 2025, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import json
import os
import pickle
import shutil
import sys
import time
import traceback
from pathlib import Path
from typing import Any

import yaml
from loguru import logger

from nemo_curator.tasks.utils import TaskPerfUtils
from nemo_curator.utils.file_utils import create_or_overwrite_dir

_this_script_dir = Path(__file__).parent

# TODO: How do we want to package this tool? Perhaps a package extra for
#  nemo-curator, i.e. nemo-curator[benchmarking]?
# For now, add this directory to PYTHONPATH to import the runner modules.
# NOTE: sys.path entries must be strings; inserting a pathlib.Path object is
# unreliable across import-machinery versions and third-party tools.
sys.path.insert(0, str(_this_script_dir))

# ruff: noqa: E402
from runner.datasets import DatasetResolver
from runner.entry import Entry
from runner.env_capture import dump_env
from runner.path_resolver import PathResolver
from runner.process import run_command_with_timeout
from runner.ray_cluster import (
    setup_ray_cluster_and_env,
    teardown_ray_cluster_and_env,
)
from runner.session import Session
from runner.utils import find_result, get_obj_for_json, resolve_env_vars


def ensure_dir(dir_path: Path) -> None:
    """Make sure *dir_path* exists, creating any missing parent directories.

    Idempotent: an already-existing directory is left untouched.
    """
    os.makedirs(dir_path, exist_ok=True)


def get_entry_script_persisted_data(benchmark_results_path: Path) -> dict[str, Any]:
    """Collect the artifacts a benchmark script persisted under *benchmark_results_path*.

    Reads params.json, metrics.json and tasks.pkl (each optional; a warning is
    logged for any missing file). Task metrics recovered from tasks.pkl are
    folded into the metrics dict. Returns {"params": ..., "metrics": ...}.
    """

    def _load_json(json_path: Path, label: str) -> dict[str, Any]:
        # Missing artifact files are tolerated: warn and fall back to {}.
        if not json_path.exists():
            logger.warning(f"{label} JSON file not found at {json_path}")
            return {}
        with open(json_path) as fh:
            return json.load(fh)

    script_params = _load_json(benchmark_results_path / "params.json", "Params")
    script_metrics = _load_json(benchmark_results_path / "metrics.json", "Metrics")

    tasks_pkl = benchmark_results_path / "tasks.pkl"
    if tasks_pkl.exists():
        with open(tasks_pkl, "rb") as fh:
            script_tasks = pickle.load(fh)  # noqa: S301
        # A plain list is aggregated under the "task" prefix; a dict maps
        # pipeline names to their task lists, each aggregated under its own
        # (lower-cased) name. Any other payload is silently ignored.
        if isinstance(script_tasks, list):
            script_metrics.update(TaskPerfUtils.aggregate_task_metrics(script_tasks, prefix="task"))
        elif isinstance(script_tasks, dict):
            for pipeline_name, pipeline_tasks in script_tasks.items():
                script_metrics.update(
                    TaskPerfUtils.aggregate_task_metrics(pipeline_tasks, prefix=pipeline_name.lower())
                )
    else:
        logger.warning(f"Tasks pickle file not found at {tasks_pkl}")

    return {"params": script_params, "metrics": script_metrics}


def check_requirements_update_results(result_data: dict[str, Any], requirements: dict[str, Any]) -> bool:
    """Evaluate the benchmark's metrics against *requirements*.

    Stores a "requirements_not_met" dict in *result_data* mapping each failing
    metric name to a human-readable reason. Returns True when every
    requirement was satisfied, False otherwise.
    """
    failures: dict[str, str] = {}

    for metric_name, requirement_dict in requirements.items():
        actual_value = find_result(result_data, metric_name)
        reason_not_met = None
        if actual_value is None:
            reason_not_met = f"{metric_name} not found in metrics"
        elif "min_value" not in requirement_dict and "max_value" not in requirement_dict:
            # A requirement with no bounds is considered misconfigured.
            reason_not_met = f"No min or max value specified for {metric_name}"
        else:
            if "min_value" in requirement_dict:
                min_value = requirement_dict["min_value"]
                if actual_value < min_value:
                    reason_not_met = f"{metric_name} < {min_value}"
            if "max_value" in requirement_dict:
                max_value = requirement_dict["max_value"]
                if actual_value > max_value:
                    reason_not_met = f"{metric_name} > {max_value}"

        if reason_not_met is None:
            logger.debug(f"\t\t✅ Requirement for {metric_name} was met")
        else:
            failures[metric_name] = reason_not_met
            logger.error(f"\t\t❌ Requirement for {metric_name} was not met: {reason_not_met}")

    result_data["requirements_not_met"] = failures
    return not failures


def run_entry(
    entry: Entry,
    path_resolver: PathResolver,
    dataset_resolver: DatasetResolver,
    session_path: Path,
    result_data: dict[str, Any],
) -> bool:
    """Execute a single benchmark entry end-to-end and record its outcome.

    Creates the entry's working directories, starts a dedicated Ray cluster,
    runs the entry's command with a timeout, merges the artifacts the script
    persisted (params/metrics/tasks) into ``result_data`` (mutated in place),
    checks the entry's requirements, and writes ``results.json`` under the
    entry's session directory.

    Returns True only when the command exited with code 0 AND every
    requirement was met. The Ray cluster is always torn down in the finally
    block — and the scratch dir removed when ``entry.delete_scratch`` is set —
    even if setup or execution raises.
    """
    session_entry_path = session_path / entry.name

    # scratch_path : This is the directory provided to users for saving scratch/temp data; it'll be cleaned up after the entry is done if delete_scratch is True
    # ray_cluster_path : This is the directory where Ray debug/log files are saved
    # logs_path : This is the directory where stdout/stderr and Ray startup logs are saved
    # benchmark_results_path : This is the directory where benchmark results are stored
    scratch_path, ray_cluster_path, logs_path, benchmark_results_path = [
        (session_entry_path / d).absolute() for d in ["scratch", "ray_cluster", "logs", "benchmark_results"]
    ]
    cmd = entry.get_command_to_run(session_entry_path, benchmark_results_path, path_resolver, dataset_resolver)
    # Fall back to a fresh run_id if the caller did not seed one in result_data.
    run_id = result_data.get("run_id", f"{entry.name}-{int(time.time())}")
    ray_client = ray_temp_dir = None

    try:
        # Create directories individually
        for directory in [scratch_path, ray_cluster_path, logs_path, benchmark_results_path]:
            create_or_overwrite_dir(directory)

        # Each entry gets its own Ray cluster sized from the entry's config;
        # num_cpus defaults to the host CPU count when unspecified.
        ray_client, ray_temp_dir = setup_ray_cluster_and_env(
            num_cpus=entry.ray.get("num_cpus", os.cpu_count() or 1),
            num_gpus=entry.ray.get("num_gpus", 0),
            enable_object_spilling=bool(entry.ray.get("enable_object_spilling", False)),
            ray_log_path=logs_path / "ray.log",
            object_store_size_bytes=entry.object_store_size_bytes,
        )

        # Execute command with timeout
        logger.info(f"\t\tRunning command {' '.join(cmd) if isinstance(cmd, list) else cmd}")
        started_exec = time.time()
        run_data = run_command_with_timeout(
            command=cmd,
            timeout=entry.timeout_s,
            stdouterr_path=logs_path / "stdouterr.log",
            run_id=run_id,
            # Fancy (interactive-style) output is disabled when the debug env
            # var is set to anything other than "0".
            fancy=os.environ.get("CURATOR_BENCHMARKING_DEBUG", "0") == "0",
        )
        ended_exec = time.time()
        duration = ended_exec - started_exec

        # Update result_data
        result_data.update(
            {
                "cmd": cmd,
                "exec_started_at": started_exec,
                "exec_time_s": duration,
                "exit_code": run_data["returncode"],
                "timed_out": run_data["timed_out"],
                "logs_dir": logs_path,
            }
        )
        ray_data = {}
        # script_persisted_data is a dictionary with keys "params" and "metrics"
        # "params" will contain everything the script wrote to its params.json file
        # "metrics" will contain everything the script wrote to its metrics.json file plus metrics
        # from the Task objects restored from the tasks.pkl file.
        script_persisted_data = get_entry_script_persisted_data(benchmark_results_path)
        result_data.update(
            {
                "ray_data": ray_data,
                "metrics": script_persisted_data["metrics"],
                "params": script_persisted_data["params"],
            }
        )

        # Check if the run itself returned a success code, if so, use the updated
        # result_data to check if requirements were met.
        if run_data["returncode"] == 0:
            success = check_requirements_update_results(result_data, entry.requirements)
        else:
            success = False
            logger.error(f"\t\t❌ Run Failed in {duration:.1f} seconds")
            if run_data["timed_out"]:
                logger.warning(f"\t\t⏰ Timed out after {entry.timeout_s}s")

        result_data["success"] = success
        logger.info(f"\t\tLogs found in {logs_path}")
        # NOTE(review): get_obj_for_json presumably converts non-JSON-native
        # values (e.g. the Path stored under "logs_dir") — confirm in runner.utils.
        Path(session_entry_path / "results.json").write_text(json.dumps(get_obj_for_json(result_data)))

        return success

    finally:
        # Teardown tolerates ray_client/ray_temp_dir still being None when
        # setup itself failed.
        teardown_ray_cluster_and_env(ray_client, ray_temp_dir, ray_cluster_path)

        # Clean up the scratch dir if configured to delete
        if entry.delete_scratch:
            shutil.rmtree(scratch_path, ignore_errors=True)


def main() -> int:
    """CLI entry point: load configs, run every benchmark entry, notify sinks.

    Returns 0 when every entry succeeded, 1 on any failure or invalid config.
    """
    parser = argparse.ArgumentParser(description="Runs the benchmarking application")
    parser.add_argument(
        "--config",
        type=Path,
        action="append",
        required=True,
        help=(
            "Path to YAML config for benchmark matrix, machine paths, etc. Can be "
            "specified multiple times to merge configs."
        ),
    )
    parser.add_argument(
        "--session-name",
        default=None,
        help="Optional human-readable session name. Default is benchmark-run__<timestamp>.",
    )
    args = parser.parse_args()

    # Merge every document of every YAML file into one dict; later files (and
    # later documents) override earlier ones key-by-key.
    merged_config: dict = {}
    for cfg_path in args.config:
        with open(cfg_path) as fh:
            for doc in yaml.full_load_all(fh):
                merged_config.update(doc)

    # Validate and expand ${ENV_VAR} references before building objects.
    try:
        Session.assert_valid_config_dict(merged_config)
        merged_config = resolve_env_vars(merged_config)
    except ValueError as e:
        logger.error(f"Invalid configuration: {e}")
        return 1

    session = Session.create_from_dict(merged_config)

    # Each session gets its own folder under results_dir.
    session_name = args.session_name or time.strftime("benchmark-run__%Y-%m-%d__%H-%M-%S")
    session_path = (session.results_path / session_name).absolute()
    ensure_dir(session_path)

    overall_success = True
    logger.info(f"Started session {session_name}...")
    env_dict = dump_env(session_obj=session, output_path=session_path)

    for sink in session.sinks:
        sink.initialize(session_name=session_name, matrix_config=session, env_dict=env_dict)

    # Preview of what will run; disabled entries never appear in session.entries.
    # TODO: should entries be created unconditionally and have an "enabled" field instead?
    logger.info("Benchmark entries to be run in this session:")
    for idx, entry in enumerate(session.entries, start=1):
        logger.info(f"\t{idx}. {entry.name}")

    for entry in session.entries:
        run_success = False
        run_id = f"{entry.name}-{int(time.time())}"
        result_data = {
            "name": entry.name,
            "run_id": run_id,
            "success": run_success,
        }
        logger.info(f"🚀 Running {entry.name} (run ID: {run_id})")
        try:
            run_success = run_entry(
                entry=entry,
                path_resolver=session.path_resolver,
                dataset_resolver=session.dataset_resolver,
                session_path=session_path,
                result_data=result_data,
            )
        except Exception as e:  # noqa: BLE001
            # One entry blowing up must not abort the rest of the session.
            run_success = False
            error_traceback = traceback.format_exc()
            logger.error(f"\t\t❌ Entry failed with exception: {e}")
            logger.debug(f"Full traceback:\n{error_traceback}")
            result_data.update(
                {
                    "error": str(e),
                    "traceback": error_traceback,
                    "success": run_success,
                }
            )
        finally:
            # Sinks see every entry's result, successful or not.
            overall_success &= run_success
            for sink in session.sinks:
                sink.process_result(result_dict=result_data, matrix_entry=entry)

    for sink in session.sinks:
        sink.finalize()
    logger.info(f"Session {session_name} completed with overall success: {overall_success}")
    return 0 if overall_success else 1


if __name__ == "__main__":
    # Propagate main()'s status code to the shell.
    sys.exit(main())
