# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
from pathlib import Path

import pytest

# Names of the workflows the tutorial notebooks install; cleanup fixtures use this list
# to uninstall them before and after each test.
_ALL_WORKFLOWS = [
    "getting_started",
    "first_agent_attempt",
    "second_agent_attempt",
    "third_agent_attempt",
    "retail_sales_agent",
    "retail_sales_agent_nb3",
    "retail_sales_agent_nb5",
    "retail_sales_agent_nb6",
    "tmp_workflow",
    "mcp_dev_workflow",
    # NOTE(review): the mcp_setup_and_integration test expects the package
    # "nat_simple_calculator_notebook" — confirm which name the notebook actually installs.
    "simple_calculator_notebook",
]

# Other files produced by the notebooks, relative to the notebooks directory.
# Please keep this list sorted.
_OTHER_FILES = [
    "average_daily_revenue.png",  # bar chart produced by notebooks
    # Descriptions of fictitious electronic devices used in RAG examples in notebooks
    "data/rag/product_catalog.md",
    # Retail sales data for devices described in data/rag/product_catalog.md used in notebooks
    "data/retail_sales_data.csv",
    # The `eval_output` files are output from evaluation in observability_evaluation_and_profiling
    "eval_output/accuracy_output.json",
    "eval_output/groundedness_output.json",
    "eval_output/relevance_output.json",
    "eval_output/trajectory_accuracy_output.json",
    "eval_output/workflow_output.json",
    "langchain_agent.py",  # Example existing agent being incorporated into NAT in bringing_your_own_agent
    "nat_embedded.py",  # Python script generated by getting_started_with_nat, runs the workflow via the Python API
    # The `profile_output` files are output from profiling in observability_evaluation_and_profiling
    "profile_output/all_requests_profiler_traces.json",
    "profile_output/gantt_chart.png",
    "profile_output/inference_optimization.json",
    "profile_output/standardized_data_all.csv",
    "profile_output/workflow_output.json",
    "profile_output/workflow_profiling_metrics.json",
    "profile_output/workflow_profiling_report.txt",
    "revenue_across_stores.png",  # chart produced by the notebooks
    "sales_trend.png",  # line chart produced by the notebooks
    "search_agent.yml",  # Workflow generated by bringing_your_own_agent, uses Tavily to perform searches
    # NOTE: the `simple_calculator/` directory created by mcp_setup_and_integration is deliberately
    # not listed here: entries in this list are removed with `Path.unlink`, which handles files only.
]


@pytest.fixture(name="notebooks_dir", scope='session')
def notebooks_dir_fixture(examples_dir: Path) -> Path:
    """Session-scoped path to the ``notebooks`` directory under the examples tree."""
    return examples_dir.joinpath("notebooks")


def _is_installed(package_name: str) -> bool:
    """Return True if ``package_name`` is installed in the active environment.

    Shells out to ``uv pip show``; a zero exit status means the package is
    present.

    Args:
        package_name: Distribution name to look up.

    Returns:
        True when the package is installed, False when it is not — or when the
        ``uv`` executable itself is unavailable, so cleanup code degrades
        gracefully instead of crashing.
    """
    try:
        subprocess.run(
            ["uv", "pip", "show", "-q", package_name],
            check=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: package not installed. FileNotFoundError: `uv`
        # is not on PATH; treat the package as not installed.
        return False


def _delete_all_workflows():
    """Uninstall every known tutorial workflow that is currently installed."""
    installed = (name for name in _ALL_WORKFLOWS if _is_installed(name))
    for name in installed:
        # Deletion failures are non-fatal (check=False): best-effort cleanup.
        subprocess.run(["nat", "workflow", "delete", "--yes", name], check=False)


def _delete_other_files(notebooks_dir: Path):
    """Delete notebook-produced artifact files listed in ``_OTHER_FILES``.

    Args:
        notebooks_dir: Directory containing the tutorial notebooks; entries in
            ``_OTHER_FILES`` are resolved relative to it.
    """
    for relative_path in _OTHER_FILES:
        # missing_ok avoids the exists()/unlink() TOCTOU race and a redundant stat call.
        (notebooks_dir / relative_path).unlink(missing_ok=True)


def _cleanup_all(notebooks_dir: Path) -> None:
    """Restore a pristine state: uninstall all tutorial workflows, then remove
    the stray files the notebooks write under ``notebooks_dir``."""
    _delete_all_workflows()
    _delete_other_files(notebooks_dir)


@pytest.fixture(name="workflow_cleanups", scope='function', autouse=True)
def workflow_cleanups_fixture(notebooks_dir: Path):
    """Autouse fixture: scrub generated workflows/files around every test.

    The pre-``yield`` cleanup removes leftovers from a previous (possibly
    aborted) run; the post-``yield`` cleanup runs as teardown after each test.
    """
    _cleanup_all(notebooks_dir)
    yield
    _cleanup_all(notebooks_dir)


def _run_notebook(notebook_path: Path, expected_packages: list[str], timeout_seconds: int = 120):
    """Execute a Jupyter notebook end-to-end and verify its expected side effects.

    Cells tagged ``skip_e2e_test`` are skipped.

    Args:
        notebook_path: Path to the ``.ipynb`` file to execute.
        expected_packages: Packages the notebook is expected to have installed.
        timeout_seconds: Cell-execution timeout passed to ``jupyter execute``.

    Raises:
        AssertionError: If the notebook exits non-zero or an expected package
            is missing afterwards.
        subprocess.TimeoutExpired: If jupyter itself hangs past the grace period.
    """
    cmd = [
        "jupyter",
        "execute",
        f"--timeout={timeout_seconds}",
        "--NbClientApp.skip_cells_with_tag=skip_e2e_test",
        str(notebook_path.absolute()),
    ]

    # Ideally if the notebook times out we want jupyter to catch it and exit gracefully with the most informative error
    # possible. However in the potential situation where jupyter itself hangs, we add a 10s buffer to the timeout.
    result = subprocess.run(cmd, check=False, capture_output=True, text=True, timeout=timeout_seconds + 10)

    # Report both streams: jupyter/nbclient emit some failure details on stdout rather than stderr.
    assert result.returncode == 0, (f"Notebook execution failed (exit code {result.returncode}):\n"
                                    f"stdout:\n{result.stdout}\nstderr:\n{result.stderr}")

    for package in expected_packages:
        assert _is_installed(package), f"Expected package '{package}' is not installed."


@pytest.mark.slow
@pytest.mark.integration
@pytest.mark.usefixtures("nvidia_api_key")
@pytest.mark.parametrize(
    "notebook_file_name, expected_packages, timeout_seconds",
    [
        pytest.param("getting_started_with_nat.ipynb", ["getting_started"], 120, id="getting_started_with_nat"),
        pytest.param("adding_tools_to_agents.ipynb", ["retail_sales_agent_nb3"], 300, id="adding_tools_to_agents"),
        # NOTE(review): expects "nat_simple_calculator_notebook" while _ALL_WORKFLOWS lists
        # "simple_calculator_notebook" — confirm which name the notebook actually installs.
        pytest.param("mcp_setup_and_integration.ipynb", ["mcp_dev_workflow", "nat_simple_calculator_notebook"],
                     300,
                     id="mcp_setup_and_integration"),
        pytest.param("multi_agent_orchestration.ipynb", ["retail_sales_agent_nb5"], 120,
                     id="multi_agent_orchestration"),
        # Runs evaluation and profiling, hence the much larger timeout
        pytest.param("observability_evaluation_and_profiling.ipynb", ["retail_sales_agent_nb6"],
                     1000,
                     id="observability_evaluation_and_profiling"),
        pytest.param("optimize_model_selection.ipynb", ["tmp_workflow"], 300, id="optimize_model_selection"),
    ])
def test_notebooks(notebooks_dir: Path, notebook_file_name: str, expected_packages: list[str], timeout_seconds: int):
    """Execute each tutorial notebook end-to-end and verify the packages it installs."""
    _run_notebook(notebooks_dir / notebook_file_name,
                  expected_packages=expected_packages,
                  timeout_seconds=timeout_seconds)


@pytest.mark.slow
@pytest.mark.integration
@pytest.mark.usefixtures("nvidia_api_key", "tavily_api_key")
def test_2_bringing_your_own_agent(notebooks_dir: Path):
    """Execute the bringing_your_own_agent notebook end-to-end.

    Kept out of the parametrized ``test_notebooks`` because this notebook
    additionally requires a Tavily API key.
    """
    notebook = notebooks_dir / "bringing_your_own_agent.ipynb"
    _run_notebook(notebook,
                  expected_packages=["first_agent_attempt", "second_agent_attempt", "third_agent_attempt"])
