# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import datetime
import importlib.util
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import subprocess
import sys

import pygit2
from docutils import nodes

# Make conf.py's own directory importable so `setup()` below can `from helper import ...`.
sys.path.insert(0, os.path.abspath('.'))

# -- Core project metadata read by Sphinx ------------------------------------
project = 'TensorRT LLM'
copyright = '2025, NVidia'
author = 'NVidia'
html_show_sphinx = False  # hide the "Built with Sphinx" footer credit

# Get the git commit hash of the checkout being documented; used in
# `myst_url_schemes` and the page footer to link back to the exact source tree.
repo = pygit2.Repository('.')
commit_hash = str(repo.head.target)[:7]  # Get first 7 characters of commit hash

# Get current date (UTC) for the "Last updated" footer line.
last_updated = datetime.datetime.now(
    datetime.timezone.utc).strftime("%B %d, %Y")

# Get the version from the version.py file. Loaded via importlib rather than
# `import tensorrt_llm` so the docs build does not require the (heavy) package
# to be importable.
version_path = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "../../tensorrt_llm/version.py"))
spec = importlib.util.spec_from_file_location("version_module", version_path)
version_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(version_module)
version = version_module.__version__

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

templates_path = ['_templates']
# Excluded because it is included into another page and would otherwise build twice.
exclude_patterns = ['performance/performance-tuning-guide/introduction.md']

extensions = [
    'sphinx.ext.duration',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'myst_parser',  # for markdown support
    "breathe",  # bridges doxygen XML into Sphinx for the C++ API docs
    'sphinx.ext.todo',
    'sphinx.ext.autosectionlabel',
    'sphinxarg.ext',
    'sphinx_click',
    'sphinx_copybutton',
    'sphinxcontrib.autodoc_pydantic',
    'sphinx_togglebutton',
]

# -- autodoc / autodoc-pydantic rendering options ----------------------------
autodoc_member_order = 'bysource'
autodoc_pydantic_model_show_json = True
autodoc_pydantic_model_show_config_summary = True
autodoc_pydantic_field_doc_policy = "description"
autodoc_pydantic_model_show_field_list = True  # Display field list with descriptions
autodoc_pydantic_model_member_order = "groupwise"
autodoc_pydantic_model_hide_pydantic_methods = True
autodoc_pydantic_field_list_validators = False
autodoc_pydantic_settings_signature_prefix = ""  # remove any prefix
autodoc_pydantic_settings_hide_reused_validator = True  # hide reused validators to reduce noise

# MyST URL schemes: `source:` links resolve to the exact commit on GitHub so
# documentation links always match the documented source tree.
myst_url_schemes = {
    "http":
    None,
    "https":
    None,
    "source":
    "https://github.com/NVIDIA/TensorRT-LLM/tree/" + commit_hash + "/{{path}}",
}

# Generate anchors for markdown headings down to level 4.
myst_heading_anchors = 4

myst_enable_extensions = [
    "deflist",
    "substitution",
    "dollarmath",
    "amsmath",
]

# Values available as {{version}} etc. inside markdown sources.
myst_substitutions = {
    "version":
    version,
    "version_quote":
    f"`{version}`",
    "container_tag_admonition":
    r"""
```{admonition} Container image tags
:class: dropdown note
In the example shell commands, `x.y.z` corresponds to the TensorRT-LLM container
version to use. If omitted, `IMAGE_TAG` will default to `tensorrt_llm.__version__`
(e.g., this documentation was generated from the {{version_quote}} source tree).
If this does not work, e.g., because a container for the version you are
currently working with has not been released yet, you can try using a
container published for a previous
[GitHub pre-release or release](https://github.com/NVIDIA/TensorRT-LLM/releases)
(see also [NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/tensorrt-llm/containers/release/tags)).
```
    """
}

autosummary_generate = True
# sphinx-copybutton: strip line numbers and REPL/shell prompts when copying.
copybutton_exclude = '.linenos, .gp, .go'
copybutton_prompt_text = ">>> |$ |# "
copybutton_line_continuation_character = "\\"

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

# Map file extensions to their parser; .txt is treated as markdown here.
source_suffix = {
    '.rst': 'restructuredtext',
    '.txt': 'markdown',
    '.md': 'markdown',
    '.json': 'json',
}

html_theme = 'nvidia_sphinx_theme'
html_static_path = ['_static']
html_theme_options = {
    # Version-switcher dropdown; switcher.json lists the published versions.
    "switcher": {
        "json_url": "./_static/switcher.json",
        "version_match": version,
        "check_switcher": True,
    },
    # Footer shows build date and a link to the exact source commit.
    "extra_footer": [
        f'<p>Last updated on {last_updated}.</p>',
        f'<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/{commit_hash}">{commit_hash}</a>.</p>'
    ]
}

# ------------------------  C++ Doc related  --------------------------
# Breathe configuration: points at the doxygen XML produced by the cpp docs build.
breathe_default_project = "TensorRT-LLM"
breathe_projects = {"TensorRT-LLM": "../cpp_docs/xml"}

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

# C++ public headers to document, and where the generated .rst stubs go.
CPP_INCLUDE_DIR = os.path.join(SCRIPT_DIR, '../../cpp/include/tensorrt_llm')
CPP_GEN_DIR = os.path.join(SCRIPT_DIR, '_cpp_gen')
# Printed for build-log debugging of path resolution.
print('CPP_INCLUDE_DIR', CPP_INCLUDE_DIR)
print('CPP_GEN_DIR', CPP_GEN_DIR)

html_css_files = [
    'custom.css',
]


def tag_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Implement the custom ``:tag:`` inline role.

    Renders the role text as an inline literal node carrying the CSS classes
    ``tag`` and the lower-cased tag name so it can be styled from
    ``custom.css``.

    The signature follows the docutils inline-role convention; ``name``,
    ``rawtext``, ``lineno``, ``inliner``, ``options`` and ``content`` are
    accepted for API compatibility but are not otherwise used (the previous
    ``options or {}`` / ``content or []`` rebindings were dead code and have
    been removed).

    Returns:
        A ``([nodes], [system_messages])`` pair, as required by docutils.
    """
    node = nodes.literal(text, text, classes=['tag', text.lower()])
    return [node], []


def setup(app):
    """Sphinx extension entry point (called automatically because this
    conf.py defines ``setup``): register the custom ``:tag:`` role and
    pre-generate derived documentation sources.
    """
    # Local import: helper.py sits next to conf.py (sys.path includes '.').
    from helper import generate_examples, generate_llmapi, update_version

    try:
        # Best-effort: tags LLM API parameters for the generated docs; the
        # tensorrt_llm package may not be installed in the docs environment.
        from tensorrt_llm.llmapi.utils import tag_llm_params
        tag_llm_params()
    except ImportError:
        print("Warning: tensorrt_llm not available, skipping tag_llm_params")

    app.add_role('tag', tag_role)

    # Generate example/API pages before Sphinx reads the source tree.
    generate_examples()
    generate_llmapi()
    update_version()


def gen_cpp_doc(ofile_name: str, header_dir: str, summary: str):
    """Write an .rst file that pulls in doxygen docs for every C++ header.

    Args:
        ofile_name: Path of the .rst file to (over)write.
        header_dir: Directory scanned (non-recursively) for ``*.h`` files.
        summary: Text placed at the top of the generated file (typically a
            section title plus reST comments).
    """
    # Sort for deterministic section order: os.listdir() order is
    # filesystem-dependent, which made successive doc builds differ.
    cpp_header_files = sorted(
        file for file in os.listdir(header_dir) if file.endswith('.h'))

    with open(ofile_name, 'w') as ofile:
        ofile.write(summary + "\n")
        for header in cpp_header_files:
            # Section title underlined per reST convention, then the
            # breathe directive that expands the doxygen docs for the file.
            ofile.write(f"{header}\n")
            ofile.write("_" * len(header) + "\n\n")

            ofile.write(f".. doxygenfile:: {header}\n")
            ofile.write("   :project: TensorRT-LLM\n\n")


# Preamble for the generated runtime .rst (plain string: no placeholders, so
# the previous f-prefix was unnecessary).
runtime_summary = """
Runtime
==========

.. Here are files in the cpp/include/runtime
.. We manually add subsection to enable detailed description in the future
.. It is also doable to automatically generate this file and list all the modules in the conf.py
    """.strip()

# compile cpp doc
# os.makedirs is portable and raises on real failures, unlike spawning
# `mkdir -p` via subprocess (whose exit status was ignored).
os.makedirs(CPP_GEN_DIR, exist_ok=True)
gen_cpp_doc(CPP_GEN_DIR + '/runtime.rst', CPP_INCLUDE_DIR + '/runtime',
            runtime_summary)

# Preamble for the generated executor .rst (plain string: no placeholders, so
# the previous f-prefix was unnecessary).
executor_summary = """
Executor
==========

.. Here are files in the cpp/include/executor
.. We manually add subsection to enable detailed description in the future
.. It is also doable to automatically generate this file and list all the modules in the conf.py
    """.strip()

# os.makedirs is portable and raises on real failures, unlike spawning
# `mkdir -p` via subprocess (whose exit status was ignored).
os.makedirs(CPP_GEN_DIR, exist_ok=True)
gen_cpp_doc(CPP_GEN_DIR + '/executor.rst', CPP_INCLUDE_DIR + '/executor',
            executor_summary)
