"""Setup script for accelerator-benchmark package"""

from setuptools import setup, find_packages
from pathlib import Path

# Read the contents of README file
# Read the long description from README.md next to this script.
# Fall back to an empty string when the file is absent (e.g. an sdist or
# checkout without the README) so installation never fails on metadata.
this_directory = Path(__file__).parent
_readme = this_directory / "README.md"
long_description = _readme.read_text(encoding="utf-8") if _readme.is_file() else ""

# Shared requirement lists, hoisted so the overlapping extras cannot drift
# apart (previously "gpu", "gpu-nvidia" and "all" each repeated the same
# pins verbatim, and "all" had silently fallen out of sync with "dev").
_GPU_REQUIRES = [
    "onnxruntime-gpu>=1.18.0",
    "nvidia-cuda-runtime-cu12>=12.6.0",
    "nvidia-cudnn-cu12>=9.0.0",
    "nvidia-ml-py>=12.535.133",  # NVML PowerBackend
]
_DEV_REQUIRES = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-mock>=3.10.0",
    "black>=23.0.0",
    "flake8>=6.0.0",
    "mypy>=1.0.0",
    "isort>=5.12.0",
]

setup(
    name="accelerator-benchmark",
    version="0.1.0",
    author="Accelerator Benchmark Team",
    description="Lightweight Inference Benchmark Framework (ONNX-origin, Backend-agnostic)",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://gitee.com/ding-jiachao/accelerator-benchmark",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    python_requires=">=3.10",
    # Core framework dependencies (required by every user).
    install_requires=[
        "onnx>=1.15.0",  # used to read model metadata
        "numpy>=1.24.0",
        "pandas>=2.0.0",
        "psutil>=5.9.0",
        "rich>=13.0.0",
        "typer>=0.12.0",
        "huggingface_hub>=0.20.0",
    ],
    extras_require={
        # ====================================================================
        # Reference-implementation dependencies (optional).
        # Only needed by the built-in ONNX Runtime adapter and the power
        # monitoring backends; hardware vendors implementing their own
        # adapter do not need to install these.
        # ====================================================================

        # CPU build of ONNX Runtime (reference implementation)
        "cpu": [
            "onnxruntime>=1.18.0",
        ],
        # NVIDIA GPU build of ONNX Runtime + NVML power monitoring
        # (reference implementation)
        "gpu": _GPU_REQUIRES,
        # Explicit alias of "gpu", kept for backward compatibility.
        "gpu-nvidia": _GPU_REQUIRES,
        # AMD GPU (limited support; vendors are encouraged to ship
        # their own adapter)
        "gpu-amd": [
            "onnxruntime>=1.18.0",  # no official ROCm build available yet
        ],
        # ====================================================================
        # Development tools (optional)
        # ====================================================================
        "dev": _DEV_REQUIRES,
        # Full install (GPU reference implementation + dev tools).
        # Previously this list omitted pytest-mock/flake8/mypy/isort; it is
        # now the true union of "gpu" and "dev" plus matplotlib (kept for
        # backward compatibility with existing "[all]" installs).
        "all": _GPU_REQUIRES + _DEV_REQUIRES + ["matplotlib>=3.7.0"],
    },
    entry_points={
        "console_scripts": [
            "bench=bench.cli:main",
        ],
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
    ],
    keywords="benchmark inference onnx ai accelerator performance",
    project_urls={
        "Documentation": "https://gitee.com/ding-jiachao/accelerator-benchmark/docs",
        "Source": "https://gitee.com/ding-jiachao/accelerator-benchmark",
        "Tracker": "https://gitee.com/ding-jiachao/accelerator-benchmark/issues",
    },
)
