import json
import os
import time
from pathlib import Path
from typing import Optional

import typer

from .batch_runner import run_multi_model_benchmark
from .utils.logging import console
from .models.registry import get_model_spec, list_models
from .adapters import list_adapters
from .metrics import list_power_backends

# Initialize the Typer command-line application
app = typer.Typer(add_completion=False, no_args_is_help=True)

@app.command()
def env():
    """Print detailed environment provenance info (hardware, drivers, OS config, available backends)."""
    import platform

    def _print_backend_status(title: str, empty_msg: str, registered, available) -> None:
        # Shared renderer for the "registered vs. available" listings; the
        # original duplicated this loop for adapters and power backends.
        console.print(f"\n[bold]{title}[/bold]")
        if not registered:
            console.print(f"  [yellow]{empty_msg}[/yellow]")
            return
        for name in registered:
            is_available = name in available
            icon = "✓" if is_available else "✗"
            color = "green" if is_available else "red"
            text = "available" if is_available else "unavailable"
            console.print(f"  [{color}]{icon}[/{color}] {name:20s} ({text})")

    console.print("[bold cyan]Environment Information[/bold cyan]")
    console.print("=" * 80)

    # ===== System information =====
    console.print("\n[bold]System Information:[/bold]")
    system_info = {
        "os": platform.system(),
        "os_version": platform.version(),
        "platform": platform.platform(),
        "architecture": platform.machine(),
        "processor": platform.processor() or "N/A",
        "python_version": platform.python_version(),
        "python_implementation": platform.python_implementation(),
    }
    for key, value in system_info.items():
        console.print(f"  {key:20s}: {value}")

    # ===== CPU information (best-effort; requires optional psutil) =====
    console.print("\n[bold]CPU Information:[/bold]")
    try:
        import psutil
        cpu_count_physical = psutil.cpu_count(logical=False)
        cpu_count_logical = psutil.cpu_count(logical=True)
        cpu_freq = psutil.cpu_freq()
        console.print(f"  {'physical_cores':20s}: {cpu_count_physical}")
        console.print(f"  {'logical_cores':20s}: {cpu_count_logical}")
        if cpu_freq:
            console.print(f"  {'cpu_freq_current':20s}: {cpu_freq.current:.2f} MHz")
            console.print(f"  {'cpu_freq_max':20s}: {cpu_freq.max:.2f} MHz")
    except ImportError:
        console.print("  [yellow]psutil not installed, CPU details unavailable[/yellow]")

    # ===== GPU information =====
    console.print("\n[bold]GPU Information:[/bold]")
    gpu_detected = False

    # NVIDIA: query via NVML; any failure (missing lib/driver) is non-fatal.
    try:
        import pynvml
        pynvml.nvmlInit()
        device_count = pynvml.nvmlDeviceGetCount()
        console.print(f"  [green]NVIDIA GPU Detected: {device_count} device(s)[/green]")
        for i in range(device_count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            name = pynvml.nvmlDeviceGetName(handle)
            # Older pynvml releases return bytes here; normalize so the
            # output never shows a b'...' literal.
            if isinstance(name, bytes):
                name = name.decode("utf-8", errors="replace")
            driver_version = pynvml.nvmlSystemGetDriverVersion()
            cuda_version = pynvml.nvmlSystemGetCudaDriverVersion()
            memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            console.print(f"    GPU {i}:")
            console.print(f"      name           : {name}")
            console.print(f"      memory_total   : {memory_info.total / 1024**3:.2f} GB")
            console.print(f"      driver_version : {driver_version}")
            # NVML encodes the CUDA driver version as major*1000 + minor*10.
            console.print(f"      cuda_version   : {cuda_version // 1000}.{(cuda_version % 1000) // 10}")
        pynvml.nvmlShutdown()
        gpu_detected = True
    except Exception as e:
        console.print(f"  [dim]NVIDIA GPU: Not available ({type(e).__name__})[/dim]")

    # AMD: best-effort probe via the rocm-smi CLI tool.
    try:
        import subprocess
        result = subprocess.run(['rocm-smi', '--showproductname'],
                              capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            console.print("  [green]AMD GPU Detected[/green]")
            console.print(f"    {result.stdout.strip()}")
            gpu_detected = True
    except Exception:
        console.print("  [dim]AMD GPU: Not available[/dim]")

    if not gpu_detected:
        console.print("  [yellow]No GPU detected or drivers not installed[/yellow]")

    # ===== Registered adapter backends =====
    _print_backend_status(
        "Available Adapters:",
        "No adapters registered",
        list_adapters(include_unavailable=True),
        list_adapters(include_unavailable=False),
    )

    # ===== Registered power backends =====
    _print_backend_status(
        "Available Power Backends:",
        "No power backends registered",
        list_power_backends(include_unavailable=True),
        list_power_backends(include_unavailable=False),
    )

    console.print("\n" + "=" * 80)


@app.command("list")
def list_cmd():
    """List all registered models."""
    # The function is named `list_cmd` to avoid shadowing the builtin `list`
    # at module level; the CLI command name stays "list" via the explicit
    # name argument to @app.command.
    models = list_models()
    console.print(models)

@app.command()
def inspect(model: str = typer.Argument(..., help="Model name in registry or path")):
    """Inspect a model's input/output specification from the registry."""
    # Look up the spec for the requested model and display it.
    console.print(get_model_spec(model))


@app.command()
def run(
    models: str = typer.Option(None, help="comma-separated model names or 'all'"),
    inputs: int = typer.Option(None, help="number of inputs (overrides config file if specified)"),
    warmup: int = typer.Option(None, help="number of warmup runs (overrides config file if specified)"),
    config: Optional[Path] = typer.Option("default", help="path to benchmark config file (JSON) for per-model settings, or 'default' to use built-in config, 'none' to disable"),
    report: Optional[Path] = typer.Option(None, help="output JSON report path"),
    no_power: bool = typer.Option(False, help="disable power sampling"),
    adapter_backend: Optional[str] = typer.Option(None, help="adapter backend name (use 'list' to see available, 'auto' or None for auto-detection)"),
    power_backend: Optional[str] = typer.Option(None, help="power backend name (use 'list' to see available, 'auto' or None for auto-detection)"),
    device_id: int = typer.Option(0, min=0, help="device ID for power monitoring (0 for first device, 1 for second, etc.)"),
):
    """Run the benchmark (multi-process mode; single or multiple models).

    Config precedence: CLI args > config file > defaults (per-model tuned config).
    The built-in per-model config file is used by default; disable with --config none.
    """

    def _show_backend_list(title: str, empty_msg: str, registered, available) -> None:
        # Render a "registered vs. available" listing, then exit the CLI.
        console.print(f"[bold cyan]{title}[/bold cyan]")
        if not registered:
            console.print(f"  [yellow]{empty_msg}[/yellow]")
        else:
            for name in registered:
                is_available = name in available
                icon = "✓" if is_available else "✗"
                color = "green" if is_available else "red"
                text = "available" if is_available else "unavailable"
                console.print(f"  [{color}]{icon}[/{color}] {name:20s} ({text})")
        raise typer.Exit()

    def _load_model_configs(path: Path) -> dict:
        # Load a JSON config file, dropping "_"-prefixed comment/metadata keys.
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        return {k: v for k, v in data.items() if not k.startswith("_")}

    # 'list' sentinel for --adapter-backend: show all registered adapters and exit.
    if adapter_backend and adapter_backend.lower() == "list":
        _show_backend_list(
            "Available Adapter Backends:",
            "No adapter backends registered",
            list_adapters(include_unavailable=True),
            list_adapters(include_unavailable=False),
        )

    # 'list' sentinel for --power-backend: show all registered backends and exit.
    if power_backend and power_backend.lower() == "list":
        _show_backend_list(
            "Available Power Backends:",
            "No power backends registered",
            list_power_backends(include_unavailable=True),
            list_power_backends(include_unavailable=False),
        )

    # Guard against --models being omitted: the option defaults to None, and
    # the original code crashed with AttributeError on None.split(",") instead
    # of reporting a clean usage error.
    if not models:
        raise typer.BadParameter("No models specified")

    # Resolve the model list.
    if models == "all":
        model_list = list_models()
    else:
        model_list = [m.strip() for m in models.split(",")]

    if not model_list:
        raise typer.BadParameter("No models specified")

    # None or 'auto' means auto-detect the adapter backend.
    adapter_backend_name = None if not adapter_backend or adapter_backend.lower() == "auto" else adapter_backend

    # None or 'auto' means auto-detect the power backend; --no-power disables it entirely.
    power_backend_name = None if no_power else (None if not power_backend or power_backend.lower() == "auto" else power_backend)

    # Resolve per-model benchmark configuration.
    model_configs = {}
    config_source = "none"

    if config and str(config).lower() == "none":
        # Explicitly disabled: run with the uniform fallback settings.
        model_configs = {}
        config_source = "disabled"
    elif not config or str(config).lower() == "default":
        # 'default' (or unspecified): use the built-in per-model config file.
        default_config_path = Path(__file__).parent / "models" / "benchmark_config.json"
        if default_config_path.exists():
            try:
                model_configs = _load_model_configs(default_config_path)
                config_source = "default"
                console.print("[green]Using default per-model config[/green]")
            except Exception as e:
                # A broken built-in config is non-fatal; fall back to uniform settings.
                console.print(f"[yellow]Warning: Could not load default config: {e}[/yellow]")
                console.print("[yellow]Falling back to uniform config (inputs=100, warmup=10)[/yellow]")
        else:
            console.print(f"[yellow]Warning: Default config not found at {default_config_path}[/yellow]")
            console.print("[yellow]Using uniform config (inputs=100, warmup=10)[/yellow]")
    else:
        # User-supplied config path: missing or malformed files are fatal.
        config_path = Path(config)
        if not config_path.exists():
            console.print(f"[red]Error: Config file not found: {config_path}[/red]")
            raise typer.Exit(code=1)
        try:
            model_configs = _load_model_configs(config_path)
            config_source = "custom"
            console.print(f"[green]Loaded config from: {config_path}[/green]")
        except Exception as e:
            console.print(f"[red]Error loading config file: {e}[/red]")
            raise typer.Exit(code=1)

    console.print("[bold green]Running multi-model benchmark[/bold green]")
    adapter_display = adapter_backend_name if adapter_backend_name else "auto (auto-detect)"
    console.print(f"Adapter backend: {adapter_display}")
    console.print(f"Models: {', '.join(model_list)}")

    # Summarize where the effective settings come from (CLI > file > fallback).
    if inputs is not None or warmup is not None:
        inputs_display = inputs if inputs is not None else "from config"
        warmup_display = warmup if warmup is not None else "from config"
        console.print(f"[cyan]Config:[/cyan] CLI override (inputs={inputs_display}, warmup={warmup_display})")
    elif model_configs:
        if config_source == "default":
            console.print("[cyan]Config:[/cyan] Default per-model config (see each model for details)")
        elif config_source == "custom":
            console.print(f"[cyan]Config:[/cyan] Custom per-model config from {config}")
    else:
        console.print("[cyan]Config:[/cyan] Uniform fallback (inputs=100, warmup=10)")

    console.print(f"Power sampling: {not no_power}")
    if not no_power:
        backend_display = power_backend_name if power_backend_name else "auto (auto-detect)"
        console.print(f"Power backend: {backend_display}")
        console.print(f"Device ID: {device_id}")
    console.print("─" * 60)

    # Launch the multi-process benchmark run.
    results = run_multi_model_benchmark(
        model_names=model_list,
        adapter_backend=adapter_backend_name,
        num_inputs=inputs,
        warmup=warmup,
        model_configs=model_configs,
        enable_power=not no_power,
        device_id=device_id,
        power_backend=power_backend_name
    )

    # Persist the report if a path was given; otherwise print it to the console.
    if report:
        report.parent.mkdir(parents=True, exist_ok=True)
        with open(report, "w", encoding="utf-8") as f:
            json.dump(results, f, ensure_ascii=False, indent=2)
        console.print(f"\n[green]Report saved to {report}[/green]")
    else:
        console.print("\n[bold]Full Report:[/bold]")
        console.print_json(data=results)

@app.command()
def visualize(
    port: int = typer.Option(8080, "--port", help="Port to run visualization server on"),
    host: str = typer.Option("127.0.0.1", "--host", help="Host to bind server to"),
    results: str = typer.Option("results", "--results", help="Results directory containing JSON reports"),
    debug: bool = typer.Option(False, "--debug", help="Enable debug mode")
):
    """
    Start the web visualization server for comparing benchmark results
    across multiple hardware platforms.

    Examples:
        bench visualize --port 8080 --results results/
        bench visualize --host 0.0.0.0 --port 5000
    """
    divider = "=" * 80
    try:
        # Imported lazily so the rest of the CLI works even without Flask.
        from .visualize.app import init_app, app as flask_app

        console.print("\n[bold cyan]Starting Benchmark Visualization Server[/bold cyan]")
        console.print(divider)

        # Point the Flask app at the directory of JSON reports.
        init_app(results)

        console.print(f"[green]✓[/green] Server running at: [bold]http://{host}:{port}[/bold]")
        console.print(f"[green]✓[/green] Results directory: [bold]{results}[/bold]")
        console.print("[dim]Press Ctrl+C to stop the server[/dim]")
        console.print(divider + "\n")

        # Blocks until the server is stopped (Ctrl+C).
        flask_app.run(host=host, port=port, debug=debug)
    except ImportError as e:
        console.print("[red]Error: Flask not installed. Please install it:[/red]")
        console.print("  pip install flask")
        console.print(f"\n[dim]Details: {e}[/dim]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Failed to start visualization server: {e}[/red]")
        raise typer.Exit(1)

def main():
    """Console-script entry point: dispatch to the Typer CLI application."""
    app()   # Run the CLI as a command-line program

if __name__ == "__main__":
    main()
