"""报告生成和统计模块"""

from __future__ import annotations

import platform
import sys
from datetime import datetime
from typing import Dict, Any, Optional

from .utils.logging import console


def collect_env_info(adapter_backend: Optional[str] = None,
                     power_backend: Optional[str] = None,
                     device_id: int = 0) -> Dict[str, Any]:
    """
    Collect environment information (reuses the `env` command logic).

    All hardware probes are best-effort: a missing library or tool never
    raises from here, it just leaves the corresponding section empty/None.

    Args:
        adapter_backend: Adapter backend name ("auto" when not given).
        power_backend: Power backend name; None means power sampling is off.
        device_id: Device ID used for power sampling.

    Returns:
        Dict with "system", "adapter", "power", "cpu" and "gpu" sections.
    """
    env_info: Dict[str, Any] = {
        "system": {
            "os": platform.system(),
            "os_version": platform.version(),
            "platform": platform.platform(),
            "architecture": platform.machine(),
            "processor": platform.processor() or "N/A",
            "python_version": platform.python_version(),
            "python_implementation": platform.python_implementation(),
        },
        "adapter": {
            "backend": adapter_backend or "auto",
        },
        "power": {
            # None when power sampling is disabled.  Parenthesized because
            # `or` binds tighter than the conditional expression; "auto"
            # only applies to an explicitly-empty backend string.
            "backend": (power_backend or "auto") if power_backend is not None else None,
            "device_id": device_id,
        },
    }

    env_info["cpu"] = _collect_cpu_info()

    gpu_info: Dict[str, Any] = {}
    try:
        nvidia = _probe_nvidia_gpus()
        # A working NVML with zero devices leaves the "nvidia" key absent,
        # matching the historical behavior of this function.
        if nvidia is not None:
            gpu_info["nvidia"] = nvidia
    except Exception:
        gpu_info["nvidia"] = None
    gpu_info["amd"] = _probe_amd_gpu()
    env_info["gpu"] = gpu_info

    # With an explicit adapter backend, enrich with detailed adapter metadata.
    if adapter_backend and adapter_backend != "auto":
        try:
            from .adapters import InferenceRunner
            runner = InferenceRunner(adapter_backend=adapter_backend)
            env_info["adapter"].update(runner.get_metadata())
        except Exception:
            # Best-effort: the adapter may be unavailable in this environment.
            pass

    return env_info


def _collect_cpu_info() -> Dict[str, Any]:
    """Return CPU core counts and frequencies via psutil, if available."""
    try:
        import psutil
    except ImportError:
        return {"info": "psutil not available"}

    cpu: Dict[str, Any] = {
        "physical_cores": psutil.cpu_count(logical=False),
        "logical_cores": psutil.cpu_count(logical=True),
    }
    freq = psutil.cpu_freq()
    if freq:
        cpu["freq_current_mhz"] = round(freq.current, 2)
        cpu["freq_max_mhz"] = round(freq.max, 2)
    return cpu


def _probe_nvidia_gpus() -> Optional[Dict[str, Any]]:
    """
    Enumerate NVIDIA GPUs via NVML.

    Returns a {"count", "devices"} dict, or None when no device is present.
    Raises on any NVML/import failure; the caller treats that as "no info".
    """
    import pynvml

    pynvml.nvmlInit()
    try:
        device_count = pynvml.nvmlDeviceGetCount()
        if device_count == 0:
            return None

        # Driver/CUDA versions are system-wide: query once, not per device.
        driver_version = pynvml.nvmlSystemGetDriverVersion()
        cuda_version = pynvml.nvmlSystemGetCudaDriverVersion()
        cuda_str = f"{cuda_version // 1000}.{(cuda_version % 1000) // 10}"

        devices = []
        for i in range(device_count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            devices.append({
                "id": i,
                "name": pynvml.nvmlDeviceGetName(handle),
                "memory_total_gb": round(memory_info.total / 1024**3, 2),
                "driver_version": driver_version,
                "cuda_version": cuda_str,
            })
        return {"count": device_count, "devices": devices}
    finally:
        # Always release NVML, even when a per-device query fails
        # (the original leaked the NVML session on mid-loop errors).
        pynvml.nvmlShutdown()


def _probe_amd_gpu() -> Optional[Dict[str, str]]:
    """Query AMD GPU product names via rocm-smi; None when unavailable."""
    import subprocess
    try:
        result = subprocess.run(
            ['rocm-smi', '--showproductname'],
            capture_output=True, text=True, timeout=5,
        )
    except Exception:
        return None
    if result.returncode == 0:
        return {"info": result.stdout.strip()}
    return None


def compute_overall_summary(models_results: Dict[str, Any],
                           duration_s: float) -> Dict[str, Any]:
    """
    Compute overall statistics across all models.

    Args:
        models_results: Results dict for all tested models; a model whose
            result contains an "error" key counts as failed.
        duration_s: Total test duration in seconds.

    Returns:
        Summary dict with model counts and duration; "failed_model_names",
        "qps" stats and energy totals are included only when present.
    """
    total_models = len(models_results)
    successful = 0
    failed = 0
    failed_models = []
    qps_list = []
    energy_list = []

    for model_name, result in models_results.items():
        if "error" in result:
            failed += 1
            failed_models.append(model_name)
        else:
            successful += 1
            # Compare against None: a measured 0/0.0 is valid data and must
            # not be dropped (a plain truthiness check would skip it).
            if result.get("qps") is not None:
                qps_list.append(result["qps"])
            if result.get("energy_J") is not None:
                energy_list.append(result["energy_J"])

    summary = {
        "total_models": total_models,
        "successful_models": successful,
        "failed_models": failed,
        "total_duration_s": round(duration_s, 2),
    }

    if failed_models:
        summary["failed_model_names"] = failed_models

    if qps_list:
        summary["qps"] = {
            "avg": round(sum(qps_list) / len(qps_list), 2),
            "min": round(min(qps_list), 2),
            "max": round(max(qps_list), 2),
        }

    if energy_list:
        summary["total_energy_J"] = round(sum(energy_list), 2)
        summary["avg_energy_J"] = round(sum(energy_list) / len(energy_list), 2)

    return summary


def enhance_results(results: Dict[str, Any],
                    adapter_backend: Optional[str] = None,
                    power_backend: Optional[str] = None,
                    device_id: int = 0,
                    sample_rate_hz: float = 50.0,
                    start_time: Optional[float] = None,
                    end_time: Optional[float] = None,
                    model_configs: Optional[Dict[str, Dict]] = None) -> Dict[str, Any]:
    """
    Enhance raw test results with environment info, run config and an
    overall summary.

    Note: since v1.1, warmup/inputs configuration is stored in each
    model's own result.

    Args:
        results: Raw test results (contains only the "models" field).
        adapter_backend: Adapter backend name.
        power_backend: Power backend name; None means power sampling is off.
        device_id: Device ID.
        sample_rate_hz: Power sampling frequency.
        start_time: Start timestamp.
        end_time: End timestamp.
        model_configs: Optional per-model config dict (recorded in "run").

    Returns:
        Full enhanced results dict with "env", "run", "models", "summary".
    """
    # Total duration; compare against None so a 0.0 timestamp still counts.
    if start_time is not None and end_time is not None:
        duration_s = end_time - start_time
    else:
        duration_s = 0.0

    run_config: Dict[str, Any] = {
        "power_sampling": power_backend is not None,
        "device_id": device_id,
        "sample_rate_hz": sample_rate_hz,
        "adapter_backend": adapter_backend or "auto",
        # None when power sampling is disabled.  Parenthesized because
        # `or` binds tighter than the conditional expression; "auto"
        # only applies to an explicitly-empty backend string.
        "power_backend": (power_backend or "auto") if power_backend is not None else None,
        "timestamp": datetime.now().isoformat(),
    }

    models = results.get("models", {})

    if model_configs:
        run_config["config_note"] = "Per-model inputs/warmup stored in each model's config field"
        run_config["config_source"] = "per_model"
    elif models:
        # Backward compatibility: take warmup/inputs from the first model
        # (assumes all models share the same config).
        first_model = next(iter(models.values()))
        if "config" in first_model:
            run_config["warmup"] = first_model["config"]["warmup"]
            run_config["inputs"] = first_model["config"]["inputs"]
            run_config["config_source"] = "uniform"

    return {
        "env": collect_env_info(adapter_backend, power_backend, device_id),
        "run": run_config,
        "models": models,
        "summary": compute_overall_summary(models, duration_s),
    }


def print_batch_metrics(batch_result: Dict[str, Any]) -> None:
    """
    Print the performance metrics of a single batch to stdout.

    Args:
        batch_result: Result dict for one batch run; missing metrics
            default to 0, and the power/energy lines are printed only
            when an average power reading is present.
    """
    qps = batch_result.get('qps', 0)
    avg_latency = batch_result.get('lat_avg_ms', 0)
    p99_latency = batch_result.get('p99_ms', 0)
    print(f"    QPS: {qps:.2f}")
    print(f"    Latency (avg): {avg_latency:.2f} ms")
    print(f"    Latency (p99): {p99_latency:.2f} ms")

    avg_power = batch_result.get('power_avg_W')
    if avg_power:
        print(f"    Power (avg): {avg_power:.2f} W")
        print(f"    Energy: {batch_result.get('energy_J', 0):.2f} J")


def print_model_summary(model_name: str, result: Dict[str, Any]) -> None:
    """
    Print a short summary of one model's test results.

    Args:
        model_name: Model name (currently unused in the output).
        result: Model result dict; nothing is printed for failed models
            (those carrying an "error" key).
    """
    if "error" in result:
        return

    lines = [
        "\n  Model Summary:",
        f"    QPS: {result.get('qps', 0):.2f}",
        f"    Batch Size: {result.get('batch_size', 'N/A')}",
    ]
    for line in lines:
        print(line)


def print_overall_summary(summary: Dict[str, Any]) -> None:
    """
    Print the overall test summary to stdout.

    Args:
        summary: Overall statistics dict (from compute_overall_summary);
            an empty/falsy dict prints nothing.
    """
    if not summary:
        return

    bar = '=' * 60
    print(f"\n{bar}")
    print("OVERALL SUMMARY")
    print(bar)
    print(f"Total Models: {summary['total_models']}")
    print(f"  Successful: {summary['successful_models']}")
    print(f"  Failed: {summary['failed_models']}")

    failed_names = summary.get('failed_model_names')
    if failed_names:
        print(f"  Failed Models: {', '.join(failed_names)}")

    print(f"Total Duration: {summary['total_duration_s']:.2f} s")

    if "qps" in summary:
        qps_stats = summary["qps"]
        print("\nQPS Statistics:")
        print(f"  Average: {qps_stats['avg']:.2f}")
        print(f"  Range: [{qps_stats['min']:.2f}, {qps_stats['max']:.2f}]")

    if "total_energy_J" in summary:
        print("\nEnergy Statistics:")
        print(f"  Total: {summary['total_energy_J']:.2f} J")
        print(f"  Average per model: {summary['avg_energy_J']:.2f} J")

    print(bar)


def print_summary(all_results: Dict[str, Any]) -> None:
    """
    Print the complete benchmark results summary (used for CLI output).

    Renders a "Benchmark Summary" rule header, then pretty-prints the
    whole results dict as JSON via the shared console (presumably a
    rich Console — it exposes rule/print_json; confirm in utils.logging).

    Args:
        all_results: Results dict covering all tested models.
    """
    console.rule("Benchmark Summary")
    console.print_json(data=all_results)
