"""多进程批量基准测试协调模块"""

from __future__ import annotations

import multiprocessing
import time
from typing import Dict, Any, List, Optional
from pathlib import Path

# Force the 'spawn' start method to avoid CUDA context conflicts in
# child processes. Must run at import time, before any worker starts.
try:
    multiprocessing.set_start_method('spawn', force=True)
except RuntimeError:
    # Start method was already set elsewhere - ignore.
    pass

from .processes import monitor_worker, data_handler_worker, infer_worker
from .models.registry import get_model_spec
from .reporter import print_batch_metrics, enhance_results, print_overall_summary


def run_multi_model_benchmark(
    model_names: List[str],
    adapter_backend:  Optional[str] = None,
    num_inputs: Optional[int] = None,
    warmup: Optional[int] = None,
    model_configs: Optional[Dict[str, Dict]] = None,
    enable_power: bool = True,
    device_id: int = 0,
    sample_rate_hz: float = 50.0,
    power_backend: Optional[str] = None
) -> Dict[str, Any]:
    """
    运行多模型串行基准测试（多进程模式）
    
    架构：
    - 监控进程和数据处理进程持续运行，复用于所有模型
    - 每个模型创建独立推理进程，测试完成后销毁（强制释放CUDA/显存）
    - 使用静态batch模型，batch size从模型规格中获取
    
    配置优先级:
    - CLI参数 (num_inputs, warmup) 如果指定，覆盖所有模型
    - 配置文件 (model_configs) 为每个模型单独配置
    - 默认值: inputs=100, warmup=10
    
    Args:
        model_names: 模型名称列表
        adapter_backend: 适配器后端名称
        num_inputs: 推理轮数（None表示使用配置文件或默认值）
        warmup: 预热轮数（None表示使用配置文件或默认值）
        model_configs: 每个模型的配置字典 {model_name: {inputs: int, warmup: int}}
        enable_power: 是否启用功耗采样
        device_id: 设备ID
        sample_rate_hz: 功耗采样频率
        power_backend: 功耗后端名称（None表示自动检测）
        
    Returns:
        所有模型的测试结果字典
    """
    # 记录开始时间
    start_time = time.time()
    
    # 使用上下文管理器确保 Manager 正确清理
    # 注意：整个测试过程必须在 with 块内，确保 Manager 保持活动状态
    with multiprocessing.Manager() as manager:
        # 根据是否启用功耗采样决定 barrier 大小
        # 如果启用功耗采样：3个进程（监控、数据处理、推理）
        # 如果未启用：2个进程（数据处理、推理）
        barrier_size = 3 if enable_power else 2
        barrier = manager.Barrier(barrier_size)
        
        # 事件信号列表
        # 如果启用功耗采样，需要4个事件（包括 power_data_ready_event）
        # 如果未启用，只需要3个事件
        num_events = 4 if enable_power else 3
        event_list = [manager.Event() for _ in range(num_events)]
        # event_list[0]: model_infer_stop_event - 推理完成信号
        # event_list[1]: data_copy_completed_event - 数据复制完成信号  
        # event_list[2]: monitor_stop_event - 全局退出信号
        # event_list[3]: power_data_ready_event - 功耗数据就绪信号（仅当 enable_power=True 时存在）
        
        # 共享数据列表
        power_samples = manager.list()  # 功耗采样数据
        latency_samples = manager.list()  # 延迟数据
        
        # 结果和元数据队列
        result_queue = manager.Queue()
        metadata_queue = manager.Queue()
        
        # 启动监控进程（持续运行）
        monitor_process = None
        if enable_power:
            monitor_process = multiprocessing.Process(
                target=monitor_worker,
                args=(barrier, event_list, power_samples, device_id, sample_rate_hz, power_backend),
                daemon=False
            )
            monitor_process.start()
        
        # 启动数据处理进程（持续运行）
        data_handler_process = multiprocessing.Process(
            target=data_handler_worker,
            args=(barrier, event_list, latency_samples, power_samples, result_queue, metadata_queue),
            daemon=False
        )
        data_handler_process.start()
        
        # 存储所有结果
        all_results = {
            "models": {}
        }
        
        # 遍历所有模型
        for model_name in model_names:
            print(f"\n{'='*60}")
            print(f"Testing model: {model_name}")
            print(f"{'='*60}")
            
            # 获取模型规格
            model_spec = get_model_spec(model_name)
            
            # 获取当前模型的inputs和warmup配置（优先级：CLI > 配置文件 > 默认值）
            model_num_inputs = num_inputs  # CLI参数（如果指定）
            model_warmup = warmup  # CLI参数（如果指定）
            
            # 如果CLI未指定，尝试从配置文件获取
            if model_num_inputs is None or model_warmup is None:
                model_config = (model_configs or {}).get(model_name, {})
                if model_num_inputs is None:
                    model_num_inputs = model_config.get("inputs", 100)  # 默认100
                if model_warmup is None:
                    model_warmup = model_config.get("warmup", 10)  # 默认10
            
            print(f"  Config: inputs={model_num_inputs}, warmup={model_warmup}")
            
            # 从模型规格中获取静态batch size（第一个输入的第一个维度）
            inputs = model_spec.get("inputs", {})
            if not inputs:
                print(f"  [ERROR] Model spec has no inputs")
                all_results["models"][model_name] = {
                    "name": model_spec.get("name"),
                    "error": "Model spec has no inputs"
                }
                continue
            
            # 获取第一个输入的shape，第一个维度即为batch size
            first_input = next(iter(inputs.values()))
            input_shape = first_input.get("shape", [])
            if not input_shape or len(input_shape) == 0:
                print(f"  [ERROR] Model input shape is invalid")
                all_results["models"][model_name] = {
                    "name": model_spec.get("name"),
                    "error": "Model input shape is invalid"
                }
                continue
            
            batch_size = input_shape[0]
            if not isinstance(batch_size, int) or batch_size < 1:
                print(f"  [ERROR] Invalid batch size from model spec: {batch_size}")
                all_results["models"][model_name] = {
                    "name": model_spec.get("name"),
                    "error": f"Invalid batch size from model spec: {batch_size}"
                }
                continue
            
            print(f"\n  Testing with batch_size={batch_size} (from model spec)...")
            
            # 初始化模型结果结构（包含per-model配置）
            model_results = {
                "name": model_spec.get("name"),
                "source_onnx": model_spec.get("source_onnx"),
                "task": model_spec.get("task"),
                "opset": model_spec.get("opset"),
                "batch_size": batch_size,
                "config": {
                    "inputs": model_num_inputs,
                    "warmup": model_warmup
                }
            }
            
            # 重置事件状态
            event_list[0].clear()
            event_list[1].clear()
            if len(event_list) > 3:
                event_list[3].clear()  # 重置功耗数据就绪信号（如果存在）
            
            # 创建推理进程（临时，测完销毁）
            infer_process = multiprocessing.Process(
                target=infer_worker,
                args=(
                    barrier, event_list, latency_samples, metadata_queue,
                    model_spec, batch_size, model_num_inputs, model_warmup, adapter_backend
                ),
                daemon=False
            )
            
            # 启动推理进程
            infer_process.start()
            
            # 等待推理进程完成（带超时保护）
            INFER_TIMEOUT = 120
            infer_process.join(timeout=INFER_TIMEOUT)
            
            if infer_process.is_alive():
                print(f"  [ERROR] Inference process timeout ({INFER_TIMEOUT}s), terminating...")
                infer_process.terminate()
                time.sleep(2)
                
                if infer_process.is_alive():
                    print(f"  [ERROR] Force killing inference process")
                    infer_process.kill()
                    infer_process.join()
                
                # Mark model as failed
                model_results["error"] = f"Inference timeout after {INFER_TIMEOUT}s"
                all_results["models"][model_name] = model_results
                continue
            
            # 从队列获取处理结果
            try:
                batch_result = result_queue.get(timeout=5.0)
                
                # Check if result contains error
                if "error" in batch_result:
                    print(f"    [ERROR] Test failed: {batch_result['error']}")
                    model_results.update(batch_result)
                    model_results["error"] = batch_result["error"]
                else:
                    # 将结果直接合并到model_results中
                    model_results.update(batch_result)
                    
                    # 打印关键指标
                    print_batch_metrics(batch_result)
                    
            except Exception as e:
                print(f"    [ERROR] Failed to get result: {e}")
                model_results["error"] = f"Result queue error: {str(e)}"
            
            all_results["models"][model_name] = model_results
        
        # 发送全局退出信号
        print(f"\n{'='*60}")
        print("All models tested. Shutting down worker processes...")
        print(f"{'='*60}")
        
        event_list[2].set()  # 设置全局退出信号
        
        # 等待持续进程优雅退出
        if monitor_process:
            monitor_process.join(timeout=5.0)
            if monitor_process.is_alive():
                monitor_process.terminate()
        
        data_handler_process.join(timeout=5.0)
        if data_handler_process.is_alive():
            data_handler_process.terminate()
        
        # 记录结束时间并增强结果
        end_time = time.time()
        enhanced_results = enhance_results(
            all_results,
            adapter_backend=adapter_backend,
            power_backend=power_backend,
            device_id=device_id,
            sample_rate_hz=sample_rate_hz,
            start_time=start_time,
            end_time=end_time,
            model_configs=model_configs  # 传递配置信息
        )
        
        # 打印总体总结
        print_overall_summary(enhanced_results["summary"])
        
        return enhanced_results
        # Manager 会在 with 块退出时自动清理

