"""推理进程模块 - 每个模型在独立进程中执行"""

from __future__ import annotations

import time
from typing import Dict, Any, Optional
from multiprocessing import Queue
from multiprocessing.synchronize import Barrier as BarrierType, Event as EventType
from multiprocessing.managers import ListProxy

import numpy as np

from ..adapters import InferenceRunner



def infer_worker(
    barrier: BarrierType,
    event_list: list[EventType],
    latency_samples: ListProxy,
    metadata_queue: Queue,
    model_spec: Dict[str, Any],
    batch_size: int,
    num_inputs: int,
    warmup: int,
    adapter_backend: Optional[str] = None
):
    """
    Inference worker - created per model, per single batch-size test run.

    Args:
        barrier: Three-process start synchronization barrier.
        event_list: Event list:
            [0]: model_infer_stop_event - signals inference completion
            [1]: data_copy_completed_event - signals data copy completion
            [2]: monitor_stop_event - global exit signal (unused by this process)
        latency_samples: Shared list collecting per-iteration latency (milliseconds).
        metadata_queue: Queue passing the test configuration to the data process.
        model_spec: Model specification dict.
        batch_size: Batch size.
        num_inputs: Number of timed inference iterations.
        warmup: Number of untimed warmup iterations.
        adapter_backend: Adapter backend name.
    """
    from ..data.loader import make_synthetic_inputs

    def _error_metadata(exc: BaseException) -> Dict[str, Any]:
        # Uniform error payload; sending it on every failure path keeps the
        # downstream consumer from blocking forever on metadata_queue.get().
        return {
            "batch_size": batch_size,
            "num_inputs": num_inputs,
            "wall_time": 0.0,
            "init_ms": 0.0,
            "error": str(exc),
        }

    # Synchronize the start with the other two processes. A timeout here
    # (typically threading.BrokenBarrierError) means a peer died or stalled.
    try:
        barrier.wait(timeout=10.0)
    except Exception as e:
        print(f"[ERROR] Barrier timeout in infer worker: {e}")
        # Fix: previously only the event was set here, with no queue message;
        # a consumer that survived the broken barrier could hang on the queue.
        metadata_queue.put(_error_metadata(e))
        # Still signal completion to avoid deadlocking peers.
        event_list[0].set()
        return

    # Critical: the finally block guarantees signals are sent even on failure.
    try:
        # Context manager guarantees runner cleanup even if inference raises.
        with InferenceRunner(adapter_backend=adapter_backend) as runner:
            # Fixed seed: every run sees identical synthetic inputs.
            rng = np.random.default_rng(1234)
            inputs = make_synthetic_inputs(model_spec, batch_size, rng)

            # 1. Initialize the model (timed separately as init_ms).
            init_t0 = time.perf_counter()
            runner.prepare(model_spec)
            init_ms = (time.perf_counter() - init_t0) * 1000.0

            # 2. Warmup phase (not timed, not recorded).
            for _ in range(warmup):
                runner.infer(inputs)

            # 3. Timed inference; clear any stale latency samples first.
            latency_samples[:] = []

            start = time.perf_counter()
            for _ in range(num_inputs):
                t0 = time.perf_counter()
                runner.infer(inputs)
                latency_samples.append((time.perf_counter() - t0) * 1000.0)
            wall_time = time.perf_counter() - start

            # 4. Hand the results/configuration to the data process.
            metadata_queue.put({
                "batch_size": batch_size,
                "num_inputs": num_inputs,
                "wall_time": wall_time,
                "init_ms": init_ms,
            })
        # Runner cleanup() runs automatically when the with-block exits.

    except Exception as e:
        # Log the error and report it downstream.
        print(f"[ERROR] Inference worker failed: {e}")
        import traceback
        traceback.print_exc()

        # Fix: discard partially collected latencies so the consumer never
        # mixes incomplete samples with an error payload.
        latency_samples[:] = []
        metadata_queue.put(_error_metadata(e))

    finally:
        # Always signal completion, even on error, then give the data process
        # a bounded window to finish copying.
        event_list[0].set()
        event_list[1].wait(timeout=5.0)

    # Process exit releases the CUDA context and GPU memory automatically
    # (per the original design note) -- nothing to free explicitly here.

