"""推理运行器 - 基于适配器进行模型推理"""

from __future__ import annotations

from contextlib import contextmanager
from typing import Any, Dict, List, Optional

import numpy as np

from .adapter_backends import AdapterBackend
from .adapter_backends import get_adapter, list_adapters


class InferenceRunner:
    """Inference runner - runs model inference through a selected adapter."""

    def __init__(
        self,
        adapter_backend: Optional[str] = None,
        model_spec: Optional[Dict[str, Any]] = None
    ):
        """
        Initialize the inference runner.

        Args:
            adapter_backend: Adapter backend name (e.g. "onnxruntime",
                "tensorrt", "sophon_tpu"). If None, an available adapter
                is auto-detected.
            model_spec: Model spec dict; when provided, the model is
                prepared immediately.

        Raises:
            RuntimeError: No adapter is available or initialization failed.

        Examples:
            # Option 1: auto-detect an adapter
            runner = InferenceRunner()
            runner.prepare(model_spec)

            # Option 2: pick an adapter explicitly
            runner = InferenceRunner(adapter_backend="onnxruntime")
            runner.prepare(model_spec)

            # Option 3: prepare the model right away
            runner = InferenceRunner(
                adapter_backend="onnxruntime",
                model_spec=model_spec
            )
        """
        # Create the adapter instance.
        if adapter_backend:
            self.adapter = get_adapter(adapter_backend)
        else:
            # Auto-detect an available adapter.
            self.adapter = self._auto_detect_adapter()

        # Inference handle and the spec it was prepared from.
        self._handle: Optional[Any] = None
        self._model_spec: Optional[Dict[str, Any]] = None

        # Prepare immediately when a spec is supplied. Check against None
        # (not truthiness) so an empty spec dict is still passed through
        # to prepare() rather than being silently skipped.
        if model_spec is not None:
            self.prepare(model_spec)

    def _auto_detect_adapter(self) -> AdapterBackend:
        """
        Auto-detect an available adapter.

        Returns:
            An AdapterBackend instance.

        Raises:
            RuntimeError: No adapter is available.
        """
        available = list_adapters(include_unavailable=False)
        if not available:
            raise RuntimeError(
                "No adapter available. Please install required dependencies:\n"
                "  - ONNX Runtime: pip install onnxruntime or onnxruntime-gpu\n"
                "  - TensorRT: Install TensorRT and pycuda\n"
                "  - Other backends: See adapter documentation"
            )

        # Pick an adapter by priority: prefer the more general-purpose
        # backends first.
        priority_order = ["onnxruntime", "onnxrt"]
        for name in priority_order:
            if name in available:
                return get_adapter(name)

        # Nothing from the priority list is available; fall back to the
        # first adapter that is.
        return get_adapter(available[0])

    def prepare(self, model_spec: Dict[str, Any]) -> Any:
        """
        Prepare a model for inference.

        Args:
            model_spec: Model spec dict containing fields such as
                source_onnx, inputs, outputs, etc.

        Returns:
            The inference handle.

        Raises:
            ModelLoadError: The model failed to load.
            ComplianceError: The model failed compliance validation.

        Example:
            runner.prepare(model_spec)
        """
        self._model_spec = model_spec
        self._handle = self.adapter.prepare(model_spec)
        return self._handle

    def infer(self, inputs: Dict[str, np.ndarray]) -> List[np.ndarray]:
        """
        Run a single inference pass.

        Args:
            inputs: Input tensor dict {input_name: np.ndarray}.

        Returns:
            Output tensors as List[np.ndarray].

        Raises:
            RuntimeError: The model has not been prepared.
            InferenceError: Inference execution failed.

        Example:
            outputs = runner.infer({"input": input_data})
        """
        if self._handle is None:
            raise RuntimeError(
                "Model not prepared. Call prepare() first or provide "
                "model_spec in __init__()"
            )

        return self.adapter.infer(self._handle, inputs)

    def teardown(self):
        """
        Release model resources.

        Example:
            runner.teardown()
        """
        if self._handle is not None:
            self.adapter.teardown(self._handle)
            self._handle = None

    @contextmanager
    def inference_session(self, model_spec: Dict[str, Any]):
        """
        Context manager: automatically prepare and clean up the model.

        Args:
            model_spec: Model spec dict.

        Yields:
            The InferenceRunner instance (self).

        Example:
            with runner.inference_session(model_spec):
                outputs = runner.infer(inputs)
                # resources are released automatically
        """
        try:
            self.prepare(model_spec)
            yield self
        finally:
            self.teardown()

    def get_metadata(self) -> Dict[str, Any]:
        """
        Get adapter and backend metadata.

        Returns:
            Metadata dict including adapter name, version, backend info, etc.

        Example:
            metadata = runner.get_metadata()
            print(f"Adapter: {metadata['adapter_backend']}")
            print(f"Backend: {metadata['backend']}")
            print(f"Device: {metadata['device']}")
        """
        return self.adapter.get_metadata()

    @property
    def is_ready(self) -> bool:
        """
        Whether the model is ready for inference.

        Returns:
            bool: True if the model has been prepared.
        """
        return self._handle is not None

    @property
    def model_spec(self) -> Optional[Dict[str, Any]]:
        """
        The currently loaded model spec.

        Returns:
            The model spec dict, or None if no model is loaded.
        """
        return self._model_spec

    def cleanup(self):
        """Release all resources."""
        self.teardown()

    def __enter__(self):
        """Context-manager protocol support."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager protocol support: release resources; never
        suppress exceptions."""
        self.cleanup()
        return False

    def __del__(self):
        """Finalizer: best-effort resource cleanup.

        Catches Exception (not a bare except) so SystemExit and
        KeyboardInterrupt are not swallowed during interpreter shutdown.
        """
        try:
            self.cleanup()
        except Exception:
            pass

    def __repr__(self) -> str:
        """Debug string representation."""
        adapter_backend = self.adapter.get_metadata().get("adapter_backend", "unknown")
        status = "ready" if self.is_ready else "not ready"
        return f"InferenceRunner(adapter='{adapter_backend}', status='{status}')"

