"""
ONNX推理实现

这个模块提供了OnnxInference类，用于加载和推理ONNX模型。
支持图像分类、图像分割等任务。
"""

import os
import torch
from typing import List, Dict, Any, Optional, Union
from pathlib import Path
import numpy as np
import logging
import yaml
import time
from PIL import Image
from devdeploy.inference.base_inference import BaseInference


class OnnxInference(BaseInference):
    """ONNX model inference wrapper.

    Loads an ONNX model (plus an optional companion ``<model>.yaml`` config
    file) via onnxruntime and runs single-image or directory-batch inference
    for classification and segmentation tasks.
    """

    def __init__(self, model_path: str, device: str = 'cpu', class_names: Optional[List[str]] = None):
        """Create the wrapper and immediately load the model.

        Args:
            model_path: Path to the ``.onnx`` model file.
            device: ``'cpu'`` or ``'cuda'``/``'cuda:N'`` to request GPU execution.
            class_names: Optional list of class names; when given it takes
                precedence over any names found in the companion YAML config.
        """
        super().__init__(model_path, device, class_names)
        self.logger = logging.getLogger(__name__)
        # Make sure class_names is set on the instance so _load_onnx_config
        # can tell whether the caller supplied names.
        # NOTE(review): the base __init__ already receives class_names and
        # probably stores it — kept defensively; confirm against BaseInference.
        if class_names is not None:
            self.class_names = class_names
        self.load_model()

    def load_model(self):
        """Load the ONNX model into an onnxruntime InferenceSession.

        Prefers CUDAExecutionProvider when the requested device is CUDA and
        the provider is available; otherwise falls back to CPU. Afterwards
        loads the companion YAML config and logs the model's I/O metadata.

        Raises:
            ImportError: If onnxruntime is not installed.
            Exception: Any other loading failure is logged and re-raised.
        """
        try:
            self.logger.info(f'正在加载ONNX模型: {self.model_path}')
            try:
                import onnxruntime as ort
            except ImportError:
                raise ImportError('请安装onnxruntime: pip install onnxruntime')
            providers = ['CPUExecutionProvider']
            if self.device.lower() == 'cuda' or self.device.lower().startswith('cuda:'):
                # Try to enable the CUDA provider; fall back to CPU on any problem.
                try:
                    available_providers = ort.get_available_providers()
                    self.logger.info(f'ONNX Runtime可用提供者: {available_providers}')

                    if 'CUDAExecutionProvider' in available_providers:
                        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
                        self.logger.info('✅ 检测到CUDA Execution Provider，将使用GPU加速')
                    else:
                        self.logger.warning('⚠️  CUDA Execution Provider不可用，将使用CPU')
                        self.logger.warning('提示: 请确保安装了 onnxruntime-gpu: pip install onnxruntime-gpu')
                        providers = ['CPUExecutionProvider']
                except Exception as e:
                    self.logger.warning(f'检查CUDA provider时出错: {e}，将使用CPU')
                    providers = ['CPUExecutionProvider']

            self.logger.info(f'ONNX模型提供者配置: {providers}')
            self.model = ort.InferenceSession(self.model_path, providers=providers)

            # Report which provider onnxruntime actually selected (it may
            # silently fall back to CPU even when CUDA was requested).
            actual_providers = self.model.get_providers()
            self.logger.info(f'ONNX模型实际使用的提供者: {actual_providers}')
            if 'CUDAExecutionProvider' in actual_providers:
                self.logger.info('✅ 模型已成功加载到GPU')
            else:
                self.logger.warning('⚠️  模型运行在CPU上，未使用GPU加速')
            input_info = self.model.get_inputs()[0]
            self.logger.info(f'ONNX模型输入名: {input_info.name}')
            self.logger.info(f'ONNX模型输入尺寸: {input_info.shape}')
            self._load_onnx_config()
            self.logger.info('ONNX模型加载成功')
            self.logger.info(f'任务类型: {self.task_type}')
            # getattr: img_scale is only assigned for segmentation tasks, so a
            # plain attribute access could raise AttributeError here.
            if getattr(self, 'img_scale', None):
                self.logger.info(f'图像尺寸: {self.img_scale}')
            self.logger.info(f'分类任务resize尺寸: {self.class_scale}')
            # Log metadata for every model output.
            output_infos = self.model.get_outputs()
            self.logger.info(f'ONNX模型输出数量: {len(output_infos)}')
            for i, output_info in enumerate(output_infos):
                self.logger.info(f'ONNX模型输出{i}名: {output_info.name}')
                self.logger.info(f'ONNX模型输出{i}尺寸: {output_info.shape}')
                self.logger.info(f'ONNX模型输出{i}类型: {output_info.type}')
            self.logger.info('load onnx model success')
        except Exception as e:
            self.logger.error(f'ONNX模型加载失败: {str(e)}')
            raise

    def _load_onnx_config(self):
        """Load task configuration from the model's companion YAML file.

        The YAML file is expected next to the model with the same stem
        (``model.onnx`` -> ``model.yaml``). A missing file or any load
        error results in safe defaults: classification task,
        ``class_scale=128``, ``batch_size=1``.
        """
        try:
            model_path = Path(self.model_path)
            yaml_path = model_path.with_suffix('.yaml')
            if not yaml_path.exists():
                self.logger.warning(f'未找到对应的YAML配置文件: {yaml_path}')
                self.logger.warning('使用默认配置')
                self.task_type = 'classification'
                self.class_scale = 128
                self.batch_size = 1
                return
            with open(yaml_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)

            # class_names from the YAML is used only when the caller did not
            # pass an explicit list in.
            if getattr(self, 'class_names', None) is None:
                config_class_names = config.get('class_names', None)
                if config_class_names is not None:
                    self.class_names = config_class_names
                    self.logger.info(f'从YAML配置文件加载class_names: {self.class_names}')
                else:
                    self.logger.warning('配置文件中未找到class_names，将使用默认类别名称')

            self.task_type = config.get('task_type', 'classification')
            self.class_scale = config.get('class_scale', 128)
            self.batch_size = config.get('batch_size', 1)
            if self.task_type == 'segmentation':
                self.img_scale = config.get('img_scale', (512, 512))
                # Normalize YAML lists to tuples, matching the default value.
                if isinstance(self.img_scale, list):
                    self.img_scale = tuple(self.img_scale)
            mean = config.get('mean', None)
            std = config.get('std', None)
            if mean is not None:
                # YAML values are on the 0-255 scale; convert to 0-1.
                if isinstance(mean, (list, tuple)):
                    self.default_mean = [m/255.0 for m in mean]
                else:
                    self.default_mean = [mean/255.0] * 3
                self.logger.info(f'从YAML加载mean参数: {self.default_mean}')
            if std is not None:
                if isinstance(std, (list, tuple)):
                    self.default_std = [s/255.0 for s in std]
                else:
                    self.default_std = [std/255.0] * 3
                self.logger.info(f'从YAML加载std参数: {self.default_std}')
            self.logger.info(f'从YAML配置文件加载配置: {yaml_path}')
            self.logger.info(f'任务类型: {self.task_type}')
            self.logger.info(f'分类任务resize尺寸: {self.class_scale}')
            # getattr: img_scale only exists for segmentation tasks; the bare
            # access in the original could raise and wrongly trigger the
            # defaults-fallback below, discarding config already loaded.
            if getattr(self, 'img_scale', None):
                self.logger.info(f'分割任务图像尺寸: {self.img_scale}')
        except Exception as e:
            self.logger.error(f'加载ONNX配置失败: {str(e)}')
            self.task_type = 'classification'
            self.class_scale = 128
            # Default batch_size too, so downstream code never sees it unset
            # (the original fallback omitted it, unlike the no-YAML path).
            self.batch_size = 1
            self.logger.warning('使用默认配置继续执行')

    def _preprocess_image(self, image: Union[str, Image.Image]) -> np.ndarray:
        """Preprocess an image via the shared base-class pipeline.

        Returns the preprocessed tensor converted to a float32 numpy array,
        as required by onnxruntime's input feed.
        """
        input_tensor = self._preprocess_image_common(image)
        return input_tensor.cpu().numpy().astype(np.float32)

    def infer_single(self, image: Union[str, Image.Image], return_prob: bool = True) -> Dict[str, Any]:
        """Run inference on a single image.

        Args:
            image: A file path or a PIL image.
            return_prob: When False, the ``'probabilities'`` entry is stripped
                from classification results.

        Returns:
            A result dict produced by the task-specific post-processing
            (classification or segmentation).

        Raises:
            Exception: Any failure is logged (including the path when the
                input was a path) and re-raised.
        """
        try:
            input_np = self._preprocess_image(image)
            input_name = self.model.get_inputs()[0].name
            start_time = time.time()
            outputs = self.model.run(None, {input_name: input_np})
            infer_time = time.time() - start_time
            # Log (not print) timing, consistent with the rest of the module.
            self.logger.info(f'ONNX推理耗时: {infer_time:.4f} 秒')
            output_tensor = torch.from_numpy(outputs[0])
            if self.task_type == 'segmentation':
                return self._process_segmentation_output(output_tensor)
            if self.task_type != 'classification':
                # Unknown task types fall back to classification handling.
                self.logger.warning(f'未知任务类型 {self.task_type}，按分类任务处理')
            result = self._process_classification_output(output_tensor)
            if not return_prob:
                result.pop('probabilities', None)
            return result
        except Exception as e:
            error_msg = f'推理失败: {str(e)}'
            if isinstance(image, str):
                error_msg = f'推理失败 {image}: {str(e)}'
            self.logger.error(error_msg)
            raise

    def infer_batch(self, image_dir: str, return_prob: bool = False) -> Dict[str, Dict[str, Any]]:
        """Run inference over every image file in a directory.

        Args:
            image_dir: Directory scanned (non-recursively) for image files.
            return_prob: Forwarded to infer_single for each image.

        Returns:
            Mapping of filename -> result dict. A failed image maps to an
            error placeholder instead of aborting the whole batch.

        Raises:
            Exception: Directory-level failures (e.g. unreadable dir) are
                logged and re-raised.
        """
        try:
            image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif')
            # Sorted for a deterministic processing order across platforms
            # (os.listdir order is arbitrary).
            image_files = sorted(
                f for f in os.listdir(image_dir)
                if f.lower().endswith(image_extensions)
            )
            if not image_files:
                self.logger.warning(f'目录中没有找到图片文件: {image_dir}')
                return {}
            self.logger.info(f'找到 {len(image_files)} 张图片，开始批量推理')
            results = {}
            for img_name in image_files:
                img_path = os.path.join(image_dir, img_name)
                try:
                    results[img_name] = self.infer_single(img_path, return_prob=return_prob)
                except Exception as e:
                    # Record the failure but keep processing the rest.
                    self.logger.error(f'处理图片失败 {img_name}: {str(e)}')
                    results[img_name] = {
                        'error': str(e),
                        'class_id': -1,
                        'class_name': 'error',
                        'confidence': 0.0
                    }
            self.logger.info(f'批量推理完成，成功处理 {len(results)} 张图片')
            return results
        except Exception as e:
            self.logger.error(f'批量推理失败: {str(e)}')
            raise

    def __del__(self):
        """Release the ONNX session when the object is garbage-collected.

        onnxruntime sessions manage their native resources themselves, but
        we close/drop the reference explicitly. A destructor must never
        raise, so every failure is reduced to a warning.
        """
        try:
            if hasattr(self, 'model') and self.model is not None:
                # Some onnxruntime versions expose close(); use it if present.
                if hasattr(self.model, 'close'):
                    self.model.close()
                del self.model
                self.model = None
        except Exception as e:
            # Never propagate exceptions out of __del__.
            if hasattr(self, 'logger'):
                self.logger.warning(f'析构函数执行时出现警告: {str(e)}')
            else:
                print(f'析构函数执行时出现警告: {str(e)}')