"""
PyTorch推理实现

这个模块提供了TorchInference类，用于加载和推理PyTorch模型。
支持图像分类、图像分割等任务。
"""

import os
import torch
from typing import List, Dict, Any, Optional, Union
import logging
from PIL import Image
from devdeploy.inference.fullmodel_loader import FullModelLoader
from devdeploy.inference.base_inference import BaseInference


class TorchInference(BaseInference):
    """Inference wrapper for full (non-scripted) PyTorch models.

    Loads a serialized model through ``FullModelLoader`` and dispatches
    post-processing based on the ``task_type`` found in the model's
    configs ('classification' or 'segmentation'; anything else falls
    back to classification handling with a warning).
    """

    def __init__(self, model_path: str, device: str = 'cpu', class_names: Optional[List[str]] = None):
        """Initialize the engine and eagerly load the model.

        Args:
            model_path: Path to the serialized PyTorch model file.
            device: Target device string, e.g. ``'cpu'`` or ``'cuda:0'``.
            class_names: Optional human-readable class names, forwarded to
                the base class.
        """
        super().__init__(model_path, device, class_names)
        self.logger = logging.getLogger(__name__)
        self.load_model()

    def load_model(self):
        """Load the PyTorch model and read its task configuration.

        Sets ``self.model`` (in eval mode), ``self.task_type``,
        ``self.img_scale`` and — via the test pipeline — ``self.class_scale``.

        Raises:
            Exception: Re-raised after logging when loading fails.
        """
        try:
            self.logger.info(f'正在加载PyTorch模型: {self.model_path}')
            self.model_loader = FullModelLoader(self.model_path, device=self.device)
            self.model = self.model_loader.load_model()
            self.model_loader.print_model_structure()
            # Inference mode: freezes dropout / batch-norm statistics.
            self.model.eval()
            configs = self.model_loader.get_configs()
            self.task_type = configs.get('task_type', 'classification')
            self.img_scale = configs.get('img_scale', None)
            self._extract_scale_from_pipeline(configs.get('test_pipeline'))
            self.logger.info('PyTorch模型加载成功')
            self.logger.info(f'任务类型: {self.task_type}')
            if self.img_scale:
                self.logger.info(f'图像尺寸: {self.img_scale}')
            self.logger.info(f'分类任务resize尺寸: {self.class_scale}')
            self.model_loader.print_info()
        except Exception as e:
            self.logger.error(f'PyTorch模型加载失败: {str(e)}')
            raise

    def _extract_scale_from_pipeline(self, test_pipeline):
        """Extract the classification resize scale from a test pipeline.

        Accepts either an object exposing ``.transforms`` or a plain list of
        transform steps (dicts or objects). The first ``Resize``/``ResizeEdge``
        step found sets ``self.class_scale``; when nothing matches,
        ``self.class_scale`` keeps its previous (base-class) value.
        """
        if hasattr(test_pipeline, 'transforms'):
            steps = test_pipeline.transforms
        elif isinstance(test_pipeline, list):
            steps = test_pipeline
        else:
            self.logger.warning(f'test_pipeline格式未知，无法提取resize尺寸，使用默认值: {self.class_scale}')
            return
        for step in steps:
            is_dict = isinstance(step, dict)
            step_type = step.get('type', None) if is_dict else type(step).__name__
            if step_type not in ('Resize', 'ResizeEdge'):
                continue
            # Steps may carry the target size under 'scale' or 'size';
            # 'scale' wins when both are present.
            scale = step.get('scale', None) if is_dict else getattr(step, 'scale', None)
            size = step.get('size', None) if is_dict else getattr(step, 'size', None)
            value = scale if scale is not None else size
            if isinstance(value, (list, tuple)):
                # First element of a (w, h)-style pair; 128 for an empty sequence.
                self.class_scale = value[0] if value else 128
            elif value is not None:
                self.class_scale = value
            self.logger.info(f'从test_pipeline提取到分类任务resize尺寸: {self.class_scale}')
            break

    def _preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:
        """Preprocess an image via the base-class helper and move it to the device."""
        input_tensor = self._preprocess_image_common(image)
        return input_tensor.to(self.device)

    def infer_single(self, image: Union[str, Image.Image], return_prob: bool = True) -> Dict[str, Any]:
        """Run inference on a single image.

        Args:
            image: A filesystem path or a ``PIL.Image.Image``.
            return_prob: When False, the ``'probabilities'`` key is removed
                from classification results.

        Returns:
            A result dict produced by the task-specific post-processor.

        Raises:
            Exception: Re-raised after logging when inference fails.
        """
        try:
            input_tensor = self._preprocess_image(image)
            with torch.no_grad():
                output = self.model(input_tensor)
            if self.task_type == 'segmentation':
                return self._process_segmentation_output(output)
            # Classification, or unknown task type treated as classification.
            if self.task_type != 'classification':
                self.logger.warning(f'未知任务类型 {self.task_type}，按分类任务处理')
            result = self._process_classification_output(output)
            if not return_prob:
                result.pop('probabilities', None)
            return result
        except Exception as e:
            error_msg = f'推理失败: {str(e)}'
            if isinstance(image, str):
                error_msg = f'推理失败 {image}: {str(e)}'
            self.logger.error(error_msg)
            raise

    def infer_batch(self, image_dir: str, return_prob: bool = False) -> Dict[str, Dict[str, Any]]:
        """Run inference on every image file in a directory.

        Args:
            image_dir: Directory scanned (non-recursively) for image files.
            return_prob: Forwarded to :meth:`infer_single`.

        Returns:
            Mapping of file name to result dict. Failed images get a
            placeholder dict with an ``'error'`` key instead of raising.

        Raises:
            Exception: Re-raised after logging when the directory scan fails.
        """
        try:
            image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif')
            # Sorted for a deterministic processing order across runs.
            image_files = sorted(
                f for f in os.listdir(image_dir)
                if f.lower().endswith(image_extensions)
            )
            if not image_files:
                self.logger.warning(f'目录中没有找到图片文件: {image_dir}')
                return {}
            self.logger.info(f'找到 {len(image_files)} 张图片，开始批量推理')
            results = {}
            for img_name in image_files:
                img_path = os.path.join(image_dir, img_name)
                try:
                    results[img_name] = self.infer_single(img_path, return_prob=return_prob)
                except Exception as e:
                    self.logger.error(f'处理图片失败 {img_name}: {str(e)}')
                    results[img_name] = {
                        'error': str(e),
                        'class_id': -1,
                        'class_name': 'error',
                        'confidence': 0.0
                    }
            # Count genuine successes only; error placeholders must not inflate it.
            success_count = sum(1 for r in results.values() if 'error' not in r)
            self.logger.info(f'批量推理完成，成功处理 {success_count} 张图片')
            return results
        except Exception as e:
            self.logger.error(f'批量推理失败: {str(e)}')
            raise

    def get_model_info(self) -> Dict[str, Any]:
        """Return model metadata from the loader."""
        return self.model_loader.get_model_info()

    def validate_model(self) -> bool:
        """Delegate model validation to the loader."""
        return self.model_loader.validate_model()

    def __del__(self):
        """Release model resources on garbage collection; must never raise."""
        try:
            if getattr(self, 'model', None) is not None:
                # Move the model off the GPU before dropping the reference.
                if hasattr(self.model, 'cpu'):
                    self.model.cpu()
                self.model = None
            if getattr(self, 'model_loader', None) is not None:
                self.model_loader = None
            # Return freed GPU memory to the driver.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except Exception as e:
            # A destructor must swallow errors (it may run during interpreter
            # shutdown when module globals are already torn down).
            if hasattr(self, 'logger'):
                self.logger.warning(f'析构函数执行时出现警告: {str(e)}')
            else:
                print(f'析构函数执行时出现警告: {str(e)}')