from abc import ABC, abstractmethod
from typing import List, Optional, Any, Dict, Union
from torchvision import transforms
from PIL import Image
import torch
import numpy as np
import cv2
def convert_to_pil_rgb(image):
    """
    Convert an OpenCV image (NumPy array) to a 3-channel RGB PIL Image.

    Grayscale, BGR, BGRA and wider multi-channel arrays are all mapped to
    3-channel RGB. Inputs that are not NumPy arrays are returned unchanged,
    matching the documented contract.

    :param image: input image — an OpenCV image (NumPy array) or any other
        object (returned as-is).
    :return: PIL Image in RGB mode, or the original object when the input
        is not a NumPy array.
    :raises ValueError: if the array has an unsupported number of dimensions.
    """
    # Non-array inputs (e.g. an already-converted PIL Image) pass through.
    if not isinstance(image, np.ndarray):
        return image

    if image.ndim == 2:
        # Single-channel grayscale -> replicate into 3 RGB channels.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.ndim == 3:
        channels = image.shape[2]
        if channels == 3:
            # OpenCV stores color as BGR -> swap to RGB.
            rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        elif channels == 4:
            # BGRA -> drop the alpha channel while converting to RGB.
            rgb_image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGB)
        else:
            # Multi-channel (e.g. scientific) images: keep the first three
            # channels and treat them as BGR.
            # NOTE(review): this may not be the right interpretation for
            # every sensor — confirm per application.
            rgb_image = cv2.cvtColor(image[:, :, :3], cv2.COLOR_BGR2RGB)
    else:
        raise ValueError("不支持的NumPy数组维度")

    # Hand back a PIL Image so downstream torchvision transforms work.
    return Image.fromarray(rgb_image)
class BaseInference(ABC):
    """Abstract base class for model inference engines.

    Holds shared configuration (device, normalization constants, resize
    targets) and implements common image preprocessing plus post-processing
    of classification and segmentation outputs. Concrete subclasses
    implement load_model / infer_single / infer_batch.
    """

    def __init__(self, model_path: str, device: str = 'cpu', class_names: Optional[List[str]] = None):
        """
        Args:
            model_path: path to the model weights/checkpoint file.
            device: target inference device, e.g. 'cpu' or 'cuda'.
            class_names: optional mapping from class id to readable name.
        """
        self.model_path = model_path
        self.device = device
        self.class_names = class_names
        self.model = None            # populated by load_model() in subclasses
        self.task_type = None        # e.g. 'segmentation'; selects preprocessing path
        self.img_scale = None        # segmentation resize target: int or (h, w)
        self.class_scale = 128       # classification resize target (square side)
        # ImageNet mean/std rescaled from the 0-255 to the 0-1 range
        # produced by transforms.ToTensor().
        self.default_mean = [123.675/255, 116.28/255, 103.53/255]
        self.default_std = [58.395/255, 57.12/255, 57.375/255]
        self.batch_size = 1
        # (width, height) of the most recently preprocessed image; used to
        # map segmentation output back to the input resolution.
        self.original_image_size = None

    @abstractmethod
    def load_model(self):
        """Load the model from self.model_path onto self.device."""
        pass

    @abstractmethod
    def infer_single(self, image: Union[str, Any], return_prob: bool = True) -> Dict[str, Any]:
        """Run inference on a single image and return a result dict."""
        pass

    @abstractmethod
    def infer_batch(self, image_dir: str, return_prob: bool = False) -> Dict[str, Dict[str, Any]]:
        """Run inference on every image found in a directory."""
        pass

    def _preprocess_image_common(self, image: Union[str, Image.Image, np.ndarray]) -> torch.Tensor:
        """
        Common image preprocessing: load, resize, normalize.

        Args:
            image: image file path, PIL Image, or OpenCV-style NumPy array.

        Returns:
            torch.Tensor: normalized image tensor of shape (1, 3, H, W).

        Raises:
            ValueError: if the input type is unsupported or preprocessing fails.
        """
        try:
            if isinstance(image, str):
                img = Image.open(image).convert('RGB')
            elif isinstance(image, Image.Image):
                img = image.convert('RGB')
            elif isinstance(image, np.ndarray):
                # OpenCV image (NumPy array) -> PIL RGB
                img = convert_to_pil_rgb(image)
            else:
                raise ValueError(f'不支持的图片输入类型: {type(image)}，必须是字符串路径或PIL Image对象')

            # Remember the source resolution so segmentation masks can be
            # mapped back to it later.
            self.original_image_size = img.size  # (width, height)

            # Choose the resize target from the task configuration.
            if self.task_type == 'segmentation' and self.img_scale:
                if isinstance(self.img_scale, (list, tuple)):
                    resize_size = self.img_scale
                else:
                    resize_size = (self.img_scale, self.img_scale)
            elif self.class_scale:
                resize_size = (self.class_scale, self.class_scale)
            else:
                resize_size = (160, 160)

            # Resize -> tensor -> normalize, then add the batch dimension.
            input_tensor = transforms.Compose([
                transforms.Resize(resize_size),
                transforms.ToTensor(),
                transforms.Normalize(mean=self.default_mean, std=self.default_std)
            ])(img).unsqueeze(0)

            return input_tensor
        except Exception as e:
            error_msg = f'图片预处理失败: {str(e)}'
            if isinstance(image, str):
                error_msg = f'图片预处理失败 {image}: {str(e)}'
            raise ValueError(error_msg)

    def _get_class_name(self, class_id: int) -> str:
        """Return the readable name for class_id, or a 'class_<id>' fallback."""
        if self.class_names is not None and 0 <= class_id < len(self.class_names):
            return self.class_names[class_id]
        return f"class_{class_id}"

    def _process_classification_output(self, output: Any) -> Dict[str, Any]:
        """Convert raw classification output into a result dict.

        Accepts either a logits tensor or a dict holding one under
        'logits'/'pred' (falling back to the first value).
        """
        if isinstance(output, dict):
            if 'logits' in output:
                logits = output['logits']
            elif 'pred' in output:
                logits = output['pred']
            else:
                logits = list(output.values())[0]
        else:
            logits = output
        probabilities = torch.softmax(logits, dim=1)
        confidence, predicted = torch.max(probabilities, 1)
        class_id = predicted.item()
        return {
            'class_id': class_id,
            'class_name': self._get_class_name(class_id),
            'confidence': confidence.item(),
            'probabilities': probabilities.cpu().numpy()[0]
        }

    def _process_segmentation_output(self, output: Any) -> Dict[str, Any]:
        """Convert raw segmentation output into a result dict.

        Accepts a logits tensor of shape (B, C, H, W), a label map of shape
        (B, H, W), or a dict holding one under 'pred_sem_seg'/'pred'
        (falling back to the first value). The returned mask is resized to
        the original image size when that size is known.
        """
        import torch.nn.functional as F  # local import: only needed here

        if isinstance(output, dict):
            if 'pred_sem_seg' in output:
                seg_logits = output['pred_sem_seg']
            elif 'pred' in output:
                seg_logits = output['pred']
            else:
                seg_logits = list(output.values())[0]
        else:
            seg_logits = output

        # Upsample to the original resolution first, when sizes differ.
        if getattr(self, 'original_image_size', None) is not None:
            if isinstance(seg_logits, torch.Tensor) and seg_logits.dim() == 4:
                _, _, h, w = seg_logits.shape
                orig_w, orig_h = self.original_image_size
                if (w, h) != (orig_w, orig_h):
                    seg_logits = F.interpolate(seg_logits, size=(orig_h, orig_w), mode='bilinear', align_corners=False)
            elif isinstance(seg_logits, torch.Tensor) and seg_logits.dim() == 3:
                # (B, H, W): add a channel dim so interpolate accepts it.
                # NOTE(review): bilinear on an integer label map can yield
                # fractional labels — nearest may be intended; confirm.
                _, h, w = seg_logits.shape
                orig_w, orig_h = self.original_image_size
                if (w, h) != (orig_w, orig_h):
                    seg_logits = seg_logits.unsqueeze(1)
                    seg_logits = F.interpolate(seg_logits.float(), size=(orig_h, orig_w), mode='bilinear', align_corners=False)
                    seg_logits = seg_logits.squeeze(1)

        if seg_logits.dim() == 4:
            # Softmax over channels, then per-pixel argmax -> label map.
            seg_probs = torch.softmax(seg_logits, dim=1)
            seg_pred = torch.argmax(seg_probs, dim=1)
        else:
            seg_pred = seg_logits

        # Fallback resize (nearest-neighbor) in case sizes still differ,
        # e.g. when seg_logits was not a torch.Tensor above.
        seg_mask = seg_pred.cpu().numpy()[0]
        if self.original_image_size and seg_mask.shape[:2][::-1] != self.original_image_size:
            seg_mask = self._resize_mask_to_original(seg_mask, self.original_image_size)

        return {
            'segmentation_mask': seg_mask,
            'num_classes': seg_logits.shape[1] if seg_logits.dim() == 4 else 1
        }

    def _resize_mask_to_original(self, mask: np.ndarray, original_size: tuple) -> np.ndarray:
        """
        Resize a segmentation mask back to the original image size.

        Args:
            mask: segmentation mask array (integer labels).
            original_size: original image size as (width, height).

        Returns:
            np.ndarray: the resized mask; on any failure the input mask is
            returned unchanged (deliberate best effort).
        """
        try:
            # Nearest-neighbor interpolation keeps label values intact.
            mask_img = Image.fromarray(mask.astype(np.uint8))
            resized_mask = mask_img.resize(original_size, Image.NEAREST)
            return np.array(resized_mask)
        except Exception as e:
            print(f"警告：分割掩码resize失败，使用原始尺寸: {str(e)}")
            return mask

    def set_class_names(self, class_names: List[str]) -> None:
        """Replace the class-id -> name mapping."""
        self.class_names = class_names

    def get_task_type(self) -> Optional[str]:
        """Return the configured task type (None until a subclass sets it)."""
        return self.task_type

    def get_model_info(self) -> Dict[str, Any]:
        """Return model metadata; the default implementation has none."""
        return {}

    def validate_model(self) -> bool:
        """Hook for subclasses to validate the loaded model."""
        return True

    def __del__(self):
        """Best-effort resource cleanup; never raises."""
        try:
            # Drop the model reference so backing memory can be reclaimed.
            if hasattr(self, 'model') and self.model is not None:
                del self.model
                self.model = None
            if hasattr(self, 'class_names'):
                self.class_names = None
            if hasattr(self, 'original_image_size'):
                self.original_image_size = None
        except Exception:
            # Destructors must not propagate exceptions; swallow deliberately.
            pass