"""
分割模型分析器实现类

基于分割模型实现类别统计和分析功能，可以统计不同类别的像素占比。
"""

import os
import sys
import numpy as np
from PIL import Image
from typing import Dict, Any, Optional, Union, List, Tuple
from pathlib import Path
import cv2
# Make the project root importable: walk three directories up from this file
# and prepend it to sys.path so the absolute `devdeploy` import below resolves.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
sys.path.insert(0, project_root)

from devdeploy.inference.inference import create_inference


class SegmentationAnalyzerImpl:
    """Segmentation-model analyzer.

    Wraps a segmentation inference backend (``create_inference``) and
    computes per-class pixel statistics: the percentage of pixels belonging
    to each requested class plus a binary mask per class.  Large images can
    be processed with a sliding-window tile/fuse strategy via
    ``slide_analyze_single``.
    """

    def __init__(self,
                 model_path: str,
                 device: str = 'cpu',
                 class_names: Optional[List[str]] = None):
        """Initialize the analyzer and load the inference backend.

        Args:
            model_path: Path to the segmentation model file.
            device: Inference device, ``'cpu'`` or ``'cuda'``.
            class_names: Optional class-name list; falls back to
                ``['background', 'F', 'P']`` when None.

        Raises:
            RuntimeError: If the inference backend cannot be created.
        """
        self.model_path = model_path
        self.device = device
        # NOTE(review): this attribute is currently unused by the analysis
        # methods below, which read ``self.infer.class_names`` instead.
        self.class_names = class_names or ['background', 'F', 'P']

        try:
            self.infer = create_inference(model_path, device=device)
            print(f"分割模型分析器初始化成功，使用设备: {device}")
        except Exception as e:
            raise RuntimeError(f"初始化分割模型分析器失败: {str(e)}") from e

    def slide_analyze_single(
        self,
        image: Union[str, Image.Image, np.ndarray],
        target_classes: Optional[List[int]] = None,
        out_x: int = 512,
        out_y: int = 512,
        overlap: float = 0.25,
        pad_val: int = 0,
        fuse_mode: str = "max",
    ) -> Dict[str, Any]:
        """Analyze one image with sliding-window tiling, inference and fusion.

        The image is cut into overlapping ``out_x`` x ``out_y`` tiles, each
        tile is segmented independently, and overlapping label maps are fused
        element-wise (max or min) before the class statistics are computed on
        the stitched full-size mask.

        Args:
            image: Image path, PIL image, or numpy array (3-channel arrays
                are assumed to be BGR, OpenCV convention).
            target_classes: Class ids whose pixel ratios are computed;
                defaults to ``[1, 2]``.
            out_x: Tile width in pixels.
            out_y: Tile height in pixels.
            overlap: Overlap ratio between adjacent tiles, in ``[0, 1)``.
            pad_val: Constant pixel value used to pad the right/bottom border
                when the image is smaller than one tile.
            fuse_mode: ``"max"`` or ``"min"`` fusion for overlapping regions.

        Returns:
            Dict with ``class_ratios`` (percent per class id), ``class_masks``
            (binary uint8 mask per class id), ``class_names``, ``image_shape``
            and the stitched ``segmentation_mask`` (uint8 label map).

        Raises:
            RuntimeError: Wrapping any failure during conversion, inference
                or analysis.
        """
        if target_classes is None:
            target_classes = [1, 2]
        try:
            rgb = self._to_rgb_array(image)
            full_mask = self._slide_window_predict(
                rgb, out_x, out_y, overlap, pad_val, fuse_mode
            )
            return self._build_result(full_mask, target_classes)
        except Exception as e:
            raise RuntimeError(f"分割分析失败: {str(e)}") from e

    def analyze_single(self, image: Union[str, Image.Image, np.ndarray],
                       target_classes: Optional[List[int]] = None) -> Dict[str, Any]:
        """Analyze one image in a single inference pass (no tiling).

        Args:
            image: Image path (str), PIL image or numpy array; all three are
                forwarded to the backend unchanged.
            target_classes: Class ids whose pixel ratios are computed;
                defaults to ``[1, 2]``.

        Returns:
            Dict with ``class_ratios``, ``class_masks``, ``class_names``,
            ``image_shape`` and the raw ``segmentation_mask``.

        Raises:
            RuntimeError: Wrapping any failure during inference or analysis.
        """
        if target_classes is None:
            target_classes = [1, 2]
        try:
            # The backend accepts paths, PIL images and arrays alike; only
            # reject types it was never meant to see.
            if not isinstance(image, (str, Image.Image, np.ndarray)):
                raise ValueError(f"不支持的图像类型: {type(image)}")
            result = self.infer.infer_single(image)
            return self._build_result(result['segmentation_mask'], target_classes)
        except Exception as e:
            raise RuntimeError(f"分割分析失败: {str(e)}") from e

    @staticmethod
    def _to_rgb_array(image: Union[str, Image.Image, np.ndarray]) -> np.ndarray:
        """Convert any supported image input to an RGB ``np.ndarray``.

        Numpy inputs: 2-D arrays are expanded from grayscale; H x W x 3
        arrays are assumed to be BGR (OpenCV convention) and converted to
        RGB.  (Fixes the previous dtype-conditional double channel reversal,
        which made the BGR->RGB conversion a no-op for non-uint8 arrays.)

        Raises:
            ValueError: For unsupported channel counts or input types.
        """
        if isinstance(image, np.ndarray):
            if image.ndim == 2:
                return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            if image.ndim == 3 and image.shape[2] == 3:
                return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            raise ValueError("仅支持单通道或三通道图像。")
        if isinstance(image, Image.Image):
            return np.array(image.convert("RGB"))
        if isinstance(image, str):
            return np.array(Image.open(image).convert("RGB"))
        raise ValueError(f"不支持的图像类型: {type(image)}")

    def _slide_window_predict(self, img: np.ndarray, out_x: int, out_y: int,
                              overlap: float, pad_val: int,
                              fuse_mode: str) -> np.ndarray:
        """Run tiled inference over ``img`` and return the fused label map.

        Tiles are taken with the given overlap; tiles at the right/bottom
        border are shifted inward so every tile is full size.  Overlapping
        predictions are fused element-wise with max or min.

        Returns:
            uint8 label map cropped back to the original image size.
        """
        mode = fuse_mode.lower()
        if mode not in ("max", "min"):
            # Previously any unknown mode silently fell through to min fusion.
            raise ValueError(f"不支持的融合方式: {fuse_mode}")
        fuse = np.maximum if mode == "max" else np.minimum

        h0, w0 = img.shape[:2]
        # Pad bottom/right so the image holds at least one full tile.
        pad_h = max(0, out_y - h0)
        pad_w = max(0, out_x - w0)
        if pad_h or pad_w:
            img = cv2.copyMakeBorder(img, 0, pad_h, 0, pad_w,
                                     cv2.BORDER_CONSTANT, value=pad_val)
        h, w = img.shape[:2]

        # Step sizes derived from the overlap ratio (at least 1 px).
        step_x = max(1, int((1.0 - overlap) * out_x))
        step_y = max(1, int((1.0 - overlap) * out_y))

        canvas = np.full((h, w), -1, dtype=np.int32)  # -1 = not written yet
        covered = np.zeros((h, w), dtype=bool)        # which pixels have a label

        prev_xy = None
        for y0 in range(0, h, step_y):
            for x0 in range(0, w, step_x):
                # Clamp so border tiles stay fully inside the padded image.
                y = min(y0, h - out_y)
                x = min(x0, w - out_x)
                if (x, y) == prev_xy:
                    continue  # border clamping can repeat the previous tile
                prev_xy = (x, y)

                tile = img[y:y + out_y, x:x + out_x]
                infer_res = self.infer.infer_single(tile)
                if "segmentation_mask" not in infer_res:
                    raise RuntimeError("infer_single 返回结果中缺少 'segmentation_mask'。")
                tile_mask = infer_res["segmentation_mask"]
                if tile_mask.ndim != 2:
                    raise RuntimeError("期望 segmentation_mask 为二维标签图。")
                if tile_mask.shape != (out_y, out_x):
                    # Nearest-neighbour resize keeps integer class labels intact.
                    tile_mask = cv2.resize(tile_mask.astype(np.int32),
                                           (out_x, out_y),
                                           interpolation=cv2.INTER_NEAREST)

                # Views into canvas/covered: masked assignment mutates in place,
                # so no explicit write-back is needed.
                sub = canvas[y:y + out_y, x:x + out_x]
                sub_cov = covered[y:y + out_y, x:x + out_x]
                fresh = ~sub_cov
                sub[fresh] = tile_mask[fresh]                  # first write wins
                sub[sub_cov] = fuse(sub[sub_cov], tile_mask[sub_cov])
                sub_cov[:] = True

        # Crop back to the original size; any (theoretically impossible)
        # uncovered pixel falls back to class 0.
        out = canvas[:h0, :w0]
        return np.where(out < 0, 0, out).astype(np.uint8)

    def _resolve_class_names(self, target_classes: List[int]) -> Dict[int, str]:
        """Map class ids to names from the backend, with a generic fallback."""
        names = getattr(self.infer, 'class_names', None)
        resolved: Dict[int, str] = {}
        for class_id in target_classes:
            if names and class_id < len(names):
                resolved[class_id] = names[class_id]
            else:
                resolved[class_id] = f"类别{class_id}"
        return resolved

    def _build_result(self, segmentation_mask: np.ndarray,
                      target_classes: List[int]) -> Dict[str, Any]:
        """Assemble the public result dict for a predicted label map."""
        analysis = self._analyze_segmentation_mask(segmentation_mask, target_classes)
        return {
            'class_ratios': analysis['class_ratios'],
            'class_masks': analysis['class_masks'],
            'class_names': self._resolve_class_names(target_classes),
            'image_shape': segmentation_mask.shape,
            'segmentation_mask': segmentation_mask,
        }

    def _analyze_segmentation_mask(self, segmentation_mask: np.ndarray,
                                   target_classes: List[int]) -> Dict[str, Any]:
        """Compute per-class pixel ratios and binary masks for a label map.

        Args:
            segmentation_mask: Integer label map; statistics use the first
                two dimensions (H, W).
            target_classes: Class ids to analyze.

        Returns:
            Dict with ``class_ratios`` (percentage of all pixels, float) and
            ``class_masks`` (uint8 0/1 mask per class id).
        """
        height, width = segmentation_mask.shape[:2]
        total_pixels = height * width

        class_ratios: Dict[int, float] = {}
        class_masks: Dict[int, np.ndarray] = {}
        for class_id in target_classes:
            mask = (segmentation_mask == class_id).astype(np.uint8)
            class_masks[class_id] = mask
            class_ratios[class_id] = float((np.sum(mask) / total_pixels) * 100)

        return {
            'class_ratios': class_ratios,
            'class_masks': class_masks
        }

    def get_model_info(self) -> Dict[str, Any]:
        """Return backend model info, or an ``{'error': ...}`` dict on failure."""
        try:
            return self.infer.get_model_info()
        except Exception as e:
            return {'error': f"获取模型信息失败: {str(e)}"}

    def validate_model(self) -> bool:
        """Return True when the backend reports the model as valid."""
        try:
            return self.infer.validate_model()
        except Exception as e:
            print(f"模型验证失败: {str(e)}")
            return False

    def get_task_type(self) -> str:
        """Return the backend task type, or an error description string."""
        try:
            return self.infer.get_task_type()
        except Exception as e:
            return f"获取任务类型失败: {str(e)}"
