from core.config import LOCAL_STORAGE_PATH
import numpy as np
from typing import List, Union, Optional, Tuple 
import tritonclient.http as httpclient
import cv2
import pydicom
import asyncio
from collections import Counter
from scipy import ndimage
from scipy.special import softmax

# ----------------------------- Model manager base class: start -----------------------------
class ModelManager:
    """
    Base class for Triton model managers.

    Provides the common preprocessing / postprocessing / inference hooks.
    Subclasses override :meth:`preprocess` and :meth:`postprocess` for their
    specific model I/O formats.
    """

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32',
                 model_name: str = '', batch_size: int = 1):
        # Triton input tensor name and datatype, target model and batch size.
        self.input_name = input_name
        self.input_type = input_type
        self.model_name = model_name
        self.batch_size = batch_size

    async def preprocess(self, inputs: "List[httpclient.InferInput]"):
        """Convert raw data into model inputs. Overridden by subclasses."""
        pass

    async def postprocess(self, results: "List[httpclient.InferResult]"):
        """Convert raw inference results into final outputs. Overridden by subclasses."""
        pass

    async def run(self, inputs: "List[httpclient.InferInput]", client, model_version: str = '1'):
        """
        Run a single inference request.

        :param inputs: Prepared Triton input objects.
        :param client: Triton HTTP client.
        :param model_version: Model version string.
        :return: Raw inference result.
        :raises ValueError: If the model name has not been set.
        """
        # Falsy check also rejects None, not just the empty string.
        if not self.model_name:
            raise ValueError("模型名称未设置")
        result = client.infer(self.model_name, inputs, model_version=model_version)
        return result

    async def run_batch(self, data_list: List[np.ndarray], client, model_version: str = '1'):
        """
        Run inference for several samples, one request per sample.

        :param data_list: List of preprocessed per-sample inputs.
        :param client: Triton HTTP client.
        :param model_version: Model version string.
        :return: List of raw inference results, one per sample.
        :raises ValueError: If the model name has not been set.
        """
        if not self.model_name:
            raise ValueError("模型名称未设置")

        results = []
        for data in data_list:
            result = client.infer(self.model_name, data, model_version=model_version)
            results.append(result)
        return results

    async def process_batch(self, image_list: List, client, model_version: str = '1'):
        """
        Full batch pipeline: preprocess -> inference -> postprocess.

        :param image_list: List of raw images.
        :param client: Triton HTTP client.
        :param model_version: Model version string.
        :return: List of postprocessed results, one per input image.
        """
        # Preprocess every sample.
        preprocessed_data = []
        for image in image_list:
            data = await self.preprocess(image)
            preprocessed_data.append(data)

        # Run inference per sample.
        raw_results = await self.run_batch(preprocessed_data, client, model_version)

        # Postprocess every raw result.
        final_results = []
        for result in raw_results:
            processed_result = await self.postprocess(result)
            final_results.append(processed_result)

        return final_results

    def list2image(self, data: List[List[int]], height: int = 512, width: int = 512):
        """
        Convert nested-list pixel data into an (H, W, 3) array.

        :param data: 2-D grayscale or 3-D color pixel data.
        :param height: Target image height (default 512, as before).
        :param width: Target image width (default 512, as before).
        :return: numpy array of shape (height, width, 3).
        :raises ValueError: If the data is neither 2-D nor 3-D.
        """
        image = np.array(data)
        if image.ndim == 2:
            # Grayscale: replicate the single channel into 3 channels.
            image = image.reshape(height, width, 1)
            image = image.repeat(3, axis=2)
        elif image.ndim == 3:
            image = image.reshape(height, width, 3)
        else:
            raise ValueError(f"Invalid image dimension: {image.ndim}")
        return image

# ----------------------------- Model manager base class: end -----------------------------


# ----------------------------- Position estimation model: start -----------------------------
class PositionEstimationModelManager(ModelManager):
    """
    Manager for the body-position (projection) estimation model.

    Combines the classifier output with the C-arm acquisition angles to
    produce a coarse position code ('F', 'LS', 'RCA', 'other' or 'unknown').
    """

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32',
                 model_name: str = 'position_estimation', batch_size: int = 8):
        # Pass everything through the base constructor instead of re-assigning.
        super().__init__(input_name, input_type, model_name, batch_size)
        # Maps a machine-position description to the coarse position code.
        self.label_dict = {
            "左冠足位": 'F',
            "左冠左肩位": 'LS',
            "右冠头位": 'RCA',
            "右冠左前斜位(偏头位)": 'RCA',
            "左冠头位": 'other',
            "左冠蜘蛛位": 'other',
            "左冠右肩位": 'other',
            "左冠肝位": 'other',
            "": 'other',
        }

    def _classify_xa_image_angles(self, primary_angle: Optional[float], secondary_angle: Optional[float]) -> Tuple[Optional[str], Optional[str]]:
        """
        Classify the XA image acquisition angles.

        :param primary_angle: Primary (LAO/RAO) angle in degrees.
        :param secondary_angle: Secondary (CRA/CAU) angle in degrees.
        :return: (primary class, secondary class); (None, None) when an angle is missing.
        """
        if primary_angle is None or secondary_angle is None:
            return None, None

        if -15 <= primary_angle <= 15:
            primary_angle_class = "AP"
        elif 15 < primary_angle <= 90:
            primary_angle_class = "LAO"
        elif -90 <= primary_angle < -15:
            primary_angle_class = "RAO"
        else:
            primary_angle_class = "Unknown"

        secondary_angle_class = "CRA" if secondary_angle >= 0 else "CAU"

        return primary_angle_class, secondary_angle_class

    def _determine_machine_position(self, primary_angle: Optional[float], secondary_angle: Optional[float], primary_angle_class: Optional[str], secondary_angle_class: Optional[str]) -> Optional[Tuple[str, str]]:
        """
        Determine the (left coronary, right coronary) position descriptions.

        :return: A (left, right) description tuple; ("未知体位", "未知体位") when the
                 angle classes are not covered by the table; None when the classes
                 match but the angles fall outside the expected range (the table
                 maps the key to None in that case).
        """
        if None in [
            primary_angle,
            secondary_angle,
            primary_angle_class,
            secondary_angle_class,
        ]:
            return "未知体位", "未知体位"

        positions = {
            ("AP", "CAU"): (
                ("左冠足位", "右冠头位") if -45 <= secondary_angle <= 0 else None
            ),
            ("AP", "CRA"): ("左冠头位", "右冠头位") if 0 < secondary_angle <= 45 else None,
            ("LAO", "CAU"): (
                ("左冠蜘蛛位", "右冠左前斜位(偏头位)")
                if 16 <= primary_angle <= 45 and -45 <= secondary_angle <= 0
                else None
            ),
            ("LAO", "CRA"): (
                ("左冠左肩位", "右冠左前斜位(偏头位)")
                if 16 <= primary_angle <= 45 and 0 < secondary_angle <= 45
                else None
            ),
            ("RAO", "CAU"): (
                ("左冠肝位", "")
                if -45 <= primary_angle <= -16 and -45 <= secondary_angle <= 0
                else None
            ),
            ("RAO", "CRA"): (
                ("左冠右肩位", "")
                if -45 <= primary_angle <= -16 and 0 < secondary_angle <= 45
                else None
            ),
        }

        # NOTE: .get may still return None when the key exists but the angle
        # range check above evaluated to None.
        return positions.get(
            (primary_angle_class, secondary_angle_class), ("未知体位", "未知体位")
        )

    def _get_max_label_pred(self, result_label: np.ndarray, result_pred: np.ndarray):
        """
        Majority vote over per-frame labels.

        :param result_label: Predicted label per frame.
        :param result_pred: Prediction scores per frame.
        :return: (most common label, mean score of the frames with that label)
        """
        result_label_count = Counter(result_label)
        max_label_pred = result_label_count.most_common(1)[0][0]
        result_print_str = [result_pred[i] for i, category in enumerate(result_label) if category == max_label_pred]
        average_pred_for_most_common = sum(result_print_str) / len(result_print_str)
        return max_label_pred, average_pred_for_most_common

    async def get_position_label(self, label: np.ndarray, pred: np.ndarray, primary_angle: float, secondary_angle: float):
        """
        Derive the final position code from model output and acquisition angles.

        :param label: Per-frame predicted labels.
        :param pred: Per-frame prediction scores.
        :param primary_angle: Primary angle in degrees.
        :param secondary_angle: Secondary angle in degrees.
        :return: One of the codes in ``label_dict`` values, or 'unknown'.
        """
        primary_angle_class, secondary_angle_class = self._classify_xa_image_angles(primary_angle, secondary_angle)
        result_angle = self._determine_machine_position(primary_angle, secondary_angle, primary_angle_class, secondary_angle_class)
        max_label_pred, _ = self._get_max_label_pred(label, pred)
        if result_angle is None:  # known classes but out-of-range angles
            result_angle = ["", ""]

        # Use .get with an 'other' fallback: _determine_machine_position may
        # return "未知体位", which is not a key of label_dict (previously this
        # raised KeyError).
        if max_label_pred == 0:
            position_result = self.label_dict.get(result_angle[0], 'other')
        elif max_label_pred == 1:
            position_result = self.label_dict.get(result_angle[1], 'other')
        else:
            position_result = 'unknown'

        return position_result

    async def preprocess(self, image: np.ndarray):
        """
        Normalize to [0, 1] and tile the gray channel to 3 channels.

        Assumes ``image`` is a frame stack shaped (N, H, W) — the channel axis
        is inserted at axis 1 (TODO confirm against callers).
        """
        data = image
        dmin = data.min()
        drange = data.max() - dmin
        # Guard against constant images, which would otherwise divide by zero
        # and propagate NaNs into the model input.
        if drange == 0:
            data = np.zeros_like(data, dtype=np.float32)
        else:
            data = (data - dmin) / drange
        data = np.expand_dims(data, axis=1)
        data = data.repeat(3, axis=1)
        data = data.astype(np.float32)
        return data

    async def run(self, data: np.ndarray, client, model_version: str = '1'):
        """
        Run inference in mini-batches of ``self.batch_size``.

        :param data: Preprocessed array whose first axis is the sample axis.
        :param client: Triton HTTP client.
        :param model_version: Model version string.
        :return: List of raw per-batch inference results.
        :raises ValueError: If the model name is unset or data has no batch axis.
        :raises TypeError: If data is not a numpy array.
        """
        if self.model_name == '':
            raise ValueError("模型名称未设置")
        if not isinstance(data, np.ndarray):
            raise TypeError("data 应为 numpy.ndarray")
        if data.ndim == 0:
            raise ValueError("data 维度错误，至少需要包含 batch 维度")

        batch_size = max(1, int(getattr(self, 'batch_size', 1) or 1))
        num_samples = data.shape[0]

        result_list = []
        for start in range(0, num_samples, batch_size):
            end = min(start + batch_size, num_samples)
            batch = data[start:end]
            inputs = [httpclient.InferInput(self.input_name, batch.shape, self.input_type)]
            inputs[0].set_data_from_numpy(batch, binary_data=True)
            result = client.infer(self.model_name, inputs, model_version=model_version)
            result_list.append(result)

        return result_list

    async def postprocess(self, results: "List[httpclient.InferResult]"):
        """
        Collect per-batch labels and probabilities.

        :param results: Raw per-batch results from :meth:`run`.
        :return: (labels concatenated over batches, softmax probabilities).
        """
        # Collect per-batch chunks to avoid shape-mismatch concatenation errors.
        label_chunks: List[np.ndarray] = []
        pred_chunks: List[np.ndarray] = []
        for result in results:
            output = result.as_numpy("output")
            label_chunks.append(np.argmax(output, axis=1))
            pred_chunks.append(softmax(output, axis=1))
        processed_results = np.concatenate(label_chunks, axis=0) if label_chunks else np.array([], dtype=np.int64)
        processed_preds = np.concatenate(pred_chunks, axis=0) if pred_chunks else np.array([])
        return processed_results, processed_preds

   
# ----------------------------- Position estimation model: end -----------------------------

# ----------------------------- Vessel segmentation model: start -----------------------------
class SegmentationModelManager(ModelManager):
    """
    Manager for the vessel segmentation model (512x512 inputs).

    Also provides connected-component analysis used to score frames and pick
    the best (key) frame of a segmented sequence.
    """

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32',
                 model_name: str = 'vessel_segmentation', batch_size: int = 8):
        super().__init__(input_name, input_type, model_name, batch_size)

    async def preprocess(self, image: np.ndarray):
        """
        Normalize to [0, 1] and reshape into (N, 3, 512, 512) float32.

        :param image: Grayscale data reshapeable to (-1, 512, 512).
        :return: float32 array of shape (N, 3, 512, 512).
        """
        imin = image.min()
        irange = image.max() - imin
        # Guard against constant input, which would otherwise divide by zero.
        if irange == 0:
            image = np.zeros_like(image, dtype=np.float32)
        else:
            image = (image - imin) / irange
        # NCHW layout; tile the single gray channel to 3 channels.
        image = image.reshape(-1, 1, 512, 512)
        image = image.repeat(3, axis=1)
        image = image.astype(np.float32)
        return image

    async def run(self, data: np.ndarray, client, model_version: str = '1'):
        """
        Run inference in mini-batches of ``self.batch_size``.

        :param data: Preprocessed array whose first axis is the sample axis.
        :param client: Triton HTTP client.
        :param model_version: Model version string.
        :return: List of per-batch "output" numpy arrays.
        :raises ValueError: If the model name is unset or data has no batch axis.
        :raises TypeError: If data is not a numpy array.
        """
        if self.model_name == '':
            raise ValueError("模型名称未设置")
        if not isinstance(data, np.ndarray):
            raise TypeError("data 应为 numpy.ndarray")
        if data.ndim == 0:
            raise ValueError("data 维度错误，至少需要包含 batch 维度")

        batch_size = max(1, int(getattr(self, 'batch_size', 1) or 1))
        num_samples = data.shape[0]

        result_list = []
        for start in range(0, num_samples, batch_size):
            end = min(start + batch_size, num_samples)
            batch = data[start:end]
            inputs = [httpclient.InferInput(self.input_name, batch.shape, self.input_type)]
            inputs[0].set_data_from_numpy(batch, binary_data=True)
            results = client.infer(self.model_name, inputs, model_version=model_version)
            results = results.as_numpy("output")
            result_list.append(results)
        return result_list

    async def postprocess(self, results: List[np.ndarray]):
        """Concatenate batch outputs and argmax over channels into a uint8 mask."""
        logits = np.concatenate(results, axis=0)
        segmentation = np.argmax(logits, axis=1).astype(np.uint8)
        return segmentation

    def _empty_component_result(self):
        """Zeroed analysis result used when no (valid) component exists."""
        return {
            'num_components': 0,
            'total_area': 0,
            'avg_area': 0,
            'max_area': 0,
            'connectivity_score': 0,
            'distribution_score': 0,
            'quality_indicators': {
                'has_large_component': False,
                'has_small_components': False,
            }
        }

    def _analyze_connected_components_enhanced(self, segmentation: np.ndarray, min_area: int = 100):
        """
        Connected-component analysis with basic robustness checks.

        :param segmentation: Segmentation mask (H, W); any value > 0 is foreground.
        :param min_area: Minimum component area (in pixels) to keep.
        :return: Dict with component statistics, a connectivity score and
                 quality flags. All return paths now share the same keys
                 (previously the final path omitted 'distribution_score').
        """
        # Binarize the mask.
        binary_mask = (segmentation > 0).astype(np.uint8)

        # Morphological close/open to remove small noise.
        kernel = np.ones((3, 3), np.uint8)
        binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_CLOSE, kernel)
        binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_OPEN, kernel)

        # Label connected components.
        labeled, num_features = ndimage.label(binary_mask)

        if num_features == 0:
            return self._empty_component_result()

        # Areas of the components that pass the minimum-area filter.
        areas = []
        for i in range(1, num_features + 1):
            area = np.sum(labeled == i)
            if area >= min_area:
                areas.append(area)

        if len(areas) == 0:
            return self._empty_component_result()

        areas = np.array(areas)
        valid_components = len(areas)
        total_area = np.sum(areas)
        avg_area = np.mean(areas)
        max_area = np.max(areas)

        quality_indicators = {
            'has_large_component': max_area / total_area > 0.5,  # one dominant component
            'has_small_components': valid_components < 5,  # few (small) components overall
        }

        # Component-count score: exponential decay, fewer components score higher.
        num_score = max(0, np.exp(-valid_components / 3))
        # Largest-component area-ratio score.
        max_area_ratio_actual = max_area / total_area if total_area > 0 else 0
        max_area_score = max(0, max_area_ratio_actual)
        # Bonus from the boolean quality flags.
        quality_bonus = sum(quality_indicators.values()) / len(quality_indicators)
        connectivity_score = (num_score + max_area_score + quality_bonus) / 3

        return {
            'num_components': valid_components,
            'total_area': total_area,
            'avg_area': avg_area,
            'max_area': max_area,
            'connectivity_score': connectivity_score,
            # Kept for key-consistency with the empty result; never computed here.
            'distribution_score': 0,
            'quality_indicators': quality_indicators
        }

    async def get_key_frames(self, segmentation: np.ndarray):
        """
        Pick the best frame index from a segmentation stack.

        Frames are scored by a weighted mix of foreground pixel count and the
        connected-component connectivity score; the top-scoring frame wins.

        :param segmentation: Segmentation masks of shape (num_frames, H, W).
        :return: Index of the best frame.
        :raises ValueError: If the stack contains no frames.
        """
        if segmentation.shape[0] == 0:
            raise ValueError("segmentation contains no frames")

        # Weights chosen empirically; connectivity is scaled (x1000) to be
        # comparable in magnitude to the raw pixel count.
        pixel_weight = 0.1
        connectivity_weight = 0.9

        key_frame_seg = []
        for frame in range(segmentation.shape[0]):
            # Base score: number of foreground pixels in the frame.
            mask_num = np.sum(segmentation[frame])
            # Connectivity quality of the frame's components.
            connectivity_analysis = self._analyze_connected_components_enhanced(segmentation[frame])
            combined_score = (pixel_weight * mask_num +
                              connectivity_weight * connectivity_analysis['connectivity_score'] * 1000)

            key_frame_seg.append({
                'frame': frame,
                'pixel_count': mask_num,
                'connectivity_score': connectivity_analysis['connectivity_score'],
                'combined_score': combined_score,
                'analysis': connectivity_analysis
            })

        # Highest combined score wins (first occurrence on ties, as before).
        best = max(key_frame_seg, key=lambda item: item['combined_score'])
        return best['frame']
        

# ----------------------------- Vessel segmentation model: end -----------------------------
class MainVesselModelManager(ModelManager):
    """Manager for the LAD/LCX main-vessel segmentation model."""

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32',
                 model_name: str = 'main_segmentation', batch_size: int = 8):
        # Delegate all configuration to the base constructor.
        super().__init__(input_name, input_type, model_name, batch_size)

    async def preprocess(self, image: np.ndarray):
        """Reshape to (N, 3, 512, 512) float32 by tiling the gray channel."""
        chw = image.reshape(-1, 1, 512, 512).repeat(3, axis=1)
        return chw.astype(np.float32)

    async def run(self, data: np.ndarray, client, model_version: str = '1'):
        """Run inference and return the raw LAD and LCX output tensors."""
        if self.model_name == '':
            raise ValueError("模型名称未设置")
        infer_input = httpclient.InferInput(self.input_name, data.shape, self.input_type)
        infer_input.set_data_from_numpy(data, binary_data=True)
        response = client.infer(self.model_name, [infer_input], model_version=model_version)
        return [response.as_numpy("output_lad"), response.as_numpy("output_lcx")]

    async def postprocess(self, results: List[np.ndarray]):
        """Argmax over channels; return uint8 LAD and LCX masks of the first sample."""
        return [np.argmax(logits, axis=1)[0].astype(np.uint8) for logits in results]
    
# ----------------------------- Lesion detection model: start -----------------------------
class LesionModelManager(ModelManager):
    """
    Lesion detection model manager.

    Provides preprocessing (normalization + Triton input construction) and
    postprocessing (detection dict assembly) for the lesion detection model.
    """

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32', model_name: str = 'lesion_detection'):
        super().__init__(input_name, input_type, model_name)

    async def preprocess(self, image: List[List[int]]):
        """
        Build the Triton input for one image.

        :param image: Nested-list pixel data (512x512 gray or color).
        :return: List with a single prepared InferInput.
        """
        data = self.list2image(image)
        image_float = data.astype(np.float32)

        # Dataset-specific normalization constants.
        mean = np.array([105.16, 105.16, 105.16], dtype=np.float32)
        std = np.array([39.4, 39.4, 39.4], dtype=np.float32)

        # Normalize: (image - mean) / std
        normalized = (image_float - mean) / std
        preprocessed = normalized.transpose(2, 0, 1).astype(np.float32)  # HWC -> CHW
        preprocessed = np.expand_dims(preprocessed, axis=0).astype(np.float32)  # add batch dim
        # Build the Triton input object.
        inputs = [httpclient.InferInput(self.input_name, preprocessed.shape, self.input_type)]
        inputs[0].set_data_from_numpy(preprocessed, binary_data=True)

        return inputs

    async def postprocess(self, results: "httpclient.InferResult"):
        """
        Convert raw detections into a list of dicts.

        Each entry has 'bbox' ([x1, y1, x2, y2]), 'label' (int) and
        'confidence' (float, 1.0 when no score column is present).

        :param results: Raw inference result with "dets" and "labels" outputs.
        :return: List of detection dicts (possibly empty).
        """
        dets_data = results.as_numpy("dets")
        labels_data = results.as_numpy("labels")
        detections = []
        if dets_data.size > 0 and labels_data.size > 0:
            # Only iterate indices present in both outputs.
            for i in range(min(len(labels_data), len(dets_data))):
                det = dets_data[i]
                # Rows longer than 4 carry [x1, y1, x2, y2, score]; keep only the
                # coordinates in 'bbox' (the original ternary had identical
                # branches and returned the score inside the bbox).
                has_score = len(det) > 4
                detections.append({
                    'bbox': det[:4].tolist() if has_score else det.tolist(),
                    # Plain Python scalars so the result is JSON-serializable.
                    'label': int(labels_data[i]),
                    'confidence': float(det[-1]) if has_score else 1.0,
                })
        return detections
# ----------------------------- Lesion detection model: end -----------------------------


# ----------------------------- Segment naming models: start -----------------------------
class SegmentNamedModelManager(ModelManager):
    """Base manager for the coronary segment-naming models."""

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32', model_name: str = 'segment_named'):
        # Base constructor already accepts the model name.
        super().__init__(input_name, input_type, model_name)

    async def preprocess(self, image: list[list[int]]):
        """
        Prepare one image for the segment-naming model.

        Steps applied here:
          1. Convert the nested list to an (H, W, 3) array.
          2. Standardize with mean [123.675, 116.28, 103.53] and
             std [58.395, 57.12, 57.375].
          3. Transpose HWC -> CHW, cast to float32, add the batch dim.

        NOTE(review): the original docstring also listed BGR->RGB conversion
        and resizing to 512x512, but this method performs neither —
        presumably the caller handles them; confirm upstream.
        """
        image = self.list2image(image)

        mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
        std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
        standardized = (image - mean) / std
        chw = standardized.transpose(2, 0, 1).astype(np.float32)
        batched = np.expand_dims(chw, axis=0)

        triton_input = httpclient.InferInput(self.input_name, batched.shape, self.input_type)
        triton_input.set_data_from_numpy(batched, binary_data=True)
        return [triton_input]

    async def postprocess(self, results: "httpclient.InferResult"):
        """Return the raw output mask cast to uint8."""
        return results.as_numpy("output").astype(np.uint8)
    
class F_SegmentNamedModelManager(SegmentNamedModelManager):
    """Segment-naming manager for the 'F' view model."""

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32', model_name: str = 'segment_f'):
        super().__init__(input_name, input_type, model_name)

    async def postprocess(self, results: "httpclient.InferResult"):
        """
        Map the predicted mask classes to segment ids.

        :param results: Raw inference result with an "output" mask.
        :return: uint8 mask with remapped segment ids.
        """
        raw_mask = results.as_numpy("output").astype(np.uint8)
        # class id -> segment id; 14a/14b are treated as 14 and 12a/12b as 12
        # for now; 26 marks non-target segments.
        id_map = {1: 5, 2: 11, 3: 13, 4: 14, 5: 12, 6: 26}
        pred_result = np.zeros_like(raw_mask, dtype=np.uint8)
        for class_id, segment_id in id_map.items():
            pred_result[raw_mask == class_id] = segment_id
        return pred_result

class LS_SegmentNamedModelManager(SegmentNamedModelManager):
    """Segment-naming manager for the 'LS' view model."""

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32', model_name: str = 'segment_ls'):
        # The parent constructor already binds the model name.
        super().__init__(input_name, input_type, model_name)

class RCA_SegmentNamedModelManager(SegmentNamedModelManager):
    """Segment-naming manager for the 'RCA' view model."""

    def __init__(self, input_name: str = 'input', input_type: str = 'FP32', model_name: str = 'segment_rca'):
        # The parent constructor already binds the model name.
        super().__init__(input_name, input_type, model_name)
# ----------------------------- Segment naming models: end -----------------------------