#!/usr/bin/env python3
"""
结果整合模块

整合所有帧的分割结果，生成分割视频、最优分割图、置信度评估和LVEF等下游任务结果
"""

import torch
import numpy as np
import cv2
from typing import Dict, List, Tuple, Optional, Any, Union
from pathlib import Path
import json

import sys
sys.path.append(str(Path(__file__).parent.parent.parent))
from utils.logger import get_logger


class ResultIntegrator:
    """Integrates per-frame segmentation results into downstream outputs.

    Responsibilities: stacking frame masks into a segmentation video,
    selecting the highest-confidence frame, per-frame confidence scoring,
    LVEF (left-ventricular ejection fraction) estimation from ED/ES masks,
    LVEF evaluation metrics, and report generation.
    """

    def __init__(self, device: str = "cuda"):
        """
        Initialize the result integrator.

        Args:
            device: Torch device string (e.g. "cuda", "cpu"); only used for
                the fallback zero tensor returned on empty input.
        """
        self.device = device
        self.logger = get_logger("ResultIntegrator")

    def integrate_video_results(self,
                               frame_results: List[torch.Tensor],
                               output_path: Optional[str] = None) -> np.ndarray:
        """
        Stack the segmentation results of all frames into one video array.

        Args:
            frame_results: Per-frame segmentation masks [(B, 1, H, W), ...];
                numpy arrays are also accepted per element. Singleton
                dimensions of tensors are squeezed away.
            output_path: Optional output video path; when given the stacked
                masks are also written to disk as a grayscale video.

        Returns:
            Segmentation video array of shape (T, H, W); empty array when
            frame_results is empty.
        """
        if len(frame_results) == 0:
            self.logger.warning("没有分割结果")
            return np.array([])

        # Convert each frame to a plain 2-D numpy mask.
        video_masks = []
        for mask in frame_results:
            if isinstance(mask, torch.Tensor):
                mask_np = mask.squeeze().cpu().numpy()
            else:
                mask_np = mask
            video_masks.append(mask_np)

        video_array = np.stack(video_masks, axis=0)  # (T, H, W)

        # Optionally persist the stacked masks as a video file.
        if output_path:
            self._save_video(video_array, output_path)

        return video_array

    def extract_best_segmentation(self,
                                  frame_results: List[torch.Tensor],
                                  confidences: List[torch.Tensor]) -> Tuple[torch.Tensor, int, float]:
        """
        Select the segmentation mask of the frame with the highest confidence.

        Args:
            frame_results: Per-frame segmentation masks.
            confidences: Per-frame confidence values (scalar tensors or floats),
                assumed parallel to frame_results.

        Returns:
            Tuple of (best mask, frame index, confidence). On empty input a
            zero mask of shape (1, 1, 224, 224) on self.device with index 0
            and confidence 0.0 is returned.
        """
        if len(frame_results) == 0:
            self.logger.warning("没有分割结果")
            return torch.zeros(1, 1, 224, 224, device=self.device), 0, 0.0

        # Normalize confidences to plain floats before comparing.
        confidence_values = [c.item() if isinstance(c, torch.Tensor) else c
                            for c in confidences]
        best_frame_idx = int(np.argmax(confidence_values))
        best_confidence = float(confidence_values[best_frame_idx])
        best_mask = frame_results[best_frame_idx]

        self.logger.info(f"最优分割图：帧{best_frame_idx}，置信度{best_confidence:.4f}")

        return best_mask, best_frame_idx, best_confidence

    def compute_confidence_scores(self,
                                 frame_results: List[torch.Tensor],
                                 method: str = "area") -> List[float]:
        """
        Compute a confidence score for each frame's segmentation.

        Args:
            frame_results: Per-frame segmentation masks.
            method: Scoring method:
                'area'        - fraction of pixels inside the mask;
                'consistency' - IoU-like overlap with the previous frame
                                (first frame defaults to 0.5);
                'quality'     - area-to-perimeter ratio (higher = smoother);
                any other value yields a constant 0.5 per frame.

        Returns:
            List of confidence scores, one per frame.
        """
        confidences = []

        for i, mask in enumerate(frame_results):
            if method == "area":
                # Fraction of foreground pixels.
                area = mask.sum().item()
                max_area = mask.numel()
                confidence = area / max_area if max_area > 0 else 0.0

            elif method == "consistency":
                # Overlap with the previous frame (intersection / union).
                # NOTE(review): assumes masks are binary {0, 1}; soft masks
                # would distort the intersection term — confirm upstream.
                if i > 0:
                    prev_mask = frame_results[i - 1]
                    overlap = (mask * prev_mask).sum().item()
                    union = ((mask + prev_mask) > 0).sum().item()
                    confidence = overlap / union if union > 0 else 0.0
                else:
                    confidence = 0.5  # default confidence for the first frame

            elif method == "quality":
                # Shape-quality proxy: compact regions have a large
                # area-to-perimeter ratio; epsilon avoids division by zero.
                area = mask.sum().item()
                perimeter = self._compute_perimeter(mask)
                confidence = area / (perimeter + 1e-8)
            else:
                confidence = 0.5

            confidences.append(confidence)

        return confidences

    def _compute_perimeter(self, mask: torch.Tensor) -> float:
        """Return the total contour length (pixels) of the binary mask.

        Args:
            mask: Segmentation mask tensor; squeezed to 2-D before contour
                extraction.

        Returns:
            Summed arc length of all external contours; 0.0 for an empty mask.
        """
        mask_np = mask.squeeze().cpu().numpy().astype(np.uint8)
        if mask_np.sum() == 0:
            return 0.0

        contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) == 0:
            return 0.0

        perimeter = sum(cv2.arcLength(contour, True) for contour in contours)
        return float(perimeter)

    def compute_lvef(self,
                     ed_mask: torch.Tensor,
                     es_mask: torch.Tensor,
                     method: str = "smod",
                     view_type: str = "2CH") -> Dict[str, float]:
        """
        Compute the left-ventricular ejection fraction (LVEF).

        Args:
            ed_mask: End-diastole (ED) segmentation mask, (B, 1, H, W) or (H, W).
            es_mask: End-systole (ES) segmentation mask, (B, 1, H, W) or (H, W).
            method: Volume-estimation method ('smod' = Simpson's biplane
                method, 'single' = single-plane method).
            view_type: View type ('2CH', '4CH', 'both'); forwarded to the
                biplane volume estimate.

        Returns:
            Dict with 'lvef' (percent), 'ed_volume', 'es_volume' (ml),
            'ed_area', 'es_area' (pixels), 'stroke_volume' (ml).
        """
        # Convert masks to numpy arrays.
        if isinstance(ed_mask, torch.Tensor):
            ed_mask_np = ed_mask.squeeze().cpu().numpy()
        else:
            ed_mask_np = ed_mask

        if isinstance(es_mask, torch.Tensor):
            es_mask_np = es_mask.squeeze().cpu().numpy()
        else:
            es_mask_np = es_mask

        # Foreground areas in pixels (0.5 threshold binarizes soft masks).
        ed_area = np.sum(ed_mask_np > 0.5)
        es_area = np.sum(es_mask_np > 0.5)

        if method == "smod":
            # Simpson's biplane method nominally needs both 2CH and 4CH
            # views; this simplified version estimates volume from a single
            # view's area.
            ed_volume = self._area_to_volume_smod(ed_area, view_type)
            es_volume = self._area_to_volume_smod(es_area, view_type)
        else:
            # Single-plane method.
            ed_volume = self._area_to_volume_single(ed_area)
            es_volume = self._area_to_volume_single(es_area)

        # LVEF = stroke volume / end-diastolic volume; guard ED volume of 0.
        if ed_volume > 0:
            lvef = ((ed_volume - es_volume) / ed_volume) * 100
        else:
            lvef = 0.0

        return {
            'lvef': float(lvef),
            'ed_volume': float(ed_volume),
            'es_volume': float(es_volume),
            'ed_area': float(ed_area),
            'es_area': float(es_area),
            'stroke_volume': float(ed_volume - es_volume)
        }

    def _area_to_volume_smod(self, area: float, view_type: str) -> float:
        """
        Estimate volume from area using (a simplification of) Simpson's
        biplane method.

        Args:
            area: Segmented area in pixels.
            view_type: View type; currently unused by the simplified formula.

        Returns:
            Estimated volume in ml.
        """
        # Simplification: assume 1 pixel = 1 mm. The real computation should
        # use the image's physical pixel spacing and both-view disc sums:
        # V = (pi/4) * sum(D1 * D2 * h).
        pixel_size_mm = 1.0
        area_mm2 = area * (pixel_size_mm ** 2)

        # Crude area-to-volume conversion factor; replace with a disc-based
        # summation when both views are available.
        volume_ml = area_mm2 * 0.1

        return volume_ml

    def _area_to_volume_single(self, area: float) -> float:
        """
        Estimate volume from area using the single-plane method.

        Args:
            area: Segmented area in pixels.

        Returns:
            Estimated volume in ml (same simplified conversion as the
            biplane variant: 1 pixel = 1 mm, factor 0.1).
        """
        pixel_size_mm = 1.0
        area_mm2 = area * (pixel_size_mm ** 2)
        volume_ml = area_mm2 * 0.1  # simplified conversion

        return volume_ml

    def compute_lvef_metrics(self,
                            predicted_lvef: List[float],
                            ground_truth_lvef: List[float]) -> Dict[str, float]:
        """
        Compute agreement metrics between predicted and reference LVEF values.

        Args:
            predicted_lvef: Predicted LVEF values.
            ground_truth_lvef: Ground-truth LVEF values (same length).

        Returns:
            Dict with 'corr' (Pearson r), 'bias' (mean error), 'std'
            (error standard deviation), 'mae', 'rmse'. On a length mismatch
            a zeroed {'corr','bias','std'} dict is returned.
        """
        if len(predicted_lvef) != len(ground_truth_lvef):
            self.logger.warning("预测和真实值数量不匹配")
            return {'corr': 0.0, 'bias': 0.0, 'std': 0.0}

        if len(predicted_lvef) == 0:
            # Empty input would make corrcoef/mean emit warnings and NaNs.
            return {'corr': 0.0, 'bias': 0.0, 'std': 0.0, 'mae': 0.0, 'rmse': 0.0}

        pred_array = np.asarray(predicted_lvef, dtype=np.float64)
        gt_array = np.asarray(ground_truth_lvef, dtype=np.float64)
        diff = pred_array - gt_array

        # Pearson correlation is undefined (NaN) for fewer than two samples
        # or a constant series; report 0.0 instead so downstream JSON
        # serialization never sees NaN.
        if len(pred_array) < 2 or np.std(pred_array) == 0 or np.std(gt_array) == 0:
            corr = 0.0
        else:
            corr = np.corrcoef(pred_array, gt_array)[0, 1]

        return {
            'corr': float(corr),
            'bias': float(np.mean(diff)),
            'std': float(np.std(diff)),
            'mae': float(np.mean(np.abs(diff))),
            'rmse': float(np.sqrt(np.mean(diff ** 2)))
        }

    def _save_video(self, video_array: np.ndarray, output_path: str):
        """
        Write the stacked masks to disk as a grayscale video.

        Args:
            video_array: Video array of shape (T, H, W), values expected
                in [0, 1].
            output_path: Destination video file path.
        """
        T, H, W = video_array.shape

        # Clip before scaling so out-of-range values cannot wrap around
        # during the uint8 cast (e.g. 1.01 * 255 -> 2 instead of 255).
        video_uint8 = (np.clip(video_array, 0.0, 1.0) * 255).astype(np.uint8)

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, 30.0, (W, H), isColor=False)
        try:
            for frame in video_uint8:
                # OpenCV requires contiguous frame buffers.
                out.write(np.ascontiguousarray(frame))
        finally:
            # Always release the writer, even if a write fails.
            out.release()
        self.logger.info(f"分割视频已保存: {output_path}")

    def generate_report(self,
                       results: Dict[str, Any],
                       output_dir: str,
                       task_name: str = "segmentation") -> Dict[str, str]:
        """
        Generate the markdown report skeleton and a JSON dump of the results.

        Args:
            results: Result dictionary (tensors/arrays are serialized).
            output_dir: Output directory (created if missing).
            task_name: Task name used in the report headings.

        Returns:
            Dict mapping report section name to the written file path.
        """
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)

        report_files = {}

        # (section key, file name, heading, subheading, placeholder body)
        sections = [
            ('method', "method.md",
             f"# {task_name} 方法说明\n\n", "## 使用的方法\n\n",
             "详细描述使用的方法、算法、参数等...\n"),
            ('processing_logic', "processing_logic.md",
             f"# {task_name} 处理逻辑\n\n", "## 处理流程\n\n",
             "说明数据处理的流程、步骤、决策逻辑等...\n"),
            ('results_explanation', "results_explanation.md",
             f"# {task_name} 结果解释\n\n", "## 结果说明\n\n",
             "详细解释每个结果的含义...\n"),
            ('inference', "inference.md",
             f"# {task_name} 推论\n\n", "## 结论\n\n",
             "从结果中得出的推论和结论...\n"),
        ]
        for key, filename, heading, subheading, body in sections:
            section_file = output_path / filename
            with open(section_file, 'w', encoding='utf-8') as f:
                f.write(heading)
                f.write(subheading)
                f.write(body)
            report_files[key] = str(section_file)

        # Dump the (serialized) result data alongside the markdown files.
        results_file = output_path / "results.json"
        results_serializable = self._serialize_results(results)
        with open(results_file, 'w', encoding='utf-8') as f:
            json.dump(results_serializable, f, indent=2, ensure_ascii=False)
        report_files['results'] = str(results_file)

        self.logger.info(f"报告已生成: {output_dir}")

        return report_files

    def _serialize_results(self, results: Dict[str, Any]) -> Dict[str, Any]:
        """Convert tensors/arrays in a result dict to JSON-serializable lists.

        Handles top-level tensors/arrays and one level of list/tuple nesting;
        nested dicts are passed through unchanged.
        """
        serializable = {}
        for key, value in results.items():
            if isinstance(value, torch.Tensor):
                serializable[key] = value.cpu().numpy().tolist()
            elif isinstance(value, np.ndarray):
                serializable[key] = value.tolist()
            elif isinstance(value, (list, tuple)):
                serializable[key] = [
                    v.cpu().numpy().tolist() if isinstance(v, torch.Tensor)
                    else v.tolist() if isinstance(v, np.ndarray) else v
                    for v in value
                ]
            else:
                serializable[key] = value
        return serializable
