"""
超声心动图数据预处理器

提供图像预处理、质量过滤、数据增强等功能
"""

import cv2
import numpy as np
import torch
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Union, Any, Callable
import albumentations as A
from albumentations.pytorch import ToTensorV2
import logging
from scipy import ndimage
from skimage import filters, measure, morphology
from skimage.metrics import structural_similarity as ssim

from src.utils.logger import get_logger


class ImageQualityFilter:
    """Flags low-quality echocardiogram frames.

    Quality is judged on four normalized metrics: contrast, brightness,
    blurriness and sharpness. The normalization constants are empirical
    and tuned for 8-bit ultrasound imagery.
    """

    # Empirical scales used to map raw metric values into roughly [0, 1].
    _LAPLACIAN_SCALE = 1000.0  # typical Laplacian variance of a sharp frame
    _SOBEL_SCALE = 100.0       # typical mean Sobel magnitude of a sharp frame

    def __init__(self,
                 min_contrast: float = 0.1,
                 min_brightness: float = 0.1,
                 max_blur: float = 0.8,
                 min_sharpness: float = 0.1):
        """
        Initialize the quality filter.

        Args:
            min_contrast: Minimum normalized contrast (gray std / 255).
            min_brightness: Minimum normalized brightness (gray mean / 255).
            max_blur: Maximum allowed blurriness, where 0 = perfectly
                sharp and 1 = fully blurred.
            min_sharpness: Minimum normalized sharpness (mean Sobel
                magnitude / 100).
        """
        self.min_contrast = min_contrast
        self.min_brightness = min_brightness
        self.max_blur = max_blur
        self.min_sharpness = min_sharpness
        self.logger = get_logger("ImageQualityFilter")

    @staticmethod
    def _to_gray(image: np.ndarray) -> np.ndarray:
        """Return a single-channel version of *image* (3-channel input is assumed BGR)."""
        if len(image.shape) == 3:
            return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return image

    def calculate_contrast(self, image: np.ndarray) -> float:
        """Contrast as the standard deviation of gray levels."""
        return float(np.std(self._to_gray(image)))

    def calculate_brightness(self, image: np.ndarray) -> float:
        """Brightness as the mean gray level."""
        return float(np.mean(self._to_gray(image)))

    def calculate_blur(self, image: np.ndarray) -> float:
        """Variance of the Laplacian.

        NOTE: this is a *sharpness* measure — it is HIGH for sharp
        images and LOW for blurred ones.
        """
        return float(cv2.Laplacian(self._to_gray(image), cv2.CV_64F).var())

    def calculate_sharpness(self, image: np.ndarray) -> float:
        """Sharpness as the mean gradient magnitude from 3x3 Sobel filters."""
        gray = self._to_gray(image)
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
        return float(np.mean(np.sqrt(sobelx**2 + sobely**2)))

    def is_good_quality(self, image: np.ndarray) -> bool:
        """Return True if the image passes all four quality thresholds.

        Any metric failure returns False; unexpected errors are logged
        and also yield False (the frame is treated as unusable).
        """
        try:
            contrast_norm = self.calculate_contrast(image) / 255.0
            brightness_norm = self.calculate_brightness(image) / 255.0
            # BUG FIX: calculate_blur() returns the Laplacian variance,
            # which is HIGH for sharp frames. The old check
            # `variance/1000 <= max_blur` therefore rejected sharp frames
            # and accepted blurry ones. Convert to a true blurriness
            # score in [0, 1] (1 == fully blurred) before comparing.
            blur_norm = 1.0 - min(self.calculate_blur(image) / self._LAPLACIAN_SCALE, 1.0)
            sharpness_norm = self.calculate_sharpness(image) / self._SOBEL_SCALE

            quality_good = (
                contrast_norm >= self.min_contrast and
                brightness_norm >= self.min_brightness and
                blur_norm <= self.max_blur and
                sharpness_norm >= self.min_sharpness
            )

            if not quality_good:
                self.logger.debug(f"质量过滤: contrast={contrast_norm:.3f}, "
                                f"brightness={brightness_norm:.3f}, "
                                f"blur={blur_norm:.3f}, "
                                f"sharpness={sharpness_norm:.3f}")

            return quality_good

        except Exception as e:
            self.logger.warning(f"质量检查失败: {e}")
            return False


class EchoPreprocessor:
    """Echocardiogram image preprocessor.

    Handles channel normalization, resizing, quality screening,
    optional mean/std normalization, cardiac-cycle phase estimation
    and region-of-interest extraction.
    """

    def __init__(self,
                 image_size: Tuple[int, int] = (1024, 1024),
                 normalize: bool = True,
                 mean: Tuple[float, float, float] = (0.485, 0.456, 0.406),
                 std: Tuple[float, float, float] = (0.229, 0.224, 0.225),
                 quality_filter: Optional["ImageQualityFilter"] = None):
        """
        Initialize the preprocessor.

        Args:
            image_size: Target image size as (height, width).
            normalize: Whether to apply mean/std normalization.
            mean: Per-channel normalization mean (ImageNet defaults).
            std: Per-channel normalization std (ImageNet defaults).
            quality_filter: Quality filter instance; a default
                ImageQualityFilter is created when omitted.
        """
        self.image_size = image_size
        self.normalize = normalize
        self.mean = mean
        self.std = std
        self.quality_filter = quality_filter or ImageQualityFilter()
        self.logger = get_logger("EchoPreprocessor")

    def preprocess_image(self, image: np.ndarray) -> np.ndarray:
        """
        Preprocess a single image.

        Args:
            image: Input image, (H, W, C) or (H, W). 3-channel input is
                assumed to be BGR (OpenCV convention).

        Returns:
            The resized image; float32 and mean/std-normalized when
            ``self.normalize`` is True, otherwise the raw dtype.
        """
        # Force a 3-channel BGR image.
        if len(image.shape) == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        elif len(image.shape) == 3 and image.shape[2] == 4:
            image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

        # cv2.resize takes (width, height), image_size is (height, width).
        image = cv2.resize(image, (self.image_size[1], self.image_size[0]))

        # Low quality only triggers a warning; the frame is still returned.
        if not self.quality_filter.is_good_quality(image):
            self.logger.warning("图像质量不佳，可能影响训练效果")

        if self.normalize:
            image = image.astype(np.float32) / 255.0
            image = (image - np.array(self.mean)) / np.array(self.std)

        return image

    def preprocess_frames(self, frames: List[np.ndarray]) -> List[np.ndarray]:
        """
        Preprocess a sequence of frames.

        Frames that fail preprocessing are logged and skipped, so the
        returned list may be shorter than the input.

        Args:
            frames: List of raw frames.

        Returns:
            List of preprocessed frames.
        """
        processed_frames = []

        for i, frame in enumerate(frames):
            try:
                processed_frames.append(self.preprocess_image(frame))
            except Exception as e:
                self.logger.warning(f"预处理第{i}帧失败: {e}")

        return processed_frames

    def detect_cardiac_cycle(self, frames: List[np.ndarray]) -> Dict[str, Any]:
        """
        Estimate cardiac-cycle phases from inter-frame motion.

        The mean absolute gray-level difference between consecutive
        frames is used as a motion signal; its peaks are taken as
        systole markers.

        Args:
            frames: Frame sequence (gray or BGR). Fewer than 10 frames
                yields an empty result.

        Returns:
            Dict with ``phases`` (one value in [0, 1) per frame),
            ``cycle_length`` (frames per detected cycle) and ``peaks``
            (indices into the frame-difference signal).
        """
        if len(frames) < 10:
            return {'phases': [], 'cycle_length': 0}

        # Motion signal: mean absolute difference of consecutive frames.
        diffs = []
        for i in range(1, len(frames)):
            if len(frames[i].shape) == 3:
                gray1 = cv2.cvtColor(frames[i-1], cv2.COLOR_BGR2GRAY)
                gray2 = cv2.cvtColor(frames[i], cv2.COLOR_BGR2GRAY)
            else:
                gray1, gray2 = frames[i-1], frames[i]

            diffs.append(np.mean(np.abs(gray2.astype(float) - gray1.astype(float))))

        # Peaks above the mean, at least 5 frames apart, mark systole.
        from scipy.signal import find_peaks
        peaks, _ = find_peaks(diffs, height=np.mean(diffs), distance=5)

        phases = []
        if len(peaks) > 0:
            # Guarded by len(peaks) > 0, so no extra conditional needed
            # (the original re-tested len(peaks) here redundantly).
            cycle_length = len(frames) // len(peaks)
            for i in range(len(frames)):
                phases.append((i % cycle_length) / cycle_length)
        else:
            # No clear periodicity: fall back to a linear phase ramp.
            for i in range(len(frames)):
                phases.append(i / len(frames))

        return {
            'phases': phases,
            'cycle_length': len(frames) // max(len(peaks), 1),
            'peaks': peaks.tolist()
        }

    def extract_roi(self, image: np.ndarray,
                   method: str = 'auto') -> Tuple[np.ndarray, Tuple[int, int, int, int]]:
        """
        Extract a region of interest.

        Args:
            image: Input image.
            method: Extraction method: 'auto', 'center' or
                'largest_connected'.

        Returns:
            Tuple of (ROI image, bounding box as (x1, y1, x2, y2)).
        """
        if method == 'center':
            # Center crop of roughly half the smaller dimension.
            h, w = image.shape[:2]
            center_h, center_w = h // 2, w // 2
            crop_h, crop_w = min(h, w) // 2, min(h, w) // 2

            x1 = max(0, center_w - crop_w // 2)
            y1 = max(0, center_h - crop_h // 2)
            x2 = min(w, center_w + crop_w // 2)
            y2 = min(h, center_h + crop_h // 2)

            roi = image[y1:y2, x1:x2]
            bbox = (x1, y1, x2, y2)

        elif method == 'largest_connected':
            # Bounding box of the largest connected foreground component.
            if len(image.shape) == 3:
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            else:
                gray = image

            # Otsu binarization separates the scan sector from background.
            _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

            contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if contours:
                largest_contour = max(contours, key=cv2.contourArea)
                x, y, w, h = cv2.boundingRect(largest_contour)
                roi = image[y:y+h, x:x+w]
                bbox = (x, y, x+w, y+h)
            else:
                roi = image
                bbox = (0, 0, image.shape[1], image.shape[0])

        else:  # auto
            # Prefer the connected-component crop; fall back to the
            # center crop if it fails for any reason.
            try:
                roi, bbox = self.extract_roi(image, 'largest_connected')
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit and KeyboardInterrupt.
                roi, bbox = self.extract_roi(image, 'center')

        return roi, bbox


class EchoAugmentation:
    """Albumentations-based augmentation pipelines for echocardiograms."""

    def __init__(self,
                 image_size: Tuple[int, int] = (1024, 1024),
                 training: bool = True):
        """
        Build the augmentation pipeline.

        Args:
            image_size: Output image size as (height, width).
            training: If True, build the randomized training pipeline;
                otherwise only resize + normalize.
        """
        self.image_size = image_size
        self.training = training
        self.logger = get_logger("EchoAugmentation")

        if training:
            self.transform = A.Compose([
                # BUG FIX: RandomScale must run BEFORE the final Resize.
                # Placed after it (as originally written), half of the
                # training samples came out at a random spatial size,
                # which breaks tensor batching downstream.
                A.RandomScale(scale_limit=0.2, p=0.5),
                A.Resize(height=image_size[0], width=image_size[1]),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.3),
                A.Rotate(limit=15, p=0.5),
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                    p=0.5
                ),
                A.GaussNoise(var_limit=(10.0, 50.0), p=0.3),
                A.Blur(blur_limit=3, p=0.2),
                A.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                ),
                ToTensorV2()
            ])
        else:
            # Deterministic validation/test pipeline: resize + normalize only.
            self.transform = A.Compose([
                A.Resize(height=image_size[0], width=image_size[1]),
                A.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                ),
                ToTensorV2()
            ])

    def __call__(self, image: np.ndarray, mask: Optional[np.ndarray] = None) -> Dict[str, torch.Tensor]:
        """
        Apply the pipeline to an image (and optional mask).

        Args:
            image: Input image (H, W, C).
            mask: Optional segmentation mask; receives the same spatial
                transforms as the image.

        Returns:
            Dict with key 'image' (and 'mask' when a mask was given).
        """
        if mask is not None:
            transformed = self.transform(image=image, mask=mask)
            return {
                'image': transformed['image'],
                'mask': transformed['mask']
            }
        else:
            transformed = self.transform(image=image)
            return {
                'image': transformed['image']
            }


def create_preprocessing_pipeline(config: Dict[str, Any]) -> EchoPreprocessor:
    """
    Build an EchoPreprocessor from a configuration dictionary.

    Args:
        config: Configuration mapping. Recognized keys: ``image_size``,
            ``normalize``, ``mean``, ``std`` and a nested
            ``quality_filter`` section; every key has a sensible default.

    Returns:
        A configured EchoPreprocessor instance.
    """
    # Quality-filter thresholds come from the optional nested section.
    qf_cfg = config.get('quality_filter', {})
    quality_filter = ImageQualityFilter(
        min_contrast=qf_cfg.get('min_contrast', 0.1),
        min_brightness=qf_cfg.get('min_brightness', 0.1),
        max_blur=qf_cfg.get('max_blur', 0.8),
        min_sharpness=qf_cfg.get('min_sharpness', 0.1)
    )

    # Remaining options feed the preprocessor directly.
    return EchoPreprocessor(
        image_size=config.get('image_size', (1024, 1024)),
        normalize=config.get('normalize', True),
        mean=config.get('mean', (0.485, 0.456, 0.406)),
        std=config.get('std', (0.229, 0.224, 0.225)),
        quality_filter=quality_filter
    )


def test_preprocessor():
    """Smoke-test the preprocessor and the quality filter on random data."""
    logger = get_logger("TestPreprocessor")

    # Random uint8 frame standing in for a real echo image.
    test_image = np.random.randint(0, 255, size=(512, 512, 3), dtype=np.uint8)

    # Run the default preprocessing pipeline and report the results.
    processed = EchoPreprocessor().preprocess_image(test_image)
    for message in (
        f"原始图像尺寸: {test_image.shape}",
        f"处理后图像尺寸: {processed.shape}",
        f"处理后图像范围: [{processed.min():.3f}, {processed.max():.3f}]",
    ):
        logger.info(message)

    # Exercise the quality filter with its default thresholds.
    is_good = ImageQualityFilter().is_good_quality(test_image)
    logger.info(f"图像质量: {'良好' if is_good else '不佳'}")


# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    test_preprocessor()
