"""自定义PPG处理Pipeline

基于原有ppg_batch_processor.py代码实现的PPG处理Pipeline，
去除文件依赖，采用字典和numpy等信号输入类型。

主要处理步骤：
1. 异常值检测和替换
2. Savitzky-Golay平滑滤波
3. 趋势去除
4. 带通滤波
5. 信号质量分段检测
6. 自适应峰值检测
7. HRV分析
8. 信号质量评估

作者: PPG算法包开发团队
版本: 2.0.0
"""

import numpy as np
from typing import Dict, Any, Optional, List, Tuple
import logging
from scipy.signal import find_peaks, welch, butter, filtfilt, savgol_filter

try:
    from ..core.base_pipeline import BasePipeline
    from ..core.data_types import PPGSignal, PPGResults, QualityResults
    from ..config.pipeline_config import PipelineConfig
except ImportError:
    # 处理直接运行时的导入问题
    import sys
    from pathlib import Path
    sys.path.append(str(Path(__file__).parent.parent))
    from core.base_pipeline import BasePipeline
    from core.data_types import PPGSignal, PPGResults, QualityResults
    from config.pipeline_config import PipelineConfig

logger = logging.getLogger(__name__)


class CustomPipeline(BasePipeline):
    """自定义PPG处理Pipeline
    
    基于原有批处理代码的优化实现，支持配置化参数调整
    """
    
    def __init__(self, config: PipelineConfig):
        """Initialize the custom pipeline.

        Args:
            config: Pipeline configuration controlling preprocessing, peak
                detection, HRV analysis and quality assessment. Passed through
                to the base class, which sets ``self.name`` used below.
        """
        super().__init__(config)
        logger.info(f"初始化自定义Pipeline: {self.name}")
    
    def process(self, signal: PPGSignal) -> PPGResults:
        """Run the full PPG processing chain on one signal.

        Pipeline order: preprocessing -> quality segmentation -> peak
        detection -> heart-rate computation -> HRV analysis -> quality
        assessment, then all results are aggregated into one object.

        Args:
            signal: input PPG signal (sample data plus sampling-rate metadata).

        Returns:
            PPGResults: aggregated results of every processing stage.
        """
        logger.info(f"开始处理PPG信号 - 长度: {signal.length}, 采样率: {signal.sampling_rate}Hz")
        
        # Adopt the signal's own sampling rate when it disagrees with the
        # configured one. NOTE(review): this mutates pipeline state, so a
        # mismatched signal affects later process() calls — confirm intended.
        if signal.sampling_rate != self.sampling_rate:
            logger.warning(f"信号采样率({signal.sampling_rate})与配置不符({self.sampling_rate})，使用信号采样率")
            self.sampling_rate = signal.sampling_rate
        
        # 1. Preprocessing (outlier removal, smoothing, detrending, filtering);
        #    'filtered' is always present in the returned dict.
        preprocessing_results = self.preprocess(signal.data)
        processed_signal = preprocessing_results['filtered']
        
        # 2. Locate high-quality signal segments on the processed signal.
        quality_segments = self._detect_quality_segments(processed_signal)
        
        # 3. Peak detection (prefers peaks inside high-quality segments).
        peaks, peak_properties = self.detect_peaks(processed_signal, quality_segments)
        
        # 4. Instantaneous heart rates from peak intervals; 0.0 when none valid.
        heart_rates = self.calculate_heart_rates(peaks)
        mean_heart_rate = float(np.mean(heart_rates)) if heart_rates is not None else 0.0
        
        # 5. HRV analysis (may be None when disabled or data is insufficient).
        hrv_results = self.calculate_hrv(peaks)
        
        # 6. Quality assessment; attach the segments found in step 2 because
        #    assess_quality itself fills quality_segments with [].
        quality_results = self.assess_quality(signal.data, processed_signal, peaks)
        if quality_results:
            quality_results.quality_segments = quality_segments
        
        # Assemble the aggregate result object.
        results = PPGResults(
            original_signal=signal.data,
            processed_signal=processed_signal,
            peaks=peaks,
            heart_rates=heart_rates,
            mean_heart_rate=mean_heart_rate,
            hrv_results=hrv_results,
            quality_results=quality_results,
            processing_info=self.get_processing_info(),
            pipeline_name=self.name
        )
        
        logger.info(f"处理完成 - 检测峰值: {len(peaks)}, 平均心率: {mean_heart_rate:.1f} BPM")
        return results
    
    def preprocess(self, signal_data: np.ndarray) -> Dict[str, np.ndarray]:
        """预处理步骤
        
        参数:
            signal_data: 原始信号数据
            
        返回:
            Dict: 包含各预处理步骤结果的字典
        """
        logger.info("开始预处理步骤")
        
        results = {'original': signal_data.copy()}
        current_signal = signal_data.copy()
        
        # 步骤1: 异常值检测和替换
        if self.config.preprocessing["outlier_detection"]["enabled"]:
            current_signal = self._remove_outliers(current_signal)
            results['cleaned'] = current_signal.copy()
            self._log_processing_step("outlier_removal", {
                "method": self.config.preprocessing["outlier_detection"]["method"],
                "outliers_detected": np.sum(signal_data != current_signal)
            })
        
        # 步骤2: Savitzky-Golay平滑滤波
        if self.config.preprocessing["smoothing"]["enabled"]:
            current_signal = self._smooth_signal(current_signal)
            results['smoothed'] = current_signal.copy()
            self._log_processing_step("smoothing", {
                "method": self.config.preprocessing["smoothing"]["method"],
                "window_length": self.config.preprocessing["smoothing"]["window_length"]
            })
        
        # 步骤3: 趋势去除
        if self.config.preprocessing["detrending"]["enabled"]:
            current_signal = self._detrend_signal(current_signal)
            results['detrended'] = current_signal.copy()
            self._log_processing_step("detrending", {
                "method": self.config.preprocessing["detrending"]["method"],
                "order": self.config.preprocessing["detrending"]["order"]
            })
        
        # 步骤4: 带通滤波
        if self.config.preprocessing["filtering"]["enabled"]:
            current_signal = self._filter_signal(current_signal)
            results['filtered'] = current_signal.copy()
            self._log_processing_step("filtering", {
                "method": self.config.preprocessing["filtering"]["method"],
                "low_cutoff": self.config.preprocessing["filtering"]["low_cutoff"],
                "high_cutoff": self.config.preprocessing["filtering"]["high_cutoff"]
            })
        else:
            results['filtered'] = current_signal.copy()
        
        logger.info("预处理完成")
        return results
    
    def detect_peaks(self, processed_signal: np.ndarray, 
                    quality_segments: Optional[List[Tuple[int, int]]] = None) -> Tuple[np.ndarray, Dict]:
        """峰值检测步骤
        
        参数:
            processed_signal: 预处理后的信号
            quality_segments: 高质量信号段（可选）
            
        返回:
            tuple: (peaks, properties) 峰值位置和属性
        """
        logger.info("开始峰值检测")
        
        # 自适应参数设置
        height_threshold = np.percentile(processed_signal, self.config.peak_detection["height_percentile"])
        distance_min = self.config.peak_detection["distance_min"]
        prominence_threshold = np.std(processed_signal) * self.config.peak_detection["prominence_factor"]
        width_min = self.config.peak_detection["width_min"]
        
        # 在整个信号中检测峰值
        all_peaks, all_properties = find_peaks(
            processed_signal,
            height=height_threshold,
            distance=distance_min,
            prominence=prominence_threshold,
            width=width_min
        )
        
        # 如果提供了质量段，优先使用质量段内的峰值
        if quality_segments:
            quality_mask = np.zeros(len(processed_signal), dtype=bool)
            for start, end in quality_segments:
                quality_mask[start:end] = True
            
            # 过滤出高质量段内的峰值
            quality_peaks = []
            quality_properties = {}
            
            for i, peak in enumerate(all_peaks):
                if quality_mask[peak]:
                    quality_peaks.append(peak)
                    for key in all_properties:
                        if key not in quality_properties:
                            quality_properties[key] = []
                        quality_properties[key].append(all_properties[key][i])
            
            quality_peaks = np.array(quality_peaks)
            for key in quality_properties:
                quality_properties[key] = np.array(quality_properties[key])
            
            # 如果高质量段内峰值足够，使用这些峰值
            if len(quality_peaks) >= 3:
                peaks = quality_peaks
                properties = quality_properties
                self._log_processing_step("peak_detection", {
                    "total_peaks": len(all_peaks),
                    "quality_peaks": len(quality_peaks),
                    "used_quality_segments": True
                })
            else:
                # 否则使用所有峰值
                peaks = all_peaks
                properties = all_properties
                self._log_processing_step("peak_detection", {
                    "total_peaks": len(all_peaks),
                    "quality_peaks": len(quality_peaks),
                    "used_quality_segments": False,
                    "reason": "insufficient_quality_peaks"
                })
        else:
            peaks = all_peaks
            properties = all_properties
            self._log_processing_step("peak_detection", {
                "total_peaks": len(all_peaks),
                "used_quality_segments": False
            })
        
        # 如果峰值仍然不足，尝试更宽松的参数
        if len(peaks) < 3:
            logger.warning("峰值数量不足，尝试更宽松的检测参数")
            loose_peaks, loose_properties = find_peaks(
                processed_signal,
                height=np.percentile(processed_signal, 30),
                distance=max(6, distance_min - 2),
                prominence=prominence_threshold * 0.5,
                width=max(1, width_min - 1)
            )
            
            if len(loose_peaks) > len(peaks):
                peaks = loose_peaks
                properties = loose_properties
                self._log_processing_step("peak_detection_fallback", {
                    "fallback_peaks": len(loose_peaks),
                    "reason": "insufficient_peaks"
                })
        
        logger.info(f"峰值检测完成 - 检测到 {len(peaks)} 个峰值")
        return peaks, properties

    def calculate_heart_rates(self, peaks: np.ndarray) -> Optional[np.ndarray]:
        """计算瞬时心率（管道自有实现）"""
        if len(peaks) < 2:
            logger.warning("峰值数量不足，无法计算心率")
            return None

        rr_intervals = np.diff(peaks) / self.sampling_rate * 1000
        heart_rates = 60000 / rr_intervals

        hr_range = self.config.peak_detection.get("heart_rate_range", [40, 150])
        valid_mask = (heart_rates >= hr_range[0]) & (heart_rates <= hr_range[1])

        if np.sum(valid_mask) == 0:
            fallback_range = self.config.peak_detection.get("fallback_range", [30, 200])
            valid_mask = (heart_rates >= fallback_range[0]) & (heart_rates <= fallback_range[1])
            if np.sum(valid_mask) == 0:
                logger.warning("没有有效的心率数据")
                return None

        return heart_rates[valid_mask]

    def calculate_hrv(self, peaks: np.ndarray) -> Optional[QualityResults]:
        """计算HRV指标（管道自有实现，与批处理脚本对齐）"""
        if not self.config.hrv_analysis.get("enabled", True):
            return None

        if len(peaks) < 3:
            logger.warning("峰值数量不足，无法计算HRV")
            return None

        rr_intervals = np.diff(peaks) / self.sampling_rate * 1000

        if self.config.hrv_analysis["outlier_removal"]["enabled"]:
            threshold = self.config.hrv_analysis["outlier_removal"]["threshold"]
            mean_rr = np.mean(rr_intervals)
            std_rr = np.std(rr_intervals)
            if std_rr > 0:
                valid_mask = np.abs(rr_intervals - mean_rr) <= threshold * std_rr
                rr_intervals = rr_intervals[valid_mask]

        if len(rr_intervals) < 2:
            logger.warning("清理后RR间隔数据不足")
            return None

        mean_nn = float(np.mean(rr_intervals))
        sdnn = float(np.std(rr_intervals, ddof=1))
        rmssd = float(np.sqrt(np.mean(np.diff(rr_intervals) ** 2)))

        diff_rr = np.diff(rr_intervals)
        nn50 = np.sum(np.abs(diff_rr) > 50)
        nn20 = np.sum(np.abs(diff_rr) > 20)
        pnn50 = float(nn50 / len(rr_intervals) * 100)
        pnn20 = float(nn20 / len(rr_intervals) * 100)

        hist, _ = np.histogram(rr_intervals, bins=20)
        triangular_index = float(len(rr_intervals) / np.max(hist)) if np.max(hist) > 0 else 0.0

        vlf_power = lf_power = hf_power = total_power = 0.0
        lf_nu = hf_nu = lf_hf_ratio = 0.0

        if self.config.hrv_analysis["frequency_analysis"]["enabled"]:
            try:
                from scipy.signal import welch
                fa_cfg = self.config.hrv_analysis["frequency_analysis"]
                resampling_rate = fa_cfg.get("resampling_rate", 4.0)
                t_rr = np.cumsum(rr_intervals) / 1000
                t_resampled = np.arange(t_rr[0], t_rr[-1], 1 / resampling_rate)
                rr_resampled = np.interp(t_resampled, t_rr, rr_intervals)

                if fa_cfg.get("detrend_before_psd", True):
                    rr_detrended = rr_resampled - np.polyval(np.polyfit(t_resampled, rr_resampled, 1), t_resampled)
                    series_for_psd = rr_detrended
                else:
                    series_for_psd = rr_resampled

                configured_nperseg = fa_cfg.get("welch_nperseg", None)
                default_nperseg = max(8, len(series_for_psd) // 4)
                nperseg = int(configured_nperseg) if configured_nperseg else default_nperseg
                overlap_ratio = float(fa_cfg.get("welch_overlap", 0.5))
                noverlap = int(max(0, min(overlap_ratio, 0.99)) * nperseg)
                freqs, psd = welch(series_for_psd, fs=resampling_rate, nperseg=nperseg, noverlap=noverlap)

                vlf_band = self.config.hrv_analysis["frequency_analysis"]["vlf_band"]
                lf_band = self.config.hrv_analysis["frequency_analysis"]["lf_band"]
                hf_band = self.config.hrv_analysis["frequency_analysis"]["hf_band"]

                vlf_mask = (freqs >= vlf_band[0]) & (freqs < vlf_band[1])
                lf_mask = (freqs >= lf_band[0]) & (freqs < lf_band[1])
                hf_mask = (freqs >= hf_band[0]) & (freqs < hf_band[1])

                vlf_power = float(np.trapz(psd[vlf_mask], freqs[vlf_mask]))
                lf_power = float(np.trapz(psd[lf_mask], freqs[lf_mask]))
                hf_power = float(np.trapz(psd[hf_mask], freqs[hf_mask]))
                total_power = vlf_power + lf_power + hf_power

                total_hf_lf_power = lf_power + hf_power
                if total_hf_lf_power > 0:
                    lf_nu = float(lf_power / total_hf_lf_power * 100)
                    hf_nu = float(hf_power / total_hf_lf_power * 100)

                lf_hf_ratio = float(lf_power / hf_power) if hf_power > 0 else 0.0
            except Exception as e:
                logger.warning(f"频域分析失败: {e}")

        from ..core.data_types import HRVResults
        return HRVResults(
            mean_nn=mean_nn,
            sdnn=sdnn,
            rmssd=rmssd,
            pnn50=pnn50,
            pnn20=pnn20,
            triangular_index=triangular_index,
            vlf_power=vlf_power,
            lf_power=lf_power,
            hf_power=hf_power,
            total_power=total_power,
            lf_nu=lf_nu,
            hf_nu=hf_nu,
            lf_hf_ratio=lf_hf_ratio,
            rr_intervals=rr_intervals
        )
    
    def _remove_outliers(self, signal_data: np.ndarray) -> np.ndarray:
        """异常值检测和替换"""
        window_size = self.config.preprocessing["outlier_detection"]["window_size"]
        threshold_percentile = self.config.preprocessing["outlier_detection"]["threshold_percentile"]
        
        # 中位数滤波
        median_filtered = []
        for i in range(len(signal_data)):
            start_idx = max(0, i - window_size // 2)
            end_idx = min(len(signal_data), i + window_size // 2 + 1)
            window_data = signal_data[start_idx:end_idx]
            median_filtered.append(np.median(window_data))
        
        median_filtered = np.array(median_filtered)
        
        # 检测异常值
        diff_from_median = np.abs(signal_data - median_filtered)
        outlier_threshold = np.percentile(diff_from_median, threshold_percentile)
        outlier_mask = diff_from_median > outlier_threshold
        
        # 替换异常值
        cleaned_signal = signal_data.copy()
        cleaned_signal[outlier_mask] = median_filtered[outlier_mask]
        
        return cleaned_signal
    
    def _smooth_signal(self, signal_data: np.ndarray) -> np.ndarray:
        """Savitzky-Golay平滑滤波"""
        window_length = self.config.preprocessing["smoothing"]["window_length"]
        polynomial_order = self.config.preprocessing["smoothing"]["polynomial_order"]
        
        # 确保窗口长度为奇数且不超过信号长度
        window_length = min(window_length, len(signal_data))
        if window_length % 2 == 0:
            window_length -= 1
        
        if window_length < polynomial_order + 1:
            window_length = polynomial_order + 2
            if window_length % 2 == 0:
                window_length += 1
        
        return savgol_filter(signal_data, window_length, polynomial_order)
    
    def _detrend_signal(self, signal_data: np.ndarray) -> np.ndarray:
        """趋势去除"""
        order = self.config.preprocessing["detrending"]["order"]
        x = np.arange(len(signal_data))
        trend = np.polyval(np.polyfit(x, signal_data, order), x)
        return signal_data - trend
    
    def _filter_signal(self, signal_data: np.ndarray) -> np.ndarray:
        """带通滤波"""
        order = self.config.preprocessing["filtering"]["order"]
        low_cutoff = self.config.preprocessing["filtering"]["low_cutoff"]
        high_cutoff = self.config.preprocessing["filtering"]["high_cutoff"]
        
        nyquist = self.sampling_rate / 2
        low = low_cutoff / nyquist
        high = high_cutoff / nyquist
        
        # 确保截止频率在有效范围内
        low = max(0.001, min(low, 0.99))
        high = max(low + 0.001, min(high, 0.99))
        
        b, a = butter(order, [low, high], btype='band')
        return filtfilt(b, a, signal_data)
    
    def _detect_quality_segments(self, processed_signal: np.ndarray) -> List[Tuple[int, int]]:
        """检测信号质量分段"""
        if not self.config.quality_assessment["segment_analysis"]["enabled"]:
            return []
        
        window_size = int(self.sampling_rate * self.config.quality_assessment["segment_analysis"]["window_size"])
        threshold_percentile = self.config.quality_assessment["segment_analysis"]["quality_threshold_percentile"]
        min_duration = int(self.sampling_rate * self.config.quality_assessment["segment_analysis"]["min_segment_duration"])
        
        # 计算局部信号质量指标
        quality_scores = []
        for i in range(window_size, len(processed_signal)):
            window_data = processed_signal[i-window_size:i]
            
            # 计算局部质量指标
            local_std = np.std(window_data)
            local_mean = np.mean(np.abs(window_data))
            local_snr = local_mean / (local_std + 1e-6)  # 避免除零
            
            quality_scores.append(local_snr * 10)  # 放大评分
        
        quality_scores = np.array(quality_scores)
        
        # 确定质量阈值
        quality_threshold = np.percentile(quality_scores, threshold_percentile)
        
        # 标记高质量段
        high_quality_mask = quality_scores >= quality_threshold
        
        # 找到连续的高质量段
        quality_segments = []
        current_start = None
        
        for i in range(len(high_quality_mask)):
            if high_quality_mask[i] and current_start is None:
                current_start = i + window_size
            elif not high_quality_mask[i] and current_start is not None:
                if i + window_size - current_start >= min_duration:
                    quality_segments.append((current_start, i + window_size))
                current_start = None
        
        # 处理最后一个段
        if current_start is not None and len(processed_signal) - current_start >= min_duration:
            quality_segments.append((current_start, len(processed_signal)))
        
        self._log_processing_step("quality_segmentation", {
            "segments_count": len(quality_segments),
            "total_quality_points": sum(end - start for start, end in quality_segments),
            "quality_ratio": sum(end - start for start, end in quality_segments) / len(processed_signal)
        })
        
        return quality_segments

    def assess_quality(self, original_signal: np.ndarray, processed_signal: np.ndarray, 
                      peaks: np.ndarray) -> Optional[QualityResults]:
        """评估信号质量
        
        参数:
            original_signal: 原始信号
            processed_signal: 处理后信号
            peaks: 检测到的峰值
            
        返回:
            Optional[QualityResults]: 质量评估结果
        """
        if not self.config.quality_assessment.get("enabled", True):
            return None
        
        try:
            from scipy.signal import welch
            
            # 信噪比计算
            signal_power = np.var(processed_signal)
            noise_power = np.var(original_signal - processed_signal)
            snr = float(10 * np.log10(signal_power / noise_power)) if noise_power > 0 else float('inf')
            
            # 频率分析
            freqs, psd = welch(processed_signal, fs=self.sampling_rate, 
                              nperseg=min(256, len(processed_signal)//4))
            peak_freq_idx = np.argmax(psd[1:]) + 1
            peak_freq = float(freqs[peak_freq_idx])
            peak_hr = float(peak_freq * 60)
            
            # 心率频段功率比
            hr_range = self.config.quality_assessment["frequency_analysis"]["hr_frequency_range"]
            hr_mask = (freqs >= hr_range[0]) & (freqs <= hr_range[1])
            hr_power = np.trapz(psd[hr_mask], freqs[hr_mask])
            total_power = np.trapz(psd, freqs)
            hr_power_ratio = float(hr_power / total_power * 100) if total_power > 0 else 0.0
            
            # 峰值规律性计算
            peak_regularity = self._calculate_peak_regularity(peaks)
            
            # 质量评分计算
            quality_score = 0.0
            
            # 信噪比评分 (25分)
            if snr > 20:
                quality_score += 25
            elif snr > 10:
                quality_score += 15
            elif snr > 5:
                quality_score += 5
            
            # 频率合理性评分 (25分)
            if 0.8 <= peak_freq <= 3.0:
                quality_score += 25
            elif 0.5 <= peak_freq <= 4.0:
                quality_score += 15
            
            # 心率功率比评分 (25分)
            if hr_power_ratio > 30:
                quality_score += 25
            elif hr_power_ratio > 15:
                quality_score += 15
            elif hr_power_ratio > 5:
                quality_score += 5
            
            # 峰值规律性评分 (25分)
            if peak_regularity > 0.8:
                quality_score += 25
            elif peak_regularity > 0.6:
                quality_score += 15
            elif peak_regularity > 0.4:
                quality_score += 10
            elif peak_regularity > 0.2:
                quality_score += 5
            
            # 根据评分设置质量等级
            quality_grade = self._grade_quality(quality_score)
            
            self._log_processing_step("quality_assessment", {
                "snr": snr,
                "peak_freq": peak_freq,
                "peak_hr": peak_hr,
                "hr_power_ratio": hr_power_ratio,
                "peak_regularity": peak_regularity,
                "quality_score": quality_score,
                "quality_grade": quality_grade
            })
            
            return QualityResults(
                snr=snr,
                peak_freq=peak_freq,
                peak_hr=peak_hr,
                hr_power_ratio=hr_power_ratio,
                quality_score=quality_score,
                quality_grade=quality_grade,
                peak_regularity=peak_regularity,
                artifacts_ratio=0.0,  # 可以根据需要计算
                quality_segments=[]  # 将在process方法中设置
            )
            
        except Exception as e:
            logger.error(f"质量评估失败: {e}")
            return None
    
    def _calculate_peak_regularity(self, peaks: np.ndarray) -> float:
        """计算峰值规律性
        
        参数:
            peaks: 峰值位置数组
            
        返回:
            float: 规律性评分 (0-1)
        """
        if len(peaks) < 3:
            return 0.0
        
        intervals = np.diff(peaks)
        if len(intervals) == 0:
            return 0.0
        
        # 计算变异系数
        mean_interval = np.mean(intervals)
        std_interval = np.std(intervals)
        cv = std_interval / (mean_interval + 1e-10)  # 变异系数
        
        # 转换为0-1的规律性评分，变异系数越小规律性越高
        regularity = max(0.0, 1.0 - cv)
        return regularity
    
    def _grade_quality(self, score: float) -> str:
        """根据评分给出质量等级
        
        参数:
            score: 质量评分 (0-100)
            
        返回:
            str: 质量等级
        """
        if score >= 80:
            return "excellent"
        elif score >= 60:
            return "good"
        elif score >= 40:
            return "fair"
        else:
            return "poor"