"""NeuroKit2 PPG处理Pipeline

封装NeuroKit2的PPG处理功能，提供标准化的Pipeline接口。
支持NeuroKit2的各种PPG处理算法和参数配置。

主要处理步骤：
1. 信号清洗（ppg_clean）
2. 峰值检测（ppg_peaks）
3. 心率计算（ppg_rate）
4. HRV分析（hrv）
5. 信号质量评估（ppg_quality）

作者: PPG算法包开发团队
版本: 2.0.0
"""

import numpy as np
import logging
from typing import Dict, Any, Optional, List, Tuple

try:
    from ..core.base_pipeline import BasePipeline
    from ..core.data_types import PPGSignal, PPGResults, HRVResults, QualityResults
    from ..config.pipeline_config import PipelineConfig
except ImportError:
    # 处理直接运行时的导入问题
    import sys
    from pathlib import Path
    sys.path.append(str(Path(__file__).parent.parent))
    from core.base_pipeline import BasePipeline
    from core.data_types import PPGSignal, PPGResults, HRVResults, QualityResults
    from config.pipeline_config import PipelineConfig

logger = logging.getLogger(__name__)

# 尝试导入NeuroKit2
try:
    import neurokit2 as nk
    NEUROKIT2_AVAILABLE = True
    logger.info("NeuroKit2已成功导入")
except ImportError:
    NEUROKIT2_AVAILABLE = False
    logger.warning("NeuroKit2未安装，NeuroKit2Pipeline将不可用")


class NeuroKit2Pipeline(BasePipeline):
    """NeuroKit2 PPG processing pipeline.

    Wraps NeuroKit2's PPG functions (clean -> peaks -> rate -> HRV ->
    quality) behind the standardized ``BasePipeline`` interface. Every
    NeuroKit2 call is guarded; on failure a local fallback implementation
    is used so the pipeline still produces a result.
    """
    
    def __init__(self, config: PipelineConfig):
        """Initialize the NeuroKit2 pipeline.

        Args:
            config: Pipeline configuration.

        Raises:
            ImportError: If NeuroKit2 is not installed.
        """
        if not NEUROKIT2_AVAILABLE:
            raise ImportError("NeuroKit2未安装，请使用 'pip install neurokit2' 安装")
        
        super().__init__(config)
        logger.info(f"初始化NeuroKit2 Pipeline: {self.name}")
    
    def process(self, signal: PPGSignal) -> PPGResults:
        """Main entry point for processing a PPG signal.

        Args:
            signal: Input PPG signal.

        Returns:
            PPGResults: Complete processing results.
        """
        logger.info(f"开始NeuroKit2处理PPG信号 - 长度: {signal.length}, 采样率: {signal.sampling_rate}Hz")
        
        # Prefer the signal's own sampling rate over the configured one.
        if signal.sampling_rate != self.sampling_rate:
            logger.warning(f"信号采样率({signal.sampling_rate})与配置不符({self.sampling_rate})，使用信号采样率")
            self.sampling_rate = signal.sampling_rate
        
        # 1. Preprocessing (signal cleaning)
        preprocessing_results = self.preprocess(signal.data)
        processed_signal = preprocessing_results['cleaned']
        
        # 2. Peak detection
        peaks, peak_properties = self.detect_peaks(processed_signal)
        
        # 3. Heart-rate computation (None when no valid beats found)
        heart_rates = self.calculate_heart_rates(peaks)
        mean_heart_rate = float(np.mean(heart_rates)) if heart_rates is not None else 0.0
        
        # 4. HRV analysis
        hrv_results = self.calculate_hrv(peaks)
        
        # 5. Signal-quality assessment
        quality_results = self.assess_quality(signal.data, processed_signal, peaks)
        
        # Assemble the result object
        results = PPGResults(
            original_signal=signal.data,
            processed_signal=processed_signal,
            peaks=peaks,
            heart_rates=heart_rates,
            mean_heart_rate=mean_heart_rate,
            hrv_results=hrv_results,
            quality_results=quality_results,
            processing_info=self.get_processing_info(),
            pipeline_name=self.name
        )
        
        logger.info(f"NeuroKit2处理完成 - 检测峰值: {len(peaks)}, 平均心率: {mean_heart_rate:.1f} BPM")
        return results
    
    def preprocess(self, signal_data: np.ndarray) -> Dict[str, np.ndarray]:
        """Clean the raw signal with NeuroKit2's ``ppg_clean``.

        Args:
            signal_data: Raw signal samples.

        Returns:
            Dict with 'original' (copy of the input) and 'cleaned'
            signals; on failure 'cleaned' falls back to a copy of the
            original signal.
        """
        logger.info("开始NeuroKit2预处理")
        
        results = {'original': signal_data.copy()}
        
        try:
            # Cleaning method for nk.ppg_clean (default: "elgendi")
            method = self.config.preprocessing.get("neurokit2_method", "elgendi")
            
            # Base parameters for the NeuroKit2 cleaning call
            clean_params = {
                'sampling_rate': self.sampling_rate,
                'method': method
            }
            
            # Merge any extra user-supplied cleaning parameters
            if "neurokit2_params" in self.config.preprocessing:
                clean_params.update(self.config.preprocessing["neurokit2_params"])
            
            cleaned_signal = nk.ppg_clean(signal_data, **clean_params)
            results['cleaned'] = cleaned_signal
            
            self._log_processing_step("neurokit2_cleaning", {
                "method": method,
                "sampling_rate": self.sampling_rate,
                "signal_length": len(signal_data)
            })
            
        except Exception as e:
            logger.error(f"NeuroKit2预处理失败: {e}")
            # Fall back to the unmodified signal if NeuroKit2 fails
            results['cleaned'] = signal_data.copy()
            self._log_processing_step("neurokit2_cleaning_failed", {
                "error": str(e),
                "fallback": "original_signal"
            })
        
        logger.info("NeuroKit2预处理完成")
        return results
    
    def detect_peaks(self, processed_signal: np.ndarray, 
                    quality_segments: Optional[List[Tuple[int, int]]] = None) -> Tuple[np.ndarray, Dict]:
        """Detect systolic peaks with NeuroKit2's ``ppg_peaks``.

        Args:
            processed_signal: Preprocessed (cleaned) signal.
            quality_segments: High-quality segments (unused; kept for
                interface compatibility with other pipelines).

        Returns:
            tuple: (peaks, properties) — peak sample indices and a
            properties dict compatible with scipy.signal.find_peaks.
        """
        logger.info("开始NeuroKit2峰值检测")
        
        try:
            # Peak-detection method for nk.ppg_peaks (default: "elgendi")
            method = self.config.peak_detection.get("neurokit2_method", "elgendi")
            
            # Base parameters for the NeuroKit2 peak-detection call
            peak_params = {
                'sampling_rate': self.sampling_rate,
                'method': method
            }
            
            # Merge any extra user-supplied peak-detection parameters
            if "neurokit2_params" in self.config.peak_detection:
                peak_params.update(self.config.peak_detection["neurokit2_params"])
            
            # nk.ppg_peaks returns (signals_df, info_dict); only the info
            # dict (with the peak indices) is needed here.
            _, peaks_dict = nk.ppg_peaks(processed_signal, **peak_params)
            
            # Extract peak sample positions
            peaks = peaks_dict.get('PPG_Peaks', np.array([]))
            
            # Build a properties dict shaped like scipy.signal.find_peaks'
            properties = {
                'peak_heights': processed_signal[peaks] if len(peaks) > 0 else np.array([]),
                'neurokit2_info': peaks_dict
            }
            
            self._log_processing_step("neurokit2_peak_detection", {
                "method": method,
                "peaks_count": len(peaks),
                "sampling_rate": self.sampling_rate
            })
            
        except Exception as e:
            logger.error(f"NeuroKit2峰值检测失败: {e}")
            # Fallback: simple threshold/distance peak picking via scipy
            from scipy.signal import find_peaks
            peaks, properties = find_peaks(
                processed_signal,
                height=np.percentile(processed_signal, 70),
                distance=int(self.sampling_rate * 0.4)  # min 0.4 s between peaks (~150 BPM cap)
            )
            
            self._log_processing_step("neurokit2_peak_detection_failed", {
                "error": str(e),
                "fallback": "scipy_find_peaks",
                "peaks_count": len(peaks)
            })
        
        logger.info(f"NeuroKit2峰值检测完成 - 检测到 {len(peaks)} 个峰值")
        return peaks, properties
    
    def calculate_hrv(self, peaks: np.ndarray) -> Optional[HRVResults]:
        """Compute HRV metrics with NeuroKit2.

        Args:
            peaks: Peak sample indices (at ``self.sampling_rate``).

        Returns:
            HRVResults: HRV analysis results; the local basic fallback
            result if the NeuroKit2 calls fail, or None when fewer than
            3 peaks are available.
        """
        if len(peaks) < 3:
            logger.warning("峰值数量不足，无法计算HRV")
            return None
        
        try:
            # RR intervals in milliseconds (for summary stats and output)
            rr_intervals = np.diff(peaks) / self.sampling_rate * 1000
            
            # BUGFIX: nk.hrv_time/hrv_frequency/hrv_nonlinear expect PEAK
            # SAMPLE INDICES plus the sampling rate, not RR intervals.
            # The previous code passed the RR intervals themselves as
            # "peaks" at 1000 Hz; NeuroKit2 diffs its input internally,
            # so that yielded meaningless (often negative) intervals.
            hrv_time = nk.hrv_time(peaks, sampling_rate=self.sampling_rate)
            hrv_freq = nk.hrv_frequency(peaks, sampling_rate=self.sampling_rate)
            hrv_nonlinear = nk.hrv_nonlinear(peaks, sampling_rate=self.sampling_rate)
            
            # Extract the headline metrics. The hrv_* calls return one-row
            # DataFrames, so .get(...)[0] reads the single value; the [0]
            # default covers metrics absent in this NeuroKit2 version.
            # NOTE(review): this constructor uses a different field set
            # than _calculate_hrv_basic — assumes HRVResults declares all
            # of these as optional fields; verify against core.data_types.
            results = HRVResults(
                # Time-domain metrics
                rmssd=float(hrv_time.get('HRV_RMSSD', [0])[0]),
                sdnn=float(hrv_time.get('HRV_SDNN', [0])[0]),
                pnn50=float(hrv_time.get('HRV_pNN50', [0])[0]),
                mean_rr=float(np.mean(rr_intervals)),
                std_rr=float(np.std(rr_intervals)),
                
                # Frequency-domain metrics
                lf_power=float(hrv_freq.get('HRV_LF', [0])[0]),
                hf_power=float(hrv_freq.get('HRV_HF', [0])[0]),
                lf_hf_ratio=float(hrv_freq.get('HRV_LFHF', [0])[0]),
                total_power=float(hrv_freq.get('HRV_TP', [0])[0]),
                
                # Non-linear metrics (Poincaré)
                sd1=float(hrv_nonlinear.get('HRV_SD1', [0])[0]),
                sd2=float(hrv_nonlinear.get('HRV_SD2', [0])[0]),
                
                # Raw RR series
                rr_intervals=rr_intervals,
                
                # Full NeuroKit2 outputs for downstream inspection
                neurokit2_results={
                    'time': hrv_time.to_dict() if hasattr(hrv_time, 'to_dict') else hrv_time,
                    'frequency': hrv_freq.to_dict() if hasattr(hrv_freq, 'to_dict') else hrv_freq,
                    'nonlinear': hrv_nonlinear.to_dict() if hasattr(hrv_nonlinear, 'to_dict') else hrv_nonlinear
                }
            )
            
            self._log_processing_step("neurokit2_hrv_analysis", {
                "rr_intervals_count": len(rr_intervals),
                "mean_rr": results.mean_rr,
                "rmssd": results.rmssd,
                "sdnn": results.sdnn
            })
            
            return results
            
        except Exception as e:
            logger.error(f"NeuroKit2 HRV计算失败: {e}")
            # Fall back to the local basic HRV implementation
            return self._calculate_hrv_basic(peaks)
    
    def assess_quality(self, original_signal: np.ndarray, 
                      processed_signal: np.ndarray, 
                      peaks: np.ndarray) -> Optional[QualityResults]:
        """Assess signal quality with NeuroKit2.

        Args:
            original_signal: Raw signal.
            processed_signal: Cleaned signal.
            peaks: Detected peak indices.

        Returns:
            QualityResults: Quality assessment; the local basic fallback
            assessment on failure.
        """
        try:
            # BUGFIX: nk.ppg_quality returns a per-sample quality trace,
            # not a scalar. Previously the raw array was mixed into the
            # np.mean([...]) below, which raised and silently forced the
            # fallback path every time. Aggregate it to one scalar first.
            quality_trace = nk.ppg_quality(processed_signal, sampling_rate=self.sampling_rate)
            quality_score = float(np.nanmean(quality_trace))
            
            # Complementary local quality indicators
            snr = self._calculate_snr(processed_signal)
            peak_regularity = self._calculate_peak_regularity(peaks)
            
            # Combined score (SNR normalized to roughly 0-1 by /20)
            overall_score = float(np.mean([quality_score, snr / 20, peak_regularity]))
            
            # NOTE(review): this constructor uses a different field set
            # than _assess_quality_basic — assumes QualityResults declares
            # all of these as optional fields; verify against data_types.
            results = QualityResults(
                quality_score=quality_score,
                snr=snr,
                peak_regularity=peak_regularity,
                quality_grade=self._grade_quality(overall_score),
                quality_segments=None,
                artifacts_ratio=0.0  # placeholder; not currently computed
            )
            
            self._log_processing_step("neurokit2_quality_assessment", {
                "neurokit2_score": float(quality_score),
                "overall_score": overall_score,
                "quality_grade": results.quality_grade
            })
            
            return results
            
        except Exception as e:
            logger.error(f"NeuroKit2质量评估失败: {e}")
            # Fall back to the local basic quality assessment
            return self._assess_quality_basic(original_signal, processed_signal, peaks)

    def calculate_heart_rates(self, peaks: np.ndarray) -> Optional[np.ndarray]:
        """Compute instantaneous heart rates from peak indices.

        Rates are derived from RR intervals and filtered against the
        configured physiological range (with a wider fallback range).

        Args:
            peaks: Peak sample indices.

        Returns:
            Array of valid heart rates in BPM, or None when fewer than
            2 peaks exist or no rate survives filtering.
        """
        if len(peaks) < 2:
            logger.warning("峰值数量不足，无法计算心率")
            return None

        # RR intervals in ms -> instantaneous BPM
        rr_intervals = np.diff(peaks) / self.sampling_rate * 1000
        heart_rates = 60000 / rr_intervals

        hr_range = self.config.peak_detection.get("heart_rate_range", [40, 150])
        valid_mask = (heart_rates >= hr_range[0]) & (heart_rates <= hr_range[1])

        if np.sum(valid_mask) == 0:
            # Nothing in the primary range: retry with the wider fallback
            fallback_range = self.config.peak_detection.get("fallback_range", [30, 200])
            valid_mask = (heart_rates >= fallback_range[0]) & (heart_rates <= fallback_range[1])
            if np.sum(valid_mask) == 0:
                logger.warning("没有有效的心率数据")
                return None

        return heart_rates[valid_mask]

    def _calculate_hrv_basic(self, peaks: np.ndarray) -> Optional[HRVResults]:
        """Local basic HRV computation (fallback when NeuroKit2 fails).

        Mirrors the custom pipeline's implementation: time-domain stats,
        a histogram-based triangular index, and an optional Welch-based
        frequency analysis of the resampled RR series.
        """
        if not self.config.hrv_analysis.get("enabled", True):
            return None
        if len(peaks) < 3:
            logger.warning("峰值数量不足，无法计算HRV")
            return None
        rr_intervals = np.diff(peaks) / self.sampling_rate * 1000
        # Optional sigma-threshold outlier rejection on the RR series
        if self.config.hrv_analysis["outlier_removal"]["enabled"]:
            threshold = self.config.hrv_analysis["outlier_removal"]["threshold"]
            mean_rr = np.mean(rr_intervals)
            std_rr = np.std(rr_intervals)
            if std_rr > 0:
                valid_mask = np.abs(rr_intervals - mean_rr) <= threshold * std_rr
                rr_intervals = rr_intervals[valid_mask]
        if len(rr_intervals) < 2:
            logger.warning("清理后RR间隔数据不足")
            return None
        # Time-domain metrics
        mean_nn = float(np.mean(rr_intervals))
        sdnn = float(np.std(rr_intervals, ddof=1))
        rmssd = float(np.sqrt(np.mean(np.diff(rr_intervals) ** 2)))
        diff_rr = np.diff(rr_intervals)
        nn50 = np.sum(np.abs(diff_rr) > 50)
        nn20 = np.sum(np.abs(diff_rr) > 20)
        pnn50 = float(nn50 / len(rr_intervals) * 100)
        pnn20 = float(nn20 / len(rr_intervals) * 100)
        hist, _ = np.histogram(rr_intervals, bins=20)
        triangular_index = float(len(rr_intervals) / np.max(hist)) if np.max(hist) > 0 else 0.0
        vlf_power = lf_power = hf_power = total_power = 0.0
        lf_nu = hf_nu = lf_hf_ratio = 0.0
        if self.config.hrv_analysis["frequency_analysis"]["enabled"]:
            try:
                from scipy.signal import welch
                fa_cfg = self.config.hrv_analysis["frequency_analysis"]
                resampling_rate = fa_cfg.get("resampling_rate", 4.0)
                # Resample the irregular RR series onto a uniform grid
                t_rr = np.cumsum(rr_intervals) / 1000
                t_resampled = np.arange(t_rr[0], t_rr[-1], 1 / resampling_rate)
                rr_resampled = np.interp(t_resampled, t_rr, rr_intervals)
                # Optionally remove the linear trend before the PSD
                if fa_cfg.get("detrend_before_psd", True):
                    rr_detrended = rr_resampled - np.polyval(np.polyfit(t_resampled, rr_resampled, 1), t_resampled)
                    series_for_psd = rr_detrended
                else:
                    series_for_psd = rr_resampled
                configured_nperseg = fa_cfg.get("welch_nperseg", None)
                default_nperseg = max(8, len(series_for_psd) // 4)
                nperseg = int(configured_nperseg) if configured_nperseg else default_nperseg
                overlap_ratio = float(fa_cfg.get("welch_overlap", 0.5))
                noverlap = int(max(0, min(overlap_ratio, 0.99)) * nperseg)
                freqs, psd = welch(series_for_psd, fs=resampling_rate, nperseg=nperseg, noverlap=noverlap)
                # Integrate the PSD over the configured VLF/LF/HF bands
                vlf_band = self.config.hrv_analysis["frequency_analysis"]["vlf_band"]
                lf_band = self.config.hrv_analysis["frequency_analysis"]["lf_band"]
                hf_band = self.config.hrv_analysis["frequency_analysis"]["hf_band"]
                vlf_mask = (freqs >= vlf_band[0]) & (freqs < vlf_band[1])
                lf_mask = (freqs >= lf_band[0]) & (freqs < lf_band[1])
                hf_mask = (freqs >= hf_band[0]) & (freqs < hf_band[1])
                vlf_power = float(np.trapz(psd[vlf_mask], freqs[vlf_mask]))
                lf_power = float(np.trapz(psd[lf_mask], freqs[lf_mask]))
                hf_power = float(np.trapz(psd[hf_mask], freqs[hf_mask]))
                total_power = vlf_power + lf_power + hf_power
                # Normalized units are relative to LF+HF only (standard HRV convention)
                total_hf_lf_power = lf_power + hf_power
                if total_hf_lf_power > 0:
                    lf_nu = float(lf_power / total_hf_lf_power * 100)
                    hf_nu = float(hf_power / total_hf_lf_power * 100)
                lf_hf_ratio = float(lf_power / hf_power) if hf_power > 0 else 0.0
            except Exception as e:
                logger.warning(f"频域分析失败: {e}")
        return HRVResults(
            mean_nn=mean_nn,
            sdnn=sdnn,
            rmssd=rmssd,
            pnn50=pnn50,
            pnn20=pnn20,
            triangular_index=triangular_index,
            vlf_power=vlf_power,
            lf_power=lf_power,
            hf_power=hf_power,
            total_power=total_power,
            lf_nu=lf_nu,
            hf_nu=hf_nu,
            lf_hf_ratio=lf_hf_ratio,
            rr_intervals=rr_intervals
        )

    def _assess_quality_basic(self, original_signal: np.ndarray, processed_signal: np.ndarray, 
                               peaks: np.ndarray) -> Optional[QualityResults]:
        """Local basic quality assessment (fallback when NeuroKit2 fails).

        Scores the signal 0-100 from four 25-point criteria: SNR,
        dominant-frequency plausibility, HR-band power ratio, and
        detected-peak count vs. an expected count.
        """
        if not self.config.quality_assessment.get("enabled", True):
            return None
        try:
            from scipy.signal import welch
            # SNR: cleaned signal vs. the removed (residual) component
            signal_power = np.var(processed_signal)
            noise_power = np.var(original_signal - processed_signal)
            snr = float(10 * np.log10(signal_power / noise_power)) if noise_power > 0 else float('inf')
            freqs, psd = welch(processed_signal, fs=self.sampling_rate, 
                               nperseg=min(256, len(processed_signal)//4))
            # Dominant spectral peak (skipping the DC bin)
            peak_freq_idx = np.argmax(psd[1:]) + 1
            peak_freq = float(freqs[peak_freq_idx])
            peak_hr = float(peak_freq * 60)
            # Fraction of total power inside the configured HR band
            hr_range = self.config.quality_assessment["frequency_analysis"]["hr_frequency_range"]
            hr_mask = (freqs >= hr_range[0]) & (freqs <= hr_range[1])
            hr_power = np.trapz(psd[hr_mask], freqs[hr_mask])
            total_power = np.trapz(psd, freqs)
            hr_power_ratio = float(hr_power / total_power * 100) if total_power > 0 else 0.0
            quality_score = 0.0
            if snr > 20:
                quality_score += 25
            elif snr > 10:
                quality_score += 15
            elif snr > 5:
                quality_score += 5
            if 0.8 <= peak_freq <= 3.0:
                quality_score += 25
            elif 0.5 <= peak_freq <= 4.0:
                quality_score += 15
            if hr_power_ratio > 30:
                quality_score += 25
            elif hr_power_ratio > 15:
                quality_score += 15
            elif hr_power_ratio > 5:
                quality_score += 5
            # NOTE(review): 60/60 == 1, so expected_peaks equals the signal
            # duration in seconds, i.e. it assumes a 60 BPM baseline —
            # confirm this is the intended reference rate.
            expected_peaks = len(processed_signal) / self.sampling_rate * 60 / 60
            if len(peaks) > expected_peaks * 0.8:
                quality_score += 25
            elif len(peaks) > expected_peaks * 0.5:
                quality_score += 15
            elif len(peaks) > expected_peaks * 0.3:
                quality_score += 5
            return QualityResults(
                snr=snr,
                peak_freq=peak_freq,
                peak_hr=peak_hr,
                hr_power_ratio=hr_power_ratio,
                quality_score=quality_score,
                quality_segments=[]
            )
        except Exception as e:
            logger.error(f"质量评估失败: {e}")
            return None
    
    def _calculate_snr(self, signal: np.ndarray) -> float:
        """Estimate the signal-to-noise ratio in dB.

        Noise power is estimated from the first difference of the signal;
        the 1e-10 floor guards against division by zero.
        """
        signal_power = np.mean(signal ** 2)
        noise_power = np.var(np.diff(signal))  # first-difference noise estimate
        return 10 * np.log10(signal_power / (noise_power + 1e-10))
    
    def _calculate_peak_regularity(self, peaks: np.ndarray) -> float:
        """Score peak-interval regularity in [0, 1] (1 = perfectly regular).

        Uses 1 minus the coefficient of variation of the inter-peak
        intervals, clamped at 0.
        """
        if len(peaks) < 3:
            return 0.0
        
        intervals = np.diff(peaks)
        cv = np.std(intervals) / (np.mean(intervals) + 1e-10)  # coefficient of variation
        return max(0.0, 1.0 - cv)  # map to a 0-1 regularity score
    
    def _grade_quality(self, score: float) -> str:
        """Map a 0-1 overall score onto a categorical quality grade."""
        if score >= 0.8:
            return "excellent"
        elif score >= 0.6:
            return "good"
        elif score >= 0.4:
            return "fair"
        else:
            return "poor"