"""EDA处理器自定义算法示例

演示如何创建和注册自定义EDA处理算法，包括：
- 自定义预处理算法
- 自定义峰值检测算法
- 自定义指标计算算法
- 自定义质量评估算法
- 算法注册和使用

适合需要特定处理需求或研究新算法的用户。
"""

import numpy as np
import pandas as pd
from typing import Dict, Any, List, Tuple, Optional, Callable
from scipy import signal, stats
from scipy.optimize import curve_fit
import logging

# 导入EDA处理器模块
from ..core.registry import register_algorithm, get_algorithm, list_available_algorithms
from ..core.pipeline import process_eda_pipeline
from ..config.metrics_definition import EDA_METRICS_DEFINITION, get_metric_info

# Configure logging for this example module
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# ============================================================================
# 自定义预处理算法
# ============================================================================

def preprocess_adaptive_filter(eda_data: np.ndarray,
                             sampling_rate: float,
                             **kwargs) -> Dict[str, Any]:
    """Adaptive-filter preprocessing.

    Splits the signal into fixed-length windows, estimates the noise level
    of each window, and low-pass filters it with a cutoff/order chosen from
    that noise level. The result is then detrended and cleaned of outliers.

    Args:
        eda_data: raw EDA samples.
        sampling_rate: sampling rate in Hz.
        **kwargs:
            adaptation_window: window length in seconds (default: 30).
            base_cutoff: base low-pass cutoff in Hz (default: 1.0).
            noise_threshold: noise level above which filtering is
                strengthened (default: 0.05).

    Returns:
        Dict with 'success', 'processed_signal' and, on success,
        'parameters_used' and 'quality_metrics'.
    """
    try:
        win_seconds = kwargs.get('adaptation_window', 30)  # seconds
        cutoff_base = kwargs.get('base_cutoff', 1.0)  # Hz
        noise_limit = kwargs.get('noise_threshold', 0.05)

        win_len = int(win_seconds * sampling_rate)
        result_signal = eda_data.copy()

        total_windows = len(eda_data) // win_len + 1

        for win_idx in range(total_windows):
            lo = win_idx * win_len
            hi = min(lo + win_len, len(eda_data))

            # Windows shorter than half the nominal size are left untouched.
            if hi - lo < win_len // 2:
                continue

            chunk = eda_data[lo:hi]
            noise = estimate_noise_level(chunk)

            # Pick filter strength from the local noise estimate:
            # noisy -> aggressive, clean -> gentle, otherwise standard.
            if noise > noise_limit:
                fc, order = cutoff_base * 0.5, 6
            elif noise < noise_limit * 0.3:
                fc, order = cutoff_base * 1.5, 2
            else:
                fc, order = cutoff_base, 4

            # Only filter below Nyquist; skip windows too short for a
            # stable zero-phase pass.
            if fc < sampling_rate / 2:
                sos = signal.butter(order, fc,
                                    btype='low', fs=sampling_rate, output='sos')
                if len(chunk) > 3 * order:
                    result_signal[lo:hi] = signal.sosfiltfilt(sos, chunk)

        # Remove slow drift.
        result_signal = detrend_adaptive(result_signal, sampling_rate)

        # Detect and repair outliers.
        result_signal = handle_outliers_adaptive(result_signal)

        return {
            'success': True,
            'processed_signal': result_signal,
            'parameters_used': {
                'adaptation_window': win_seconds,
                'base_cutoff': cutoff_base,
                'noise_threshold': noise_limit,
                'num_windows': total_windows
            },
            'quality_metrics': {
                'noise_reduction': calculate_noise_reduction(eda_data, result_signal),
                'signal_preservation': calculate_signal_preservation(eda_data, result_signal)
            }
        }

    except Exception as e:
        logger.error(f"自适应滤波预处理失败: {str(e)}")
        return {
            'success': False,
            'processed_signal': eda_data,
            'error': str(e)
        }

def preprocess_wavelet_denoising(eda_data: np.ndarray,
                               sampling_rate: float,
                               **kwargs) -> Dict[str, Any]:
    """Wavelet-shrinkage denoising.

    Decomposes the signal with a discrete wavelet transform, shrinks the
    detail coefficients with a universal threshold, and reconstructs.
    Falls back to the adaptive filter when PyWavelets is unavailable.

    Args:
        eda_data: raw EDA samples.
        sampling_rate: sampling rate in Hz (used only by the fallback).
        **kwargs:
            wavelet: wavelet family name (default: 'db4').
            levels: decomposition depth (default: 6).
            threshold_mode: shrinkage mode, e.g. 'soft' (default) or 'hard'.

    Returns:
        Dict with 'success', 'processed_signal' and, on success,
        'parameters_used' and 'quality_metrics'.
    """
    try:
        import pywt

        wavelet_name = kwargs.get('wavelet', 'db4')
        depth = kwargs.get('levels', 6)
        shrink_mode = kwargs.get('threshold_mode', 'soft')

        # Multi-level wavelet decomposition.
        bands = pywt.wavedec(eda_data, wavelet_name, level=depth)

        # Universal threshold; sigma is the MAD-based noise estimate taken
        # from the finest detail band.
        sigma = np.median(np.abs(bands[-1])) / 0.6745
        thr = sigma * np.sqrt(2 * np.log(len(eda_data)))

        # Shrink every detail band, keep the approximation untouched.
        shrunk = [bands[0]] + [pywt.threshold(band, thr, mode=shrink_mode)
                               for band in bands[1:]]

        denoised = pywt.waverec(shrunk, wavelet_name)

        # Reconstruction can come back one sample longer than the input.
        if len(denoised) != len(eda_data):
            denoised = denoised[:len(eda_data)]

        return {
            'success': True,
            'processed_signal': denoised,
            'parameters_used': {
                'wavelet': wavelet_name,
                'levels': depth,
                'threshold_mode': shrink_mode,
                'threshold': thr
            },
            'quality_metrics': {
                'snr_improvement': calculate_snr_improvement(eda_data, denoised),
                'mse': np.mean((eda_data - denoised) ** 2)
            }
        }

    except ImportError:
        logger.warning("PyWavelets未安装，使用备用滤波方法")
        return preprocess_adaptive_filter(eda_data, sampling_rate, **kwargs)
    except Exception as e:
        logger.error(f"小波去噪预处理失败: {str(e)}")
        return {
            'success': False,
            'processed_signal': eda_data,
            'error': str(e)
        }

# ============================================================================
# 自定义峰值检测算法
# ============================================================================

def detect_peaks_template_matching(eda_data: np.ndarray,
                                 sampling_rate: float,
                                 **kwargs) -> Dict[str, Any]:
    """Detect SCR peaks via normalised cross-correlation with a template.

    Args:
        eda_data: EDA signal samples.
        sampling_rate: sampling rate in Hz.
        **kwargs:
            template_duration: template length in seconds (default: 10).
            correlation_threshold: minimum correlation (default: 0.6).
            min_peak_distance: minimum peak spacing in seconds (default: 1).

    Returns:
        Dict with 'success', 'peaks', 'peak_indices', 'parameters_used'
        and 'detection_metrics' (or 'error' on failure).
    """
    try:
        template_duration = kwargs.get('template_duration', 10)  # seconds
        correlation_threshold = kwargs.get('correlation_threshold', 0.6)
        min_peak_distance = kwargs.get('min_peak_distance', 1)  # seconds

        # Canonical SCR shape used as the matching template.
        template = create_scr_template(template_duration, sampling_rate)

        # Normalised cross-correlation: raw correlation divided by the
        # template norm and the norm of each aligned signal window.
        raw_corr = np.correlate(eda_data, template, mode='valid')
        window_norms = np.array([np.linalg.norm(eda_data[i:i + len(template)])
                                 for i in range(len(raw_corr))])
        corr = raw_corr / (np.linalg.norm(template) * window_norms)

        spacing = int(min_peak_distance * sampling_rate)
        peak_indices, properties = signal.find_peaks(
            corr,
            height=correlation_threshold,
            distance=spacing
        )

        # Shift from correlation-lag coordinates to signal coordinates
        # (template centre), then drop anything past the signal end.
        half_template = len(template) // 2
        peak_indices = peak_indices + half_template
        peak_indices = peak_indices[peak_indices < len(eda_data)]

        peaks_info = []
        for rank, idx in enumerate(peak_indices):
            info = analyze_peak_characteristics(
                eda_data, idx, sampling_rate
            )
            info['correlation_score'] = properties['peak_heights'][rank] if rank < len(properties['peak_heights']) else corr[idx - half_template]
            peaks_info.append(info)

        return {
            'success': True,
            'peaks': peaks_info,
            'peak_indices': peak_indices.tolist(),
            'parameters_used': {
                'template_duration': template_duration,
                'correlation_threshold': correlation_threshold,
                'min_peak_distance': min_peak_distance,
                'template_length': len(template)
            },
            'detection_metrics': {
                'num_peaks': len(peak_indices),
                'peak_rate': len(peak_indices) / (len(eda_data) / sampling_rate) * 60,  # per minute
                'mean_correlation': np.mean([p['correlation_score'] for p in peaks_info]) if peaks_info else 0
            }
        }

    except Exception as e:
        logger.error(f"模板匹配峰值检测失败: {str(e)}")
        return {
            'success': False,
            'peaks': [],
            'peak_indices': [],
            'error': str(e)
        }

def detect_peaks_machine_learning(eda_data: np.ndarray,
                                sampling_rate: float,
                                **kwargs) -> Dict[str, Any]:
    """Machine-learning style SCR peak detection.

    Extracts per-window features, scores each window with a rule-based
    classifier (a stand-in for a trained model), keeps windows above the
    confidence threshold, and applies non-maximum suppression.

    Args:
        eda_data: EDA signal samples.
        sampling_rate: sampling rate in Hz.
        **kwargs:
            feature_window: feature window length in seconds (default: 5).
            confidence_threshold: minimum classifier score (default: 0.7).

    Returns:
        Dict with 'success', 'peaks', 'peak_indices', 'parameters_used'
        and 'detection_metrics' (or 'error' on failure).
    """
    try:
        feature_window = kwargs.get('feature_window', 5)  # seconds
        confidence_threshold = kwargs.get('confidence_threshold', 0.7)

        window_samples = int(feature_window * sampling_rate)
        # extract_ml_features slides its window by window_samples // 2
        # (50% overlap), so the i-th feature vector starts at i * hop.
        hop = window_samples // 2

        features = extract_ml_features(eda_data, sampling_rate, window_samples)

        # Rule-based stand-in for a trained classifier.
        peak_probabilities = classify_peaks_rule_based(features)

        candidate_indices = np.where(peak_probabilities > confidence_threshold)[0]

        # Map feature-window indices to window-centre sample positions.
        # BUG FIX: the original multiplied by window_samples, ignoring the
        # 50% hop used during feature extraction, so positions were off.
        peak_indices = candidate_indices * hop + window_samples // 2

        # BUG FIX: filter scores together with indices; the original
        # filtered only the indices, so non_maximum_suppression could
        # index its `indices` array out of bounds.
        in_range = peak_indices < len(eda_data)
        peak_indices = peak_indices[in_range]
        candidate_scores = peak_probabilities[candidate_indices][in_range]

        # Keep only the locally strongest candidates.
        peak_indices = non_maximum_suppression(
            peak_indices, candidate_scores,
            min_distance=int(1.0 * sampling_rate)
        )

        peaks_info = []
        for peak_idx in peak_indices:
            peak_info = analyze_peak_characteristics(
                eda_data, peak_idx, sampling_rate
            )
            # Invert the window-centre mapping to recover the feature index.
            feature_idx = (peak_idx - window_samples // 2) // hop
            if 0 <= feature_idx < len(peak_probabilities):
                peak_info['ml_confidence'] = peak_probabilities[feature_idx]
            peaks_info.append(peak_info)

        return {
            'success': True,
            'peaks': peaks_info,
            'peak_indices': peak_indices.tolist(),
            'parameters_used': {
                'feature_window': feature_window,
                'confidence_threshold': confidence_threshold,
                'num_features': len(features[0]) if features else 0
            },
            'detection_metrics': {
                'num_peaks': len(peak_indices),
                'peak_rate': len(peak_indices) / (len(eda_data) / sampling_rate) * 60,
                'mean_confidence': np.mean([p['ml_confidence'] for p in peaks_info if 'ml_confidence' in p])
            }
        }

    except Exception as e:
        logger.error(f"机器学习峰值检测失败: {str(e)}")
        return {
            'success': False,
            'peaks': [],
            'peak_indices': [],
            'error': str(e)
        }

# ============================================================================
# 自定义指标计算算法
# ============================================================================

def calculate_metrics_advanced_frequency(eda_data: np.ndarray,
                                       sampling_rate: float,
                                       peaks: List[Dict],
                                       **kwargs) -> Dict[str, Any]:
    """Advanced frequency-domain metrics for an EDA signal.

    Computes band powers over five fixed bands plus spectral shape
    descriptors (centroid, bandwidth, rolloff, flatness, entropy) from a
    Welch power spectral density estimate.

    Args:
        eda_data: EDA signal samples.
        sampling_rate: sampling rate in Hz.
        peaks: detected peaks (not used by this metric set).
        **kwargs: accepted for interface compatibility; unused.

    Returns:
        Dict with 'success', 'metrics' and 'frequency_analysis'
        (or 'error' on failure).
    """
    try:
        metrics: Dict[str, Any] = {}

        # Welch PSD; segment length is capped at 256 samples.
        freqs, psd = signal.welch(eda_data, fs=sampling_rate,
                                  nperseg=min(len(eda_data) // 4, 256))

        frequency_bands = {
            'very_low': (0.0, 0.045),
            'low': (0.045, 0.15),
            'mid': (0.15, 0.25),
            'high': (0.25, 0.4),
            'very_high': (0.4, 2.0)
        }

        # np.trapz was renamed np.trapezoid in NumPy 2.0; same routine.
        _trapz = getattr(np, 'trapezoid', None) or np.trapz

        # Absolute and relative power per band.
        total_power = _trapz(psd, freqs)
        for band, (f_lo, f_hi) in frequency_bands.items():
            selected = (freqs >= f_lo) & (freqs <= f_hi)
            band_power = _trapz(psd[selected], freqs[selected])
            metrics[f'{band}_freq_power'] = band_power
            metrics[f'{band}_freq_power_ratio'] = (band_power / total_power
                                                   if total_power > 0 else 0)

        # Dominant spectral component.
        top = np.argmax(psd)
        metrics['dominant_frequency'] = freqs[top]
        metrics['dominant_power'] = psd[top]

        psd_total = np.sum(psd)

        # Power-weighted mean frequency and its spread.
        metrics['spectral_centroid'] = (np.sum(freqs * psd) / psd_total
                                        if psd_total > 0 else 0)
        centroid = metrics['spectral_centroid']
        metrics['spectral_bandwidth'] = (
            np.sqrt(np.sum(((freqs - centroid) ** 2) * psd) / psd_total)
            if psd_total > 0 else 0)

        # Frequency below which 85% of the cumulative PSD lies.
        cum_power = np.cumsum(psd)
        above = np.where(cum_power >= 0.85 * cum_power[-1])[0]
        metrics['spectral_rolloff'] = freqs[above[0]] if len(above) > 0 else freqs[-1]

        # Flatness: geometric over arithmetic mean of the PSD.
        gmean_psd = stats.gmean(psd[psd > 0]) if np.any(psd > 0) else 0
        amean_psd = np.mean(psd)
        metrics['spectral_flatness'] = gmean_psd / amean_psd if amean_psd > 0 else 0

        # Shannon entropy of the normalised PSD.
        norm_psd = psd / psd_total if psd_total > 0 else psd
        metrics['spectral_entropy'] = -np.sum(norm_psd * np.log2(norm_psd + 1e-12))

        return {
            'success': True,
            'metrics': metrics,
            'frequency_analysis': {
                'frequencies': freqs.tolist(),
                'power_spectral_density': psd.tolist(),
                'frequency_bands': frequency_bands
            }
        }

    except Exception as e:
        logger.error(f"高级频域指标计算失败: {str(e)}")
        return {
            'success': False,
            'metrics': {},
            'error': str(e)
        }

def calculate_metrics_nonlinear_dynamics(eda_data: np.ndarray,
                                       sampling_rate: float,
                                       peaks: List[Dict],
                                       **kwargs) -> Dict[str, Any]:
    """Nonlinear-dynamics metrics for an EDA signal.

    Aggregates entropy-based, fractal and chaos-theoretic complexity
    measures plus recurrence quantification and multiscale-entropy
    statistics computed by the module's helper functions.

    Args:
        eda_data: EDA signal samples.
        sampling_rate: sampling rate in Hz (used by the Lyapunov helper).
        peaks: detected peaks (not used by this metric set).
        **kwargs: accepted for interface compatibility; unused.

    Returns:
        Dict with 'success', 'metrics' and 'nonlinear_analysis'
        (or 'error' on failure).
    """
    try:
        metrics: Dict[str, Any] = {}

        # Entropy-family measures.
        metrics['sample_entropy'] = calculate_sample_entropy(eda_data)
        metrics['approximate_entropy'] = calculate_approximate_entropy(eda_data)

        # Fractal / long-range-correlation measures.
        metrics['dfa_alpha'] = calculate_dfa(eda_data)
        metrics['hurst_exponent'] = calculate_hurst_exponent(eda_data)

        # Chaos-theoretic measures.
        metrics['correlation_dimension'] = calculate_correlation_dimension(eda_data)
        metrics['lyapunov_exponent'] = calculate_lyapunov_exponent(eda_data, sampling_rate)

        # Recurrence quantification analysis.
        metrics.update(calculate_rqa_metrics(eda_data))

        # Multiscale-entropy summary statistics.
        mse_values = calculate_multiscale_entropy(eda_data)
        metrics['multiscale_entropy_mean'] = np.mean(mse_values)
        metrics['multiscale_entropy_std'] = np.std(mse_values)

        return {
            'success': True,
            'metrics': metrics,
            'nonlinear_analysis': {
                'multiscale_entropy_values': mse_values.tolist(),
                'complexity_measures': {
                    'entropy_based': ['sample_entropy', 'approximate_entropy'],
                    'fractal_based': ['dfa_alpha', 'hurst_exponent'],
                    'chaos_based': ['lyapunov_exponent', 'correlation_dimension']
                }
            }
        }

    except Exception as e:
        logger.error(f"非线性动力学指标计算失败: {str(e)}")
        return {
            'success': False,
            'metrics': {},
            'error': str(e)
        }

# ============================================================================
# 自定义质量评估算法
# ============================================================================

def assess_quality_statistical_tests(eda_data: np.ndarray,
                                    sampling_rate: float,
                                    peaks: List[Dict],
                                    **kwargs) -> Dict[str, Any]:
    """Signal-quality assessment based on statistical tests.

    Combines seven statistical sub-scores into a weighted overall score in
    [0, 1] and maps it onto a coarse quality label.

    Args:
        eda_data: EDA signal samples.
        sampling_rate: sampling rate in Hz.
        peaks: detected peaks, used for the peak-quality sub-score.
        **kwargs: accepted for interface compatibility; unused.

    Returns:
        Dict with 'success', 'overall_score', 'quality_level',
        'individual_scores', 'recommendations' and 'statistical_tests'
        (or 'error' on failure).
    """
    try:
        scores: Dict[str, float] = {}

        # 1. Normality (Shapiro-Wilk, capped at 5000 samples).
        sample = eda_data[:5000] if len(eda_data) > 5000 else eda_data
        _, normality_p = stats.shapiro(sample)
        scores['normality_score'] = min(1.0, normality_p * 10)  # map to 0-1

        # 2. Stationarity (ADF test).
        scores['stationarity_score'] = perform_adf_test(eda_data)

        # 3. Autocorrelation structure.
        scores['autocorrelation_score'] = analyze_autocorrelation(eda_data)

        # 4. Outlier prevalence.
        scores['outlier_score'] = detect_statistical_outliers(eda_data)

        # 5. Signal-to-noise ratio.
        scores['snr_score'] = estimate_signal_to_noise_ratio(eda_data)

        # 6. Peak quality (zero when no peaks were detected).
        if peaks:
            scores['peak_quality_score'] = assess_peaks_statistical_quality(peaks, eda_data, sampling_rate)
        else:
            scores['peak_quality_score'] = 0.0

        # 7. Frequency-domain quality.
        scores['frequency_quality_score'] = assess_frequency_domain_quality(eda_data, sampling_rate)

        # Weighted combination of the sub-scores.
        weights = {
            'normality_score': 0.1,
            'stationarity_score': 0.15,
            'autocorrelation_score': 0.15,
            'outlier_score': 0.2,
            'snr_score': 0.2,
            'peak_quality_score': 0.15,
            'frequency_quality_score': 0.05
        }
        overall = sum(value * weights[name] for name, value in scores.items())

        # Map the weighted score onto a quality label.
        if overall >= 0.8:
            level = 'excellent'
        elif overall >= 0.6:
            level = 'good'
        elif overall >= 0.4:
            level = 'fair'
        else:
            level = 'poor'

        return {
            'success': True,
            'overall_score': overall,
            'quality_level': level,
            'individual_scores': scores,
            'recommendations': generate_quality_recommendations(scores),
            'statistical_tests': {
                'normality_p_value': normality_p,
                'num_outliers': count_outliers(eda_data),
                'signal_length': len(eda_data),
                'sampling_rate': sampling_rate
            }
        }

    except Exception as e:
        logger.error(f"统计质量评估失败: {str(e)}")
        return {
            'success': False,
            'overall_score': 0.0,
            'quality_level': 'unknown',
            'error': str(e)
        }

# ============================================================================
# 辅助函数
# ============================================================================

def estimate_noise_level(signal: np.ndarray) -> float:
    """Estimate the noise level of a signal.

    Uses the standard deviation of the first difference (dominated by
    high-frequency content) scaled by 1/sqrt(2), since differencing
    doubles the variance of uncorrelated noise.
    """
    first_diff = np.diff(signal)
    return np.std(first_diff) / np.sqrt(2)

def detrend_adaptive(signal: np.ndarray, sampling_rate: float) -> np.ndarray:
    """Remove a polynomial trend whose order scales with signal length.

    Orders: cubic for > 1000 samples, quadratic for > 300, linear
    otherwise.  Note: `sampling_rate` is accepted for interface symmetry
    with the other preprocessing helpers but is not used by the fit.
    """
    n = len(signal)

    # Longer records can support a higher-order trend model.
    order = 3 if n > 1000 else 2 if n > 300 else 1

    x = np.arange(n)
    fitted_trend = np.polyval(np.polyfit(x, signal, order), x)

    return signal - fitted_trend

def handle_outliers_adaptive(signal: np.ndarray, threshold: float = 3.0) -> np.ndarray:
    """Detect outliers via the modified Z-score and repair them by linear
    interpolation from the nearest non-outlier neighbours.

    Args:
        signal: input samples.
        threshold: modified Z-score magnitude above which a sample is
            treated as an outlier (default: 3.0).

    Returns:
        A copy of `signal` with outliers replaced, or the input object
        itself when no outliers are found.
    """
    median = np.median(signal)
    mad = np.median(np.abs(signal - median))

    # BUG FIX: guard against mad == 0 (e.g. a mostly-constant signal),
    # which previously caused a division by zero.  A tiny epsilon keeps
    # the original flagging behaviour: any deviation from the median is
    # then an outlier, while exact-median samples are not.
    scale = mad if mad > 0 else np.finfo(float).eps
    modified_z_scores = 0.6745 * (signal - median) / scale

    outlier_mask = np.abs(modified_z_scores) > threshold
    if not np.any(outlier_mask):
        return signal

    good = np.where(~outlier_mask)[0]
    bad = np.where(outlier_mask)[0]

    signal_clean = signal.copy()
    if len(good) == 0:
        # Nothing trustworthy to interpolate from; leave values as-is
        # (matches the original fallback behaviour).
        return signal_clean

    # np.interp interpolates between neighbouring good samples and clamps
    # to the first/last good value at the edges, matching the original
    # one-sided fallbacks.
    signal_clean[bad] = np.interp(bad, good, signal[good])
    return signal_clean

def calculate_noise_reduction(original: np.ndarray, processed: np.ndarray) -> float:
    """Fractional reduction in estimated noise level after processing.

    Returns 0.0 when the original contains no measurable noise.
    """
    noise_before = estimate_noise_level(original)
    noise_after = estimate_noise_level(processed)
    return (noise_before - noise_after) / noise_before if noise_before > 0 else 0.0

def calculate_signal_preservation(original: np.ndarray, processed: np.ndarray) -> float:
    """Pearson correlation between the original and processed signals;
    0.0 when the correlation is undefined (e.g. a constant input)."""
    r = np.corrcoef(original, processed)[0, 1]
    return 0.0 if np.isnan(r) else r

def calculate_snr_improvement(original: np.ndarray, processed: np.ndarray) -> float:
    """Relative SNR change from `original` to `processed`.

    SNR is crudely estimated as total variance over half the variance of
    the first difference (high-frequency content treated as noise).
    Returns 0.0 when the original SNR estimate is not positive.
    """
    def _snr(x: np.ndarray) -> float:
        noise_power = np.var(np.diff(x)) / 2  # high-frequency proxy
        return np.var(x) / noise_power if noise_power > 0 else 0

    before = _snr(original)
    after = _snr(processed)

    return (after - before) / before if before > 0 else 0.0

def create_scr_template(duration: float, sampling_rate: float) -> np.ndarray:
    """Build a canonical SCR shape for template matching.

    Fast exponential rise over the first 1.5 s followed by a slower
    exponential recovery over the remaining duration.
    """
    t = np.arange(0, duration, 1 / sampling_rate)

    rise_time = 1.5  # seconds
    recovery_time = duration - rise_time

    template = np.zeros_like(t)

    # Rising edge: saturating exponential toward 1.
    rising = t <= rise_time
    template[rising] = 1 - np.exp(-3 * t[rising] / rise_time)

    # Recovery: exponential decay back toward baseline.
    falling = ~rising
    template[falling] = np.exp(-2 * (t[falling] - rise_time) / recovery_time)

    return template

def analyze_peak_characteristics(eda_data: np.ndarray, peak_idx: int, 
                               sampling_rate: float) -> Dict[str, Any]:
    """Characterise a single SCR peak: amplitude, rise/recovery times and
    area above a local baseline.

    The analysis window spans 2 s before to 8 s after the peak (clipped
    to the signal bounds).  Onset/offset are located with the
    gradient-based helpers; the area is integrated above the mean of the
    onset and offset sample values.
    """
    pre_samples = int(2 * sampling_rate)   # 2 s before the peak
    post_samples = int(8 * sampling_rate)  # 8 s after the peak

    lo = max(0, peak_idx - pre_samples)
    hi = min(len(eda_data), peak_idx + post_samples)

    segment = eda_data[lo:hi]
    local_peak = peak_idx - lo

    onset = find_peak_onset(segment, local_peak)
    offset = find_peak_offset(segment, local_peak)

    # Timing features (zero when onset/offset do not bracket the peak).
    rise = (local_peak - onset) / sampling_rate if onset < local_peak else 0
    recovery = (offset - local_peak) / sampling_rate if offset > local_peak else 0

    # Area over the line between the onset and offset sample values.
    if onset < offset:
        baseline = np.mean([segment[onset], segment[offset]])
        area = np.trapz(segment[onset:offset + 1] - baseline) / sampling_rate
    else:
        area = 0

    return {
        'peak_index': peak_idx,
        'amplitude': eda_data[peak_idx],
        'rise_time': rise,
        'recovery_time': recovery,
        'peak_area': area,
        'onset_index': lo + onset,
        'offset_index': lo + offset
    }

def find_peak_onset(signal: np.ndarray, peak_position: int) -> int:
    """Locate the start of a peak's rising edge.

    Walks backwards from the peak and returns the first index where the
    gradient is non-positive; falls back to index 0.
    """
    if peak_position <= 0:
        return 0

    slope = np.gradient(signal)

    for idx in range(peak_position, 0, -1):
        # The rise is considered to start where the slope stops being positive.
        if slope[idx] <= 0:
            return idx

    return 0

def find_peak_offset(signal: np.ndarray, peak_position: int) -> int:
    """Locate the end of a peak's recovery.

    Walks forward from the peak and returns the first index where the
    gradient magnitude drops below 0.01 (treated as back at baseline);
    falls back to the last index.
    """
    last = len(signal) - 1
    if peak_position >= last:
        return last

    slope = np.gradient(signal)

    for idx in range(peak_position, len(signal)):
        # A near-zero slope marks the return to baseline.
        if abs(slope[idx]) < 0.01:
            return idx

    return last

def extract_ml_features(eda_data: np.ndarray, sampling_rate: float, 
                       window_size: int) -> List[List[float]]:
    """Extract per-window feature vectors for peak classification.

    Windows overlap by 50%.  Each vector holds 14 values: 7 amplitude
    statistics, 4 gradient statistics and 3 spectral descriptors.
    """
    hop = window_size // 2
    vectors: List[List[float]] = []

    for start in range(0, len(eda_data) - window_size, hop):
        seg = eda_data[start:start + window_size]
        grad = np.gradient(seg)
        freqs, psd = signal.welch(seg, fs=sampling_rate, nperseg=len(seg) // 4)

        vectors.append([
            # Amplitude statistics.
            np.mean(seg),
            np.std(seg),
            np.min(seg),
            np.max(seg),
            np.ptp(seg),
            stats.skew(seg),
            stats.kurtosis(seg),
            # Gradient statistics.
            np.mean(grad),
            np.std(grad),
            np.max(grad),
            np.min(grad),
            # Spectral descriptors: total power, dominant frequency, centroid.
            np.sum(psd),
            freqs[np.argmax(psd)],
            np.sum(freqs * psd) / np.sum(psd),
        ])

    return vectors

def classify_peaks_rule_based(features: List[List[float]]) -> np.ndarray:
    """Score each feature window in [0, 1] with four capped heuristics.

    The score is a weighted sum of: amplitude variability, peak-to-peak
    swing, maximum rising slope, and gradient variability.
    """
    scores = np.zeros(len(features))

    for idx, fv in enumerate(features):
        _, std_val, _, _, ptp_val = fv[:5]
        _, std_grad, max_grad, _ = fv[7:11]

        rule_std = min(1.0, std_val / 0.1)    # noticeable variability
        rule_ptp = min(1.0, ptp_val / 0.2)    # sizeable swing
        rule_rise = min(1.0, max_grad / 0.05) if max_grad > 0 else 0  # clear rise
        rule_gvar = min(1.0, std_grad / 0.02)  # changing gradient

        scores[idx] = (0.3 * rule_std + 0.3 * rule_ptp
                       + 0.2 * rule_rise + 0.2 * rule_gvar)

    return scores

def non_maximum_suppression(indices: np.ndarray, scores: np.ndarray, 
                          min_distance: int) -> np.ndarray:
    """Greedy non-maximum suppression.

    Accepts candidates in descending score order, rejecting any candidate
    closer than `min_distance` samples to one already accepted.  The
    result is ordered by acceptance (i.e. by descending score).
    """
    if len(indices) == 0:
        return indices

    accepted = []
    for rank in np.argsort(scores)[::-1]:
        candidate = indices[rank]
        # Keep only if far enough from every already-accepted position.
        if all(abs(candidate - prev) >= min_distance for prev in accepted):
            accepted.append(candidate)

    return np.array(accepted)

# 非线性动力学分析函数
def calculate_sample_entropy(data: np.ndarray, m: int = 2, r: float = None) -> float:
    """Sample-entropy style complexity measure.

    Uses Chebyshev distance between embedded vectors with tolerance `r`
    (default 0.2 * std).  Note: matches the original implementation,
    which keeps self-matches, so this follows the phi(m) - phi(m+1) form
    rather than textbook sample entropy.
    """
    if r is None:
        r = 0.2 * np.std(data)

    N = len(data)

    def _phi(order: int) -> float:
        count_total = N - order + 1
        templates = np.array([data[i:i + order] for i in range(count_total)])
        # Per-template match counts (self-match included), using the
        # Chebyshev (max-abs) distance.
        matches = np.array([
            np.sum(np.max(np.abs(templates - tpl), axis=1) <= r)
            for tpl in templates
        ], dtype=float)
        return np.mean(np.log(matches / count_total))

    return _phi(m) - _phi(m + 1)

def calculate_approximate_entropy(data: np.ndarray, m: int = 2, r: float = None) -> float:
    """Approximate entropy (ApEn) of a 1-D series.

    Uses Chebyshev distance between embedded vectors; the tolerance `r`
    defaults to 0.2 * std(data).  Self-matches are included, per the
    standard ApEn definition.
    """
    if r is None:
        r = 0.2 * np.std(data)

    N = len(data)

    def _phi(order: int) -> float:
        count_total = N - order + 1
        vectors = np.array([data[i:i + order] for i in range(count_total)])
        # Fraction of vectors within tolerance of each template
        # (self-match included).
        ratios = np.array([
            np.sum(np.max(np.abs(vectors - v), axis=1) <= r)
            for v in vectors
        ], dtype=float) / count_total
        return np.mean(np.log(ratios))

    return _phi(m) - _phi(m + 1)

def calculate_dfa(data: np.ndarray) -> float:
    """Detrended fluctuation analysis scaling exponent (alpha).

    The signal profile (cumulative sum of mean-removed data) is split
    into non-overlapping boxes at ~20 log-spaced scales; each box is
    linearly detrended, and the RMS fluctuation is regressed against the
    scale in log-log space.
    """
    n = len(data)

    # Integrated, mean-removed profile.
    profile = np.cumsum(data - np.mean(data))

    # Box sizes from 10 up to a quarter of the record length.
    box_sizes = np.logspace(1, np.log10(n // 4), 20).astype(int)

    rms_per_scale = []

    for size in box_sizes:
        n_boxes = n // size

        total = 0
        for b in range(n_boxes):
            box = profile[b * size:(b + 1) * size]

            # Remove the local linear trend of this box.
            x = np.arange(len(box))
            fit = np.polyval(np.polyfit(x, box, 1), x)

            total += np.mean((box - fit) ** 2)

        rms_per_scale.append(np.sqrt(total / n_boxes))

    # Power-law fit: slope in log-log space is the DFA exponent.
    slope = np.polyfit(np.log10(box_sizes), np.log10(rms_per_scale), 1)
    return slope[0]

def calculate_hurst_exponent(data: np.ndarray) -> float:
    """Estimate the Hurst exponent via rescaled-range (R/S) analysis.

    Computes the mean R/S statistic over 20 logarithmically spaced lag
    sizes and fits a power law; the log-log slope is the Hurst exponent.

    Bug fix: the original fitted ``lags[:len(rs_values)]`` against the
    collected R/S means, which misaligns lags and values whenever a lag
    produces no valid segment (e.g. constant data, S == 0). Valid lags
    are now collected alongside their R/S means.

    Returns:
        Hurst exponent estimate, or 0.5 (uncorrelated noise) when a fit
        is not possible.
    """
    N = len(data)

    # 20 log-spaced window lengths between 10 and N // 2.
    lags = np.logspace(1, np.log10(N // 2), 20).astype(int)

    valid_lags = []
    rs_values = []

    for lag in lags:
        segments = N // lag
        rs_segment = []

        for i in range(segments):
            segment = data[i * lag:(i + 1) * lag]

            # Range of the cumulative deviation from the segment mean.
            cumulative_deviation = np.cumsum(segment - np.mean(segment))
            R = np.max(cumulative_deviation) - np.min(cumulative_deviation)
            S = np.std(segment)

            # Skip flat segments where R/S is undefined.
            if S > 0:
                rs_segment.append(R / S)

        if rs_segment:
            valid_lags.append(lag)
            rs_values.append(np.mean(rs_segment))

    # Fit log(R/S) vs log(lag); keep lags paired with their values.
    if len(rs_values) > 1:
        coeffs = np.polyfit(np.log10(valid_lags), np.log10(rs_values), 1)
        return coeffs[0]

    return 0.5

def calculate_correlation_dimension(data: np.ndarray, m: int = 5) -> float:
    """Estimate the correlation dimension (Grassberger-Procaccia style).

    Embeds the signal in m dimensions, computes the correlation integral
    C(r) over 20 logarithmically spaced radii, and returns the slope of
    log C(r) vs log r over the valid region.

    Fixes vs the original: the O(N^2) pure-Python distance loop is
    replaced with ``scipy.spatial.distance.pdist``, and degenerate input
    (constant signal -> no positive distances, or too-short signal) now
    returns 0.0 instead of crashing on ``np.min`` of an empty array.

    Returns:
        Correlation dimension estimate, or 0.0 when it cannot be computed.
    """
    # Local import: keeps the new dependency scoped to this function.
    from scipy.spatial.distance import pdist

    N = len(data)
    if N <= m:
        return 0.0  # not enough samples to embed

    # Time-delay embedding with unit delay.
    embedded = np.array([data[i:i + m] for i in range(N - m + 1)])

    # Condensed pairwise Euclidean distances (C speed, i < j pairs once).
    distances = pdist(embedded)

    positive = distances[distances > 0]
    if positive.size == 0:
        return 0.0  # constant signal: no usable distance range

    # Radii spanning the smallest positive to the largest distance.
    r_values = np.logspace(np.log10(np.min(positive)),
                           np.log10(np.max(distances)), 20)

    # Correlation integral: fraction of pairs within radius r.
    correlations = [np.sum(distances <= r) / len(distances) for r in r_values]

    log_r = np.log10(r_values)
    log_C = np.log10(np.array(correlations) + 1e-12)  # epsilon guards log(0)

    # Fit only over the finite, non-saturated-low region.
    valid_indices = np.isfinite(log_C) & (log_C > -10)
    if np.sum(valid_indices) > 2:
        coeffs = np.polyfit(log_r[valid_indices], log_C[valid_indices], 1)
        return coeffs[0]

    return 0.0

def calculate_lyapunov_exponent(data: np.ndarray, sampling_rate: float) -> float:
    """Estimate the largest Lyapunov exponent of a 1-D signal.

    Simplified nearest-neighbor divergence method: for each embedded
    point, find a nearby (but not identical) neighbor, advance both one
    step, and average log(d1 / d0). The average is scaled by the
    sampling rate to express divergence per second.

    Args:
        data: 1-D signal.
        sampling_rate: Samples per second (scales the per-step rate).

    Returns:
        Mean log-divergence rate, or 0.0 if no valid neighbor pairs
        were found.
    """
    # Simplified Lyapunov exponent computation
    N = len(data)
    
    # Embedding parameters
    m = 3  # embedding dimension
    tau = 1  # time delay
    
    # Reconstruct the phase space (time-delay embedding)
    embedded = np.array([data[i:i + m * tau:tau] for i in range(N - (m - 1) * tau)])
    
    # Search for nearest neighbors
    lyap_sum = 0
    count = 0
    
    for i in range(len(embedded) - 1):
        distances = np.linalg.norm(embedded - embedded[i], axis=1)
        
        # Exclude the point itself and points that are too far away
        # (only neighbors closer than one std of all distances qualify)
        valid_indices = (distances > 0) & (distances < np.std(distances))
        
        if np.sum(valid_indices) > 0:
            # argmin over the masked subset, then map back to the
            # index in the full `embedded` array
            nearest_idx = np.argmin(distances[valid_indices])
            nearest_idx = np.where(valid_indices)[0][nearest_idx]
            
            # Measure the one-step divergence of the trajectory pair
            if i + 1 < len(embedded) and nearest_idx + 1 < len(embedded):
                d0 = distances[nearest_idx]
                d1 = np.linalg.norm(embedded[i + 1] - embedded[nearest_idx + 1])
                
                if d0 > 0 and d1 > 0:
                    lyap_sum += np.log(d1 / d0)
                    count += 1
    
    if count > 0:
        # Average log-divergence per step, scaled to per-second units
        return lyap_sum / count * sampling_rate
    
    return 0.0

def calculate_rqa_metrics(data: np.ndarray) -> Dict[str, float]:
    """Compute simplified recurrence quantification analysis (RQA) metrics.

    Builds a recurrence matrix from a 3-dimensional time-delay embedding
    (threshold = 0.1 * std of the signal) and derives:
      - recurrence_rate: fraction of recurrent point pairs
      - determinism: number of diagonal lines (length >= 2) divided by
        the number of recurrent points
      - average_diagonal_length: mean length of those diagonal lines

    NOTE(review): this "determinism" counts diagonal *lines* per
    recurrent point, which differs from the textbook DET (fraction of
    recurrent points lying on diagonal lines); sub-lines starting at
    each offset are also counted separately. Presumably intentional for
    this simplified version — confirm before comparing to literature.

    Returns:
        Dict with keys 'recurrence_rate', 'determinism',
        'average_diagonal_length'.
    """
    # Simplified RQA implementation
    N = len(data)
    m = 3  # embedding dimension
    
    # Reconstruct the phase space
    embedded = np.array([data[i:i + m] for i in range(N - m + 1)])
    
    # Build the recurrence (distance-threshold) matrix
    threshold = 0.1 * np.std(data)
    
    recurrence_matrix = np.zeros((len(embedded), len(embedded)))
    for i in range(len(embedded)):
        for j in range(len(embedded)):
            if np.linalg.norm(embedded[i] - embedded[j]) < threshold:
                recurrence_matrix[i, j] = 1
    
    # Compute RQA metrics
    rr = np.sum(recurrence_matrix) / (len(embedded) ** 2)  # recurrence rate
    
    # Determinism: collect lengths of diagonal runs of recurrent points
    det_lines = []
    for i in range(len(embedded) - 2):
        for j in range(len(embedded) - 2):
            if recurrence_matrix[i, j] == 1:
                length = 1
                k = 1
                # Walk down the diagonal while it stays recurrent
                while (i + k < len(embedded) and j + k < len(embedded) and 
                       recurrence_matrix[i + k, j + k] == 1):
                    length += 1
                    k += 1
                if length >= 2:
                    det_lines.append(length)
    
    det = len(det_lines) / np.sum(recurrence_matrix) if np.sum(recurrence_matrix) > 0 else 0
    
    return {
        'recurrence_rate': rr,
        'determinism': det,
        'average_diagonal_length': np.mean(det_lines) if det_lines else 0
    }

def calculate_multiscale_entropy(data: np.ndarray, max_scale: int = 20) -> np.ndarray:
    """Multiscale entropy: sample entropy of coarse-grained copies of the signal.

    Scale s replaces each non-overlapping window of length s with its
    mean; scales whose coarse-grained series is too short (<= 10
    samples) contribute 0. The number of scales is capped so the
    coarsest series keeps at least ~10 points.
    """
    mse = []

    for scale in range(1, min(max_scale + 1, len(data) // 10)):
        # Coarse-grain: mean of each non-overlapping window of length `scale`.
        grained = np.array([
            np.mean(data[start:start + scale])
            for start in range(0, len(data) - scale + 1, scale)
        ])

        # Sample entropy needs a minimum series length to be meaningful.
        mse.append(calculate_sample_entropy(grained) if len(grained) > 10 else 0)

    return np.array(mse)

# 质量评估辅助函数
def perform_adf_test(data: np.ndarray) -> float:
    """Stationarity score in [0, 1] (higher = more stationary).

    Uses the ADF test p-value when statsmodels is available; otherwise
    falls back to a variance-ratio heuristic between the two halves of
    the signal.
    """
    try:
        from statsmodels.tsa.stattools import adfuller
    except ImportError:
        # Fallback: compare the variance of the two halves — a ratio
        # near 1 suggests the series is closer to stationary.
        mid = len(data) // 2
        head, tail = data[:mid], data[mid:]

        tail_var = np.var(tail)
        ratio = np.var(head) / tail_var if tail_var > 0 else 1

        return 1.0 / (1.0 + abs(ratio - 1.0))

    p_value = adfuller(data)[1]

    # Map the p-value into a quality score (small p => stationary => high).
    return 1.0 - p_value if p_value <= 1.0 else 0.0

def analyze_autocorrelation(data: np.ndarray) -> float:
    """Score the strength of the signal's autocorrelation in [0, 1].

    Locates the first zero crossing of the normalized autocorrelation
    function within the first quarter of the signal; a later crossing
    means stronger autocorrelation. Returns 0.5 when no crossing is
    found in the searched range.
    """
    # Full autocorrelation; keep non-negative lags and normalize by lag 0.
    full = np.correlate(data, data, mode='full')
    acf = full[len(full) // 2:]
    acf = acf / acf[0]

    # First lag (up to a quarter of the signal) where the ACF dips to <= 0.
    search_limit = min(len(acf), len(data) // 4)
    first_zero = next((lag for lag in range(1, search_limit) if acf[lag] <= 0), None)

    if first_zero is None:
        return 0.5  # no crossing found: report moderate autocorrelation

    # Later zero crossing => stronger correlation, capped at 1.0.
    return min(1.0, first_zero / (len(data) * 0.1))

def detect_statistical_outliers(data: np.ndarray) -> float:
    """Quality score in [0, 1] based on the fraction of IQR outliers.

    A sample is an outlier when it lies more than 1.5 * IQR outside the
    interquartile range (Tukey fences); the score drops by 10x the
    outlier fraction, floored at 0.
    """
    q1, q3 = np.percentile(data, [25, 75])
    margin = 1.5 * (q3 - q1)

    # Boolean mask of samples outside the fences.
    is_outlier = (data < q1 - margin) | (data > q3 + margin)
    fraction = np.sum(is_outlier) / len(data)

    # Fewer outliers => higher score.
    return max(0.0, 1.0 - fraction * 10)

def estimate_signal_to_noise_ratio(data: np.ndarray) -> float:
    """Rough SNR quality score in [0, 1].

    Treats the signal variance as signal power and half the variance of
    the first difference as noise power, then squashes log10(SNR + 1)
    into [0, 1]. Returns 0.5 when no noise can be estimated.
    """
    signal_power = np.var(data)

    # High-frequency content of the first difference approximates noise.
    noise_power = np.var(np.diff(data)) / 2

    if noise_power <= 0:
        return 0.5  # flat signal: SNR undefined, report a neutral score

    snr = signal_power / noise_power
    # Logarithmic squash into a 0-1 score.
    return min(1.0, np.log10(snr + 1) / 2)

def assess_peaks_statistical_quality(peaks: List[Dict], data: np.ndarray, 
                                   sampling_rate: float) -> float:
    """Score peak quality in [0, 1] from the consistency of inter-peak
    intervals, amplitudes and rise times.

    Each aspect is scored as 1 minus its coefficient of variation
    (floored at 0), or 0.5 when there is too little data to measure
    variation; the result is the mean of the three. Returns 0.0 when
    no peaks were detected.
    """
    if not peaks:
        return 0.0

    def consistency(values) -> float:
        # 1 - CV, floored at 0; neutral 0.5 when variation is unmeasurable.
        if len(values) > 1:
            return max(0.0, 1.0 - np.std(values) / np.mean(values))
        return 0.5

    # Regularity of inter-peak intervals (seconds).
    intervals = np.diff([p['peak_index'] for p in peaks]) / sampling_rate

    # Regularity of peak amplitudes.
    amplitudes = [p['amplitude'] for p in peaks]

    # Regularity of rise times (only peaks reporting a positive one).
    rise_times = [p.get('rise_time', 0) for p in peaks if p.get('rise_time', 0) > 0]

    # Overall score: average of the three consistency aspects.
    return (consistency(intervals) + consistency(amplitudes) + consistency(rise_times)) / 3

def assess_frequency_domain_quality(data: np.ndarray, sampling_rate: float) -> float:
    """Score in [0, 1]: fraction of spectral power below 0.5 Hz.

    EDA activity is concentrated at low frequencies, so a larger
    low-frequency share indicates a cleaner signal. Returns 0.5 when
    the spectrum carries no power at all.
    """
    freqs, psd = signal.welch(data, fs=sampling_rate)

    # Split the spectrum at 0.5 Hz: EDA content lives below it.
    low_power = np.sum(psd[freqs <= 0.5])
    high_power = np.sum(psd[freqs > 0.5])

    total = low_power + high_power
    if total <= 0:
        return 0.5  # degenerate (e.g. constant) signal

    return low_power / total

def count_outliers(data: np.ndarray) -> int:
    """Count the samples outside the 1.5 * IQR Tukey fences."""
    q1, q3 = np.percentile(data, [25, 75])
    fence = 1.5 * (q3 - q1)

    return np.sum((data < q1 - fence) | (data > q3 + fence))

def generate_quality_recommendations(quality_scores: Dict[str, float]) -> List[str]:
    """Translate per-metric quality scores into improvement suggestions.

    Each rule fires when its score falls below the rule's threshold
    (a missing score defaults to 1.0, i.e. "fine"). Falls back to an
    "all good" message when nothing fires.
    """
    # (score key, threshold below which the advice applies, advice text)
    rules = [
        ('outlier_score', 0.7, "检测到较多异常值，建议进行异常值处理"),
        ('snr_score', 0.6, "信噪比较低，建议应用更强的滤波或去噪处理"),
        ('stationarity_score', 0.5, "信号非平稳性较强，建议进行去趋势处理"),
        ('peak_quality_score', 0.6, "峰值质量较低，建议调整峰值检测参数"),
        ('frequency_quality_score', 0.7, "频域特征异常，建议检查采样率和滤波设置"),
    ]

    recommendations = [
        message
        for key, threshold, message in rules
        if quality_scores.get(key, 1.0) < threshold
    ]

    # Nothing fired: the signal looks fine.
    return recommendations or ["信号质量良好，无需特殊处理"]

# ============================================================================
# 算法注册和示例
# ============================================================================

def register_custom_algorithms():
    """Register every custom algorithm with the EDA processor registry.

    Registers preprocessors, peak detectors, metric calculators and
    quality assessors, then prints the registry contents. Returns True
    on success, False (after logging) when any registration fails.
    """
    try:
        # (registry category, algorithm name, implementation)
        registrations = [
            ('preprocessors', 'adaptive_filter', preprocess_adaptive_filter),
            ('preprocessors', 'wavelet_denoising', preprocess_wavelet_denoising),
            ('peak_detectors', 'template_matching', detect_peaks_template_matching),
            ('peak_detectors', 'machine_learning', detect_peaks_machine_learning),
            ('metrics_calculators', 'advanced_frequency', calculate_metrics_advanced_frequency),
            ('metrics_calculators', 'nonlinear_dynamics', calculate_metrics_nonlinear_dynamics),
            ('quality_assessors', 'statistical_tests', assess_quality_statistical_tests),
        ]

        for category, name, func in registrations:
            register_algorithm(category, name, func)

        print("所有自定义算法注册成功！")

        # Show what is now available in the registry.
        print("\n可用算法列表:")
        for category, algorithms in list_available_algorithms().items():
            print(f"  {category}: {', '.join(algorithms)}")

        return True

    except Exception as e:
        logger.error(f"算法注册失败: {str(e)}")
        return False

def custom_algorithm_example():
    """End-to-end demo: register the custom algorithms, generate test
    data, run the pipeline with them, and print the results."""
    print("=" * 60)
    print("自定义算法使用示例")
    print("=" * 60)
    
    # 1. Register the custom algorithms; abort if registration fails.
    print("1. 注册自定义算法...")
    if not register_custom_algorithms():
        print("算法注册失败！")
        return
    
    # 2. Generate synthetic test data.
    print("\n2. 生成测试数据...")
    duration = 300  # 5 minutes
    sampling_rate = 4.0
    
    eda_data = generate_test_eda_data(duration, sampling_rate)
    
    # 3. Process the data with the custom algorithms.
    print("\n3. 使用自定义算法处理...")
    
    # Pipeline configuration selecting the custom algorithms by name.
    custom_config = {
        'preprocessing': {
            'algorithm': 'adaptive_filter',
            'params': {
                'adaptation_window': 30,
                'base_cutoff': 1.0,
                'noise_threshold': 0.05
            }
        },
        'peak_detection': {
            'algorithm': 'template_matching',
            'params': {
                'template_duration': 10,
                'correlation_threshold': 0.6,
                'min_peak_distance': 1
            }
        },
        'metrics_calculation': {
            'algorithm': 'nonlinear_dynamics',
            'params': {}
        },
        'quality_assessment': {
            'algorithm': 'statistical_tests',
            'params': {}
        }
    }
    
    # Run the processing pipeline.
    result = process_eda_pipeline(eda_data, sampling_rate, custom_config)
    
    if result['success']:
        print(f"处理成功！")
        print(f"检测到 {len(result['peaks'])} 个峰值")
        print(f"计算了 {len(result['metrics'])} 个指标")
        print(f"质量评估分数: {result['quality']['overall_score']:.3f}")
        
        # Show a few key metrics when they were computed.
        print("\n关键指标:")
        metrics = result['metrics']
        if 'sample_entropy' in metrics:
            print(f"  样本熵: {metrics['sample_entropy']:.4f}")
        if 'dfa_alpha' in metrics:
            print(f"  DFA指数: {metrics['dfa_alpha']:.4f}")
        if 'hurst_exponent' in metrics:
            print(f"  Hurst指数: {metrics['hurst_exponent']:.4f}")
        
        # Show per-metric quality scores.
        print("\n质量评估详情:")
        quality_scores = result['quality']['individual_scores']
        for metric, score in quality_scores.items():
            print(f"  {metric}: {score:.3f}")
        
        # Show improvement recommendations.
        print("\n改善建议:")
        for rec in result['quality']['recommendations']:
            print(f"  - {rec}")
    
    else:
        print(f"处理失败: {result.get('error', '未知错误')}")

def algorithm_comparison_example():
    """Run the pipeline with several algorithm configurations on the
    same test data and print a side-by-side comparison table."""
    print("\n" + "=" * 60)
    print("算法比较示例")
    print("=" * 60)
    
    # Register the custom algorithms (also registered by the other demo;
    # presumably re-registration is harmless — verify against the registry).
    register_custom_algorithms()
    
    # Generate the shared test data.
    duration = 120  # 2 minutes
    sampling_rate = 4.0
    eda_data = generate_test_eda_data(duration, sampling_rate)
    
    # Configurations to compare: each swaps in exactly one custom stage,
    # keeping the rest at the 'basic' baseline.
    algorithms_to_compare = {
        '基础算法': {
            'preprocessing': {'algorithm': 'basic'},
            'peak_detection': {'algorithm': 'basic'},
            'metrics_calculation': {'algorithm': 'basic'},
            'quality_assessment': {'algorithm': 'basic'}
        },
        '自适应滤波': {
            'preprocessing': {'algorithm': 'adaptive_filter'},
            'peak_detection': {'algorithm': 'basic'},
            'metrics_calculation': {'algorithm': 'basic'},
            'quality_assessment': {'algorithm': 'basic'}
        },
        '模板匹配': {
            'preprocessing': {'algorithm': 'basic'},
            'peak_detection': {'algorithm': 'template_matching'},
            'metrics_calculation': {'algorithm': 'basic'},
            'quality_assessment': {'algorithm': 'basic'}
        },
        '非线性分析': {
            'preprocessing': {'algorithm': 'basic'},
            'peak_detection': {'algorithm': 'basic'},
            'metrics_calculation': {'algorithm': 'nonlinear_dynamics'},
            'quality_assessment': {'algorithm': 'basic'}
        },
        '统计质量评估': {
            'preprocessing': {'algorithm': 'basic'},
            'peak_detection': {'algorithm': 'basic'},
            'metrics_calculation': {'algorithm': 'basic'},
            'quality_assessment': {'algorithm': 'statistical_tests'}
        }
    }
    
    # Collected results, keyed by configuration name.
    comparison_results = {}
    
    print("正在比较不同算法...")
    for name, config in algorithms_to_compare.items():
        print(f"  处理: {name}")
        result = process_eda_pipeline(eda_data, sampling_rate, config)
        
        if result['success']:
            comparison_results[name] = {
                'num_peaks': len(result['peaks']),
                'quality_score': result['quality']['overall_score'],
                'processing_time': result.get('processing_time', 0),
                'metrics_count': len(result['metrics'])
            }
        else:
            comparison_results[name] = {
                'error': result.get('error', '处理失败')
            }
    
    # Print the comparison table.
    print("\n算法比较结果:")
    print("-" * 80)
    print(f"{'算法名称':<15} {'峰值数量':<10} {'质量分数':<10} {'指标数量':<10} {'状态':<15}")
    print("-" * 80)
    
    for name, results in comparison_results.items():
        if 'error' in results:
            print(f"{name:<15} {'N/A':<10} {'N/A':<10} {'N/A':<10} {results['error']:<15}")
        else:
            print(f"{name:<15} {results['num_peaks']:<10} {results['quality_score']:<10.3f} "
                  f"{results['metrics_count']:<10} {'成功':<15}")

def generate_test_eda_data(duration: float, sampling_rate: float) -> np.ndarray:
    """Synthesize a deterministic test EDA signal.

    The signal combines a slowly drifting tonic level (SCL), randomly
    placed phasic responses (SCRs) with a fast exponential rise and a
    slow exponential recovery, and additive Gaussian noise. The RNG is
    seeded, so repeated calls with the same arguments return the same
    array.

    Args:
        duration: Signal length in seconds.
        sampling_rate: Samples per second.

    Returns:
        1-D array of length duration * sampling_rate.
    """
    t = np.arange(0, duration, 1 / sampling_rate)

    # Tonic level: two slow sinusoidal drifts around a baseline of 2.
    scl = 2.0 + 0.5 * np.sin(2 * np.pi * t / 60) + 0.2 * np.sin(2 * np.pi * t / 20)

    # Phasic component, filled in event by event below.
    scr = np.zeros_like(t)

    np.random.seed(42)  # deterministic output for reproducible examples
    num_events = int(duration / 30)  # on average one SCR every 30 s

    for _ in range(num_events):
        # Draw placement and shape parameters for this event.
        onset_idx = int(np.random.uniform(10, duration - 10) * sampling_rate)
        rise_duration = np.random.uniform(1, 3)       # 1-3 s rise
        recovery_duration = np.random.uniform(5, 15)  # 5-15 s recovery
        amplitude = np.random.uniform(0.1, 0.8)

        rise_samples = int(rise_duration * sampling_rate)
        recovery_samples = int(recovery_duration * sampling_rate)

        if onset_idx + rise_samples + recovery_samples >= len(t):
            continue  # event would run past the end of the signal

        # Exponential rise toward the amplitude...
        rise_t = np.arange(rise_samples) / sampling_rate
        scr[onset_idx:onset_idx + rise_samples] += amplitude * (
            1 - np.exp(-3 * rise_t / rise_duration))

        # ...followed by an exponential decay back toward baseline.
        recovery_t = np.arange(recovery_samples) / sampling_rate
        peak_idx = onset_idx + rise_samples
        scr[peak_idx:peak_idx + recovery_samples] += amplitude * np.exp(
            -2 * recovery_t / recovery_duration)

    # Tonic + phasic components plus measurement noise.
    return scl + scr + np.random.normal(0, 0.02, len(t))

if __name__ == "__main__":
    # Run the demonstration examples
    custom_algorithm_example()
    algorithm_comparison_example()