"""EDA数据质量评估算法实现

提供多种EDA信号质量评估功能：
- 基础质量评估：数据完整性、噪声水平等基本指标
- 高级质量评估：信号稳定性、伪影检测等深度分析
- 综合质量评估：整合多个维度的质量评分

所有评估函数返回质量分数和详细的质量报告。
"""

import numpy as np
from scipy import signal, stats
from typing import Dict, Any, Tuple, List, Optional
import logging

logger = logging.getLogger(__name__)

def assess_quality_basic(
    eda_signal: np.ndarray, 
    sampling_rate: float = 4.0,
    context: Optional[Dict[str, Any]] = None,
    peaks: Optional[np.ndarray] = None,
    processed_data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> Tuple[float, Dict[str, Any]]:
    """Basic quality assessment of an EDA signal.

    Scores five low-level aspects (completeness, amplitude range, noise,
    stability, sampling setup) and combines them into a weighted average.

    Args:
        eda_signal: Raw EDA samples.
        sampling_rate: Sampling frequency in Hz.
        context / peaks / processed_data / **kwargs: Accepted for interface
            compatibility with the other assessors; unused here.

    Returns:
        (quality_score, quality_report): score in [0, 1] and a detail dict.
        On any internal error, (0.0, {'error': ..., 'assessment_type': 'basic'}).
    """
    try:
        logger.debug(f"开始基础质量评估，数据长度: {len(eda_signal)}")

        report: Dict[str, Any] = {
            'assessment_type': 'basic',
            'signal_length': len(eda_signal),
            'sampling_rate': sampling_rate
        }

        # (report key, (sub-score, sub-info)) in a fixed order;
        # the weight list below mirrors this order.
        results = [
            ('completeness', _assess_data_completeness(eda_signal)),
            ('range_validity', _assess_data_range(eda_signal)),
            ('noise_level', _assess_noise_level(eda_signal, sampling_rate)),
            ('stability', _assess_signal_stability(eda_signal)),
            ('sampling_quality', _assess_sampling_quality(eda_signal, sampling_rate)),
        ]
        weights = [0.3, 0.2, 0.2, 0.2, 0.1]  # completeness weighs the most

        scores = []
        for key, (sub_score, sub_info) in results:
            report[key] = sub_info
            scores.append(sub_score)

        overall = np.average(scores, weights=weights)

        report['individual_scores'] = {key: sub_score for key, (sub_score, _) in results}
        report['overall_score'] = float(overall)
        report['quality_level'] = _get_quality_level(overall)

        logger.debug(f"基础质量评估完成，总分: {overall:.3f}")
        return float(overall), report

    except Exception as e:
        logger.error(f"基础质量评估失败: {str(e)}")
        return 0.0, {'error': str(e), 'assessment_type': 'basic'}

def assess_quality_advanced(
    eda_signal: np.ndarray, 
    sampling_rate: float = 4.0,
    context: Optional[Dict[str, Any]] = None,
    peaks: Optional[np.ndarray] = None,
    processed_data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> Tuple[float, Dict[str, Any]]:
    """Advanced quality assessment of an EDA signal.

    Runs the basic assessment, then adds artifact detection, segment-wise
    feature consistency, physiological plausibility and frequency-domain
    checks; the result is a weighted average dominated by the basic score.

    Args:
        eda_signal: Raw EDA samples.
        sampling_rate: Sampling frequency in Hz.
        context / peaks / processed_data / **kwargs: Accepted for interface
            compatibility; unused here.

    Returns:
        (quality_score, quality_report): score in [0, 1] and a detail dict.
        On any internal error, (0.0, {'error': ..., 'assessment_type': 'advanced'}).
    """
    try:
        logger.debug("开始高级质量评估")

        basic_score, basic_report = assess_quality_basic(eda_signal, sampling_rate)

        quality_report: Dict[str, Any] = {
            'assessment_type': 'advanced',
            'basic_assessment': basic_report
        }

        # Deep checks in fixed order; the weight list mirrors
        # [basic] + this order.
        checks = [
            ('artifact_detection', _detect_artifacts(eda_signal, sampling_rate)),
            ('signal_consistency', _assess_signal_consistency(eda_signal, sampling_rate)),
            ('physiological_validity', _assess_physiological_validity(eda_signal, sampling_rate)),
            ('frequency_quality', _assess_frequency_domain_quality(eda_signal, sampling_rate)),
        ]

        scores = [basic_score]
        for key, (sub_score, sub_info) in checks:
            quality_report[key] = sub_info
            scores.append(sub_score)

        weights = [0.4, 0.2, 0.2, 0.1, 0.1]  # basic assessment dominates
        overall_score = np.average(scores, weights=weights)

        individual = {'basic_assessment': basic_score}
        individual.update({key: sub_score for key, (sub_score, _) in checks})
        quality_report['individual_scores'] = individual
        quality_report['overall_score'] = float(overall_score)
        quality_report['quality_level'] = _get_quality_level(overall_score)

        # Improvement suggestions derived from the aggregated report.
        quality_report['recommendations'] = _get_quality_recommendations(overall_score, quality_report)

        logger.debug(f"高级质量评估完成，总分: {overall_score:.3f}")
        return float(overall_score), quality_report

    except Exception as e:
        logger.error(f"高级质量评估失败: {str(e)}")
        return 0.0, {'error': str(e), 'assessment_type': 'advanced'}

def assess_quality_comprehensive(
    eda_signal: np.ndarray, 
    sampling_rate: float = 4.0, 
    context: Optional[Dict[str, Any]] = None,
    peaks: Optional[np.ndarray] = None,
    processed_data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> Tuple[float, Dict[str, Any]]:
    """Comprehensive quality assessment, optionally context-aware.

    Runs the advanced assessment and adds temporal-structure and usability
    checks; when experimental `context` is supplied, a contextual check is
    included as well and the weighting changes accordingly.

    Args:
        eda_signal: Raw EDA samples.
        sampling_rate: Sampling frequency in Hz.
        context: Optional experiment context information.
        peaks / processed_data / **kwargs: Accepted for interface
            compatibility; unused here.

    Returns:
        (quality_score, quality_report): score in [0, 1] and a detail dict.
        On any internal error, (0.0, {'error': ..., 'assessment_type': 'comprehensive'}).
    """
    try:
        logger.debug("开始综合质量评估")

        advanced_score, advanced_report = assess_quality_advanced(eda_signal, sampling_rate)

        quality_report = {
            'assessment_type': 'comprehensive',
            'advanced_assessment': advanced_report,
            'context': context or {}
        }

        # Keep (label, score) pairs together so the labels in
        # `final_scores` always line up with the scores.
        # BUG FIX: the previous positional zip mislabeled the temporal and
        # usability scores when no context was supplied ('temporal_quality'
        # was reported under 'contextual_assessment', etc.).
        labeled_scores = [('advanced_assessment', advanced_score)]

        # 1. Contextual relevance (only when context is available).
        if context:
            context_score, context_info = _assess_contextual_quality(eda_signal, sampling_rate, context)
            labeled_scores.append(('contextual_assessment', context_score))
            quality_report['contextual_assessment'] = context_info

        # 2. Temporal-structure quality.
        temporal_score, temporal_info = _assess_temporal_quality(eda_signal, sampling_rate)
        labeled_scores.append(('temporal_quality', temporal_score))
        quality_report['temporal_quality'] = temporal_info

        # 3. Usability for downstream analysis.
        usability_score, usability_info = _assess_data_usability(eda_signal, sampling_rate)
        labeled_scores.append(('data_usability', usability_score))
        quality_report['data_usability'] = usability_info

        # Weighting depends on whether the contextual score participates.
        weights = [0.5, 0.2, 0.15, 0.15] if context else [0.6, 0.2, 0.2]
        scores = [score for _, score in labeled_scores]
        overall_score = np.average(scores, weights=weights)

        quality_report['final_scores'] = dict(labeled_scores)
        quality_report['overall_score'] = float(overall_score)
        quality_report['quality_level'] = _get_quality_level(overall_score)
        quality_report['confidence_level'] = 0.8  # default confidence

        # Compute the recommendations once; both report keys carry the same
        # list (the helper only reads keys unaffected by the first insert).
        recommendations = _get_quality_recommendations(overall_score, quality_report)
        quality_report['detailed_recommendations'] = recommendations
        quality_report['processing_suggestions'] = recommendations

        logger.debug(f"综合质量评估完成，总分: {overall_score:.3f}")
        return float(overall_score), quality_report

    except Exception as e:
        logger.error(f"综合质量评估失败: {str(e)}")
        return 0.0, {'error': str(e), 'assessment_type': 'comprehensive'}

def _assess_data_completeness(eda_signal: np.ndarray) -> Tuple[float, Dict[str, Any]]:
    """评估数据完整性"""
    total_points = len(eda_signal)
    
    if total_points == 0:
        return 0.0, {'valid_ratio': 0.0, 'missing_points': total_points, 'status': 'no_data'}
    
    # 检查NaN和无穷值
    valid_mask = np.isfinite(eda_signal)
    valid_points = np.sum(valid_mask)
    valid_ratio = valid_points / total_points
    
    # 检查连续缺失段
    invalid_mask = ~valid_mask
    if np.any(invalid_mask):
        # 找到连续缺失段
        diff = np.diff(np.concatenate(([False], invalid_mask, [False])).astype(int))
        gap_starts = np.where(diff == 1)[0]
        gap_ends = np.where(diff == -1)[0]
        gap_lengths = gap_ends - gap_starts
        max_gap = np.max(gap_lengths) if len(gap_lengths) > 0 else 0
        num_gaps = len(gap_lengths)
    else:
        max_gap = 0
        num_gaps = 0
    
    # 计算完整性分数
    if valid_ratio >= 0.95:
        score = 1.0
    elif valid_ratio >= 0.90:
        score = 0.8
    elif valid_ratio >= 0.80:
        score = 0.6
    elif valid_ratio >= 0.70:
        score = 0.4
    else:
        score = 0.2
    
    # 如果有大的连续缺失段，降低分数
    if max_gap > total_points * 0.1:  # 超过10%的连续缺失
        score *= 0.5
    
    info = {
        'valid_ratio': float(valid_ratio),
        'missing_points': int(total_points - valid_points),
        'max_continuous_gap': int(max_gap),
        'num_gaps': int(num_gaps),
        'status': 'good' if score >= 0.8 else 'acceptable' if score >= 0.6 else 'poor'
    }
    
    return float(score), info

def _assess_data_range(eda_signal: np.ndarray) -> Tuple[float, Dict[str, Any]]:
    """评估数据范围合理性"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) == 0:
        return 0.0, {'status': 'no_valid_data'}
    
    min_val = np.min(valid_data)
    max_val = np.max(valid_data)
    range_val = max_val - min_val
    
    # EDA信号的合理范围（微西门子）
    reasonable_min = 0.0
    reasonable_max = 50.0
    reasonable_range_min = 0.1
    reasonable_range_max = 20.0
    
    score = 1.0
    issues = []
    
    # 检查最小值
    if min_val < reasonable_min:
        score *= 0.8
        issues.append(f'最小值过低: {min_val:.3f}')
    
    # 检查最大值
    if max_val > reasonable_max:
        score *= 0.8
        issues.append(f'最大值过高: {max_val:.3f}')
    
    # 检查范围
    if range_val < reasonable_range_min:
        score *= 0.7
        issues.append(f'信号范围过小: {range_val:.3f}')
    elif range_val > reasonable_range_max:
        score *= 0.9
        issues.append(f'信号范围过大: {range_val:.3f}')
    
    # 检查异常值
    q1, q3 = np.percentile(valid_data, [25, 75])
    iqr = q3 - q1
    outlier_threshold = 3 * iqr
    outliers = np.sum((valid_data < q1 - outlier_threshold) | (valid_data > q3 + outlier_threshold))
    outlier_ratio = outliers / len(valid_data)
    
    if outlier_ratio > 0.05:  # 超过5%的异常值
        score *= (1 - outlier_ratio)
        issues.append(f'异常值比例过高: {outlier_ratio:.1%}')
    
    info = {
        'min_value': float(min_val),
        'max_value': float(max_val),
        'range': float(range_val),
        'outlier_ratio': float(outlier_ratio),
        'issues': issues,
        'status': 'good' if score >= 0.8 else 'acceptable' if score >= 0.6 else 'poor'
    }
    
    return float(score), info

def _assess_noise_level(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """评估噪声水平"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) < 10:
        return 0.0, {'status': 'insufficient_data'}
    
    # 计算高频噪声（使用高通滤波）
    try:
        nyquist = sampling_rate / 2
        high_cutoff = 0.5 / nyquist  # 0.5Hz高通
        b, a = signal.butter(2, high_cutoff, btype='high')
        high_freq_component = signal.filtfilt(b, a, valid_data)
        noise_power = np.var(high_freq_component)
        signal_power = np.var(valid_data)
        
        if signal_power > 0:
            snr = signal_power / (noise_power + 1e-10)
            snr_db = 10 * np.log10(snr)
        else:
            snr_db = 0
    except:
        snr_db = 0
        noise_power = 0
    
    # 计算信号平滑度（连续差值的方差）
    diff_var = np.var(np.diff(valid_data))
    
    # 评估噪声水平
    if snr_db >= 20:
        noise_score = 1.0
        noise_level = 'low'
    elif snr_db >= 15:
        noise_score = 0.8
        noise_level = 'moderate'
    elif snr_db >= 10:
        noise_score = 0.6
        noise_level = 'high'
    else:
        noise_score = 0.4
        noise_level = 'very_high'
    
    info = {
        'snr_db': float(snr_db),
        'noise_power': float(noise_power),
        'diff_variance': float(diff_var),
        'noise_level': noise_level,
        'status': 'good' if noise_score >= 0.8 else 'acceptable' if noise_score >= 0.6 else 'poor'
    }
    
    return float(noise_score), info

def _assess_signal_stability(eda_signal: np.ndarray) -> Tuple[float, Dict[str, Any]]:
    """评估信号稳定性"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) < 10:
        return 0.0, {'status': 'insufficient_data'}
    
    # 计算趋势稳定性
    x = np.arange(len(valid_data))
    slope, _, r_value, _, _ = stats.linregress(x, valid_data)
    
    # 计算分段稳定性
    n_segments = min(10, len(valid_data) // 10)
    if n_segments >= 2:
        segment_size = len(valid_data) // n_segments
        segment_means = []
        segment_stds = []
        
        for i in range(n_segments):
            start = i * segment_size
            end = (i + 1) * segment_size if i < n_segments - 1 else len(valid_data)
            segment = valid_data[start:end]
            segment_means.append(np.mean(segment))
            segment_stds.append(np.std(segment))
        
        mean_stability = 1 - (np.std(segment_means) / (np.mean(segment_means) + 1e-10))
        std_stability = 1 - (np.std(segment_stds) / (np.mean(segment_stds) + 1e-10))
    else:
        mean_stability = 1.0
        std_stability = 1.0
    
    # 综合稳定性分数
    trend_score = max(0, 1 - abs(slope) * len(valid_data) / (np.std(valid_data) + 1e-10))
    stability_score = (trend_score + mean_stability + std_stability) / 3
    
    info = {
        'trend_slope': float(slope),
        'trend_r_squared': float(r_value**2),
        'mean_stability': float(mean_stability),
        'std_stability': float(std_stability),
        'status': 'stable' if stability_score >= 0.8 else 'moderate' if stability_score >= 0.6 else 'unstable'
    }
    
    return float(stability_score), info

def _assess_sampling_quality(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """评估采样质量"""
    # 检查采样率合理性
    if sampling_rate < 1:
        rate_score = 0.2
        rate_status = 'too_low'
    elif sampling_rate < 2:
        rate_score = 0.6
        rate_status = 'low'
    elif sampling_rate <= 10:
        rate_score = 1.0
        rate_status = 'good'
    else:
        rate_score = 0.9
        rate_status = 'high'
    
    # 检查信号长度
    duration = len(eda_signal) / sampling_rate
    if duration < 10:
        length_score = 0.4
        length_status = 'too_short'
    elif duration < 30:
        length_score = 0.7
        length_status = 'short'
    else:
        length_score = 1.0
        length_status = 'adequate'
    
    overall_score = (rate_score + length_score) / 2
    
    info = {
        'sampling_rate': float(sampling_rate),
        'signal_duration': float(duration),
        'rate_status': rate_status,
        'length_status': length_status,
        'status': 'good' if overall_score >= 0.8 else 'acceptable' if overall_score >= 0.6 else 'poor'
    }
    
    return float(overall_score), info

def _detect_artifacts(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """检测信号伪影"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) < 10:
        return 0.0, {'status': 'insufficient_data'}
    
    artifacts = []
    artifact_score = 1.0
    
    # 1. 检测突然跳跃
    diff = np.abs(np.diff(valid_data))
    threshold = np.percentile(diff, 95) * 3
    jumps = np.sum(diff > threshold)
    if jumps > 0:
        jump_ratio = jumps / len(diff)
        artifact_score *= (1 - jump_ratio)
        artifacts.append(f'突然跳跃: {jumps}次')
    
    # 2. 检测平坦段（可能的传感器故障）
    flat_threshold = np.std(valid_data) * 0.01
    flat_segments = 0
    current_flat = 0
    
    for i in range(1, len(valid_data)):
        if abs(valid_data[i] - valid_data[i-1]) < flat_threshold:
            current_flat += 1
        else:
            if current_flat > sampling_rate * 2:  # 超过2秒的平坦段
                flat_segments += 1
            current_flat = 0
    
    if flat_segments > 0:
        artifact_score *= 0.8
        artifacts.append(f'平坦段: {flat_segments}个')
    
    # 3. 检测饱和（信号达到极值并保持）
    saturation_threshold = 0.95
    high_sat = np.sum(valid_data >= np.percentile(valid_data, 99.5))
    low_sat = np.sum(valid_data <= np.percentile(valid_data, 0.5))
    
    if high_sat > len(valid_data) * 0.05 or low_sat > len(valid_data) * 0.05:
        artifact_score *= 0.7
        artifacts.append('信号饱和')
    
    info = {
        'artifacts_detected': artifacts,
        'artifact_count': len(artifacts),
        'status': 'clean' if artifact_score >= 0.9 else 'minor_artifacts' if artifact_score >= 0.7 else 'significant_artifacts'
    }
    
    return float(artifact_score), info

def _assess_signal_consistency(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """评估信号特征一致性"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) < 100:
        return 0.0, {'status': 'insufficient_data'}
    
    # 分段分析信号特征的一致性
    n_segments = min(5, len(valid_data) // 50)
    segment_size = len(valid_data) // n_segments
    
    segment_features = []
    for i in range(n_segments):
        start = i * segment_size
        end = (i + 1) * segment_size if i < n_segments - 1 else len(valid_data)
        segment = valid_data[start:end]
        
        features = {
            'mean': np.mean(segment),
            'std': np.std(segment),
            'skewness': stats.skew(segment),
            'kurtosis': stats.kurtosis(segment)
        }
        segment_features.append(features)
    
    # 计算特征的变异系数
    consistency_scores = []
    for feature in ['mean', 'std', 'skewness', 'kurtosis']:
        values = [seg[feature] for seg in segment_features]
        if np.mean(values) != 0:
            cv = np.std(values) / abs(np.mean(values))
            consistency_scores.append(max(0, 1 - cv))
        else:
            consistency_scores.append(1.0)
    
    overall_consistency = np.mean(consistency_scores)
    
    info = {
        'segment_count': n_segments,
        'feature_consistency': dict(zip(['mean', 'std', 'skewness', 'kurtosis'], consistency_scores)),
        'overall_consistency': float(overall_consistency),
        'status': 'consistent' if overall_consistency >= 0.8 else 'moderate' if overall_consistency >= 0.6 else 'inconsistent'
    }
    
    return float(overall_consistency), info

def _assess_physiological_validity(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """Check that the signal is physiologically plausible for EDA.

    Four heuristics (baseline level, amplitude range, slew rate, monotonic
    drift) each apply a multiplicative penalty when violated.

    Returns:
        (score, info): score in [0, 1] plus the measured quantities and a
        list of detected issues.
    """
    clean = eda_signal[np.isfinite(eda_signal)]

    if len(clean) < 10:
        return 0.0, {'status': 'insufficient_data'}

    score = 1.0
    issues = []

    # 1. Baseline level — the 10th percentile stands in for the tonic level.
    baseline = np.percentile(clean, 10)
    if baseline < 0.1:
        score *= 0.8
        issues.append('基线过低')
    elif baseline > 10:
        score *= 0.9
        issues.append('基线过高')

    # 2. Amplitude range between the 10th and 90th percentiles.
    amplitude_range = np.percentile(clean, 90) - np.percentile(clean, 10)
    if amplitude_range < 0.05:
        score *= 0.7
        issues.append('变化幅度过小')
    elif amplitude_range > 15:
        score *= 0.8
        issues.append('变化幅度过大')

    # 3. Maximum slew rate, converted to µS per second.
    max_change_rate = np.max(np.abs(np.diff(clean))) * sampling_rate
    if max_change_rate > 5:  # more than 5 µS/s
        score *= 0.8
        issues.append('变化速率过快')

    # 4. EDA should not drift monotonically for most of the record.
    monotonic_length = _find_longest_monotonic_sequence(clean)
    if monotonic_length > len(clean) * 0.5:
        score *= 0.7
        issues.append('长时间单调变化')

    info = {
        'baseline_level': float(baseline),
        'amplitude_range': float(amplitude_range),
        'max_change_rate': float(max_change_rate),
        'longest_monotonic': int(monotonic_length),
        'issues': issues,
        'status': 'physiologically_valid' if score >= 0.8 else 'questionable' if score >= 0.6 else 'invalid'
    }

    return float(score), info

def _assess_frequency_domain_quality(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """评估频域质量"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) < 50:
        return 0.0, {'status': 'insufficient_data'}
    
    try:
        # FFT分析
        fft_vals = np.fft.fft(valid_data - np.mean(valid_data))
        freqs = np.fft.fftfreq(len(valid_data), 1/sampling_rate)
        power_spectrum = np.abs(fft_vals)**2
        
        # 只考虑正频率
        pos_freqs = freqs[:len(freqs)//2]
        pos_power = power_spectrum[:len(power_spectrum)//2]
        
        # 检查频谱特征
        score = 1.0
        
        # 1. 检查低频能量占比（EDA主要是低频信号）
        low_freq_mask = pos_freqs <= 0.5
        low_freq_power = np.sum(pos_power[low_freq_mask])
        total_power = np.sum(pos_power)
        
        if total_power > 0:
            low_freq_ratio = low_freq_power / total_power
            if low_freq_ratio < 0.7:  # 低频能量应该占主导
                score *= 0.8
        else:
            low_freq_ratio = 0
        
        # 2. 检查高频噪声
        high_freq_mask = pos_freqs > 1.0
        if np.any(high_freq_mask):
            high_freq_power = np.sum(pos_power[high_freq_mask])
            high_freq_ratio = high_freq_power / total_power if total_power > 0 else 0
            if high_freq_ratio > 0.2:  # 高频噪声过多
                score *= (1 - high_freq_ratio)
        else:
            high_freq_ratio = 0
        
        # 3. 检查频谱平滑度
        if len(pos_power) > 1:
            spectral_smoothness = 1 - np.std(np.diff(pos_power)) / (np.mean(pos_power) + 1e-10)
            spectral_smoothness = max(0, min(1, spectral_smoothness))
        else:
            spectral_smoothness = 1.0
        
        info = {
            'low_freq_ratio': float(low_freq_ratio),
            'high_freq_ratio': float(high_freq_ratio),
            'spectral_smoothness': float(spectral_smoothness),
            'dominant_frequency': float(pos_freqs[np.argmax(pos_power)]) if len(pos_power) > 0 else 0.0,
            'status': 'good_spectrum' if score >= 0.8 else 'acceptable_spectrum' if score >= 0.6 else 'poor_spectrum'
        }
        
        return float(score), info
        
    except Exception as e:
        return 0.5, {'error': str(e), 'status': 'analysis_failed'}

def _assess_contextual_quality(eda_signal: np.ndarray, sampling_rate: float, 
                             context: Dict[str, Any]) -> Tuple[float, Dict[str, Any]]:
    """基于上下文的质量评估"""
    # 这里可以根据实验类型、被试信息等进行特定的质量评估
    # 目前返回基本评估
    return 0.8, {'status': 'context_based_assessment_placeholder'}

def _assess_temporal_quality(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """评估时间序列质量"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) < 20:
        return 0.0, {'status': 'insufficient_data'}
    
    # 评估时间序列的自相关性
    max_lag = min(50, len(valid_data) // 4)
    autocorr = np.correlate(valid_data, valid_data, mode='full')
    autocorr = autocorr[autocorr.size // 2:]
    autocorr = autocorr / autocorr[0]  # 归一化
    
    # 计算自相关衰减
    decay_score = 1.0
    if len(autocorr) > max_lag:
        lag_10 = min(int(sampling_rate * 2), max_lag)  # 2秒滞后
        if autocorr[lag_10] > 0.8:  # 自相关衰减太慢
            decay_score = 0.7
    
    info = {
        'autocorr_at_2s': float(autocorr[min(int(sampling_rate * 2), len(autocorr)-1)]) if len(autocorr) > 1 else 0.0,
        'temporal_structure': 'good' if decay_score >= 0.8 else 'acceptable',
        'status': 'good_temporal' if decay_score >= 0.8 else 'acceptable_temporal'
    }
    
    return float(decay_score), info

def _assess_data_usability(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """评估数据可用性"""
    valid_data = eda_signal[np.isfinite(eda_signal)]
    
    if len(valid_data) == 0:
        return 0.0, {'status': 'unusable'}
    
    # 基于多个因素评估可用性
    factors = []
    
    # 1. 数据长度
    duration = len(valid_data) / sampling_rate
    if duration >= 60:
        length_score = 1.0
    elif duration >= 30:
        length_score = 0.8
    elif duration >= 10:
        length_score = 0.6
    else:
        length_score = 0.3
    factors.append(length_score)
    
    # 2. 信噪比
    signal_std = np.std(valid_data)
    noise_estimate = np.std(np.diff(valid_data))
    if noise_estimate > 0:
        snr = signal_std / noise_estimate
        snr_score = min(1.0, snr / 10)  # 归一化到0-1
    else:
        snr_score = 1.0
    factors.append(snr_score)
    
    # 3. 动态范围
    dynamic_range = (np.max(valid_data) - np.min(valid_data)) / (np.mean(valid_data) + 1e-10)
    range_score = min(1.0, dynamic_range / 0.5)  # 期望至少50%的相对变化
    factors.append(range_score)
    
    usability_score = np.mean(factors)
    
    info = {
        'duration_score': float(length_score),
        'snr_score': float(snr_score),
        'range_score': float(range_score),
        'overall_usability': float(usability_score),
        'recommended_for_analysis': usability_score >= 0.6,
        'status': 'highly_usable' if usability_score >= 0.8 else 'usable' if usability_score >= 0.6 else 'limited_use'
    }
    
    return float(usability_score), info

def _find_longest_monotonic_sequence(data: np.ndarray) -> int:
    """找到最长的单调序列"""
    if len(data) < 2:
        return len(data)
    
    max_length = 1
    current_length = 1
    
    for i in range(1, len(data)):
        if (data[i] >= data[i-1] and (i == 1 or data[i-1] >= data[i-2])) or \
           (data[i] <= data[i-1] and (i == 1 or data[i-1] <= data[i-2])):
            current_length += 1
        else:
            max_length = max(max_length, current_length)
            current_length = 1
    
    return max(max_length, current_length)

def assess_quality_improved(
    eda_signal: np.ndarray, 
    sampling_rate: float = 4.0,
    context: Optional[Dict[str, Any]] = None,
    peaks: Optional[np.ndarray] = None,
    processed_data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> Tuple[float, Dict[str, Any]]:
    """Improved quality assessment blending several strategies.

    Combines the basic, advanced, rule-based "ML" and adaptive-threshold
    assessments through a normalized weighted average.

    Args:
        eda_signal: Raw EDA samples.
        sampling_rate: Sampling frequency in Hz.
        context / peaks / processed_data / **kwargs: Accepted for interface
            compatibility; unused here.

    Returns:
        (quality_score, quality_report): score in [0, 1] and a detail dict.
        On any internal error, a (0.0, {...}) report with quality 'poor'.
    """
    try:
        logger.debug(f"开始改进质量评估，数据长度: {len(eda_signal)}")

        quality_report: Dict[str, Any] = {
            'assessment_type': 'improved',
            'signal_length': len(eda_signal),
            'sampling_rate': sampling_rate
        }

        weighted_scores = []  # (score, weight) pairs

        # 1. Basic assessment (weight 0.4).
        basic_score, basic_info = assess_quality_basic(eda_signal, sampling_rate)
        quality_report['basic_assessment'] = basic_info
        weighted_scores.append((basic_score, 0.4))

        # 2. Advanced assessment (weight 0.6).
        advanced_score, advanced_info = assess_quality_advanced(eda_signal, sampling_rate)
        quality_report['advanced_assessment'] = advanced_info
        weighted_scores.append((advanced_score, 0.6))

        # 3. Simplified ML-enhanced assessment (weight 0.3).
        ml_score, ml_info = _assess_quality_ml_enhanced(eda_signal, sampling_rate)
        quality_report['ml_enhanced'] = ml_info
        weighted_scores.append((ml_score, 0.3))

        # 4. Adaptive-threshold assessment (weight 0.2).
        adaptive_score, adaptive_info = _assess_quality_adaptive(eda_signal, sampling_rate)
        quality_report['adaptive_assessment'] = adaptive_info
        weighted_scores.append((adaptive_score, 0.2))

        # Normalized weighted average, clamped into [0, 1].
        total_weight = sum(w for _, w in weighted_scores)
        final_score = sum(s * w for s, w in weighted_scores) / total_weight
        final_score = max(0.0, min(1.0, final_score))

        quality_report['final_score'] = final_score
        quality_report['quality_level'] = _get_quality_level(final_score)
        quality_report['recommendations'] = _get_quality_recommendations(final_score, quality_report)

        logger.debug(f"改进质量评估完成，最终分数: {final_score:.3f}")
        return final_score, quality_report

    except Exception as e:
        logger.error(f"改进质量评估失败: {str(e)}")
        return 0.0, {
            'assessment_type': 'improved',
            'error': str(e),
            'final_score': 0.0,
            'quality_level': 'poor'
        }

def _assess_quality_ml_enhanced(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """机器学习增强的质量评估"""
    try:
        # 提取特征
        features = {}
        
        # 统计特征
        valid_mask = ~np.isnan(eda_signal)
        valid_signal = eda_signal[valid_mask]
        
        if len(valid_signal) == 0:
            return 0.0, {'error': '无有效数据'}
        
        features['mean'] = np.mean(valid_signal)
        features['std'] = np.std(valid_signal)
        features['skewness'] = stats.skew(valid_signal)
        features['kurtosis'] = stats.kurtosis(valid_signal)
        
        # 频域特征
        if len(valid_signal) > 10:
            freqs, psd = signal.welch(valid_signal, fs=sampling_rate, nperseg=min(256, len(valid_signal)//4))
            features['dominant_freq'] = freqs[np.argmax(psd)]
            features['spectral_entropy'] = -np.sum(psd * np.log(psd + 1e-12))
        
        # 简化的ML评分（基于经验规则）
        score = 0.5  # 基础分数
        
        # 基于统计特征的评分调整
        if 0.1 <= features['mean'] <= 10.0:  # 合理的EDA范围
            score += 0.2
        if 0.01 <= features['std'] <= 2.0:  # 合理的变异性
            score += 0.2
        if -2 <= features['skewness'] <= 2:  # 合理的偏度
            score += 0.1
        
        score = max(0.0, min(1.0, score))
        
        return score, {
            'features': features,
            'ml_score': score,
            'method': 'rule_based_ml'
        }
        
    except Exception as e:
        logger.warning(f"ML增强评估失败: {str(e)}")
        return 0.5, {'error': str(e)}

def _assess_quality_adaptive(eda_signal: np.ndarray, sampling_rate: float) -> Tuple[float, Dict[str, Any]]:
    """自适应阈值质量评估"""
    try:
        valid_mask = ~np.isnan(eda_signal)
        valid_signal = eda_signal[valid_mask]
        
        if len(valid_signal) == 0:
            return 0.0, {'error': '无有效数据'}
        
        # 自适应阈值计算
        signal_median = np.median(valid_signal)
        signal_mad = np.median(np.abs(valid_signal - signal_median))  # 中位数绝对偏差
        
        # 基于信号特性的自适应阈值
        lower_threshold = signal_median - 3 * signal_mad
        upper_threshold = signal_median + 3 * signal_mad
        
        # 计算在合理范围内的数据比例
        valid_range_mask = (valid_signal >= lower_threshold) & (valid_signal <= upper_threshold)
        valid_ratio = np.sum(valid_range_mask) / len(valid_signal)
        
        # 计算信号稳定性
        if len(valid_signal) > 1:
            diff_signal = np.diff(valid_signal)
            stability_score = 1.0 / (1.0 + np.std(diff_signal))
        else:
            stability_score = 0.0
        
        # 综合评分
        adaptive_score = (valid_ratio * 0.7 + stability_score * 0.3)
        adaptive_score = max(0.0, min(1.0, adaptive_score))
        
        return adaptive_score, {
            'adaptive_thresholds': {
                'lower': float(lower_threshold),
                'upper': float(upper_threshold)
            },
            'valid_ratio': float(valid_ratio),
            'stability_score': float(stability_score),
            'adaptive_score': float(adaptive_score)
        }
        
    except Exception as e:
        logger.warning(f"自适应评估失败: {str(e)}")
        return 0.5, {'error': str(e)}

def _get_quality_level(score: float) -> str:
    """根据分数获取质量等级"""
    if score >= 0.8:
        return 'excellent'
    elif score >= 0.6:
        return 'good'
    elif score >= 0.4:
        return 'fair'
    elif score >= 0.2:
        return 'poor'
    else:
        return 'very_poor'

def _get_quality_recommendations(score: float, quality_report: Dict[str, Any]) -> List[str]:
    """根据质量评估结果提供改进建议"""
    recommendations = []
    
    if score < 0.6:
        recommendations.append("建议检查传感器连接和数据采集设置")
    
    if score < 0.4:
        recommendations.append("数据质量较差，建议重新采集或使用数据清洗算法")
    
    # 基于具体问题的建议
    if 'completeness' in quality_report and quality_report['completeness'].get('missing_ratio', 0) > 0.1:
        recommendations.append("检测到较多缺失数据，建议使用插值方法填补")
    
    if 'noise_assessment' in quality_report and quality_report['noise_assessment'].get('noise_level', 0) > 0.7:
        recommendations.append("信号噪声较大，建议应用滤波算法")
    
    return recommendations