#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TraceData预ProcessModule
用于Side-Channel Attack的TraceData预Process

功能：
- 基线校正
- TraceAlignment（互相关、DTW）
- Filtering（低通、带通、高通、频域）
- Denoising（移动平均、小波）
- Normalization（Z-score、Min-Max、Robust）
- 兴趣点选择（方差、SOST、SNR）
- Outlier检测AndRemove
- Trace重Sampling
- 主成分Analyze（PCA）降维
- 独立成分Analyze（ICA）信号分离
- Trace平均/池化
- 差分TraceAnalyze
- 时间窗口Extract
- 频谱Analyze
- DataSave/Load
- 可视化
"""

import numpy as np
import logging
import os
from typing import Optional, Tuple, List, Union
from scipy import signal
from dataclasses import dataclass, field

@dataclass
class PreprocessingConfig:
    """Configuration for the trace-preprocessing pipeline.

    Each ``enable_*`` flag toggles one pipeline stage in
    ``TracePreprocessor.preprocess``; the remaining fields parameterize
    the corresponding stage.
    """
    enable_alignment: bool = True
    enable_filtering: bool = True  
    enable_normalization: bool = True
    enable_denoising: bool = True
    enable_baseline_correction: bool = True
    enable_outlier_removal: bool = False  # newly added stage, off by default
    enable_resampling: bool = False  # newly added stage, off by default
    
    # Alignment parameters
    alignment_method: str = 'correlation'  # 'correlation' or 'dtw'
    max_alignment_offset: int = 50
    
    # Filtering parameters
    filter_type: str = 'lowpass'  # 'lowpass', 'bandpass', 'highpass'
    filter_cutoff: float = 0.1
    filter_order: int = 4
    
    # Normalization method
    normalization_method: str = 'zscore'  # 'zscore', 'minmax', 'robust'
    
    # Denoising parameters
    denoising_method: str = 'moving_average'  # 'wavelet' or 'moving_average'
    moving_avg_window: int = 5
    wavelet_threshold: float = 0.1
    
    # Outlier detection
    outlier_method: str = 'zscore'  # 'zscore' or 'iqr'
    outlier_threshold: float = 3.0
    
    # Resampling
    resample_factor: float = 1.0  # >1 upsamples, <1 downsamples
    
    # PCA dimensionality reduction (newly added)
    enable_pca: bool = False
    pca_components: int = 50  # number of principal components to keep
    
    # ICA signal separation (newly added)
    enable_ica: bool = False
    ica_components: int = 50
    
    # Trace averaging/pooling (newly added)
    enable_trace_pooling: bool = False
    pooling_method: str = 'average'  # 'average', 'max', 'median'
    pooling_size: int = 2  # pooling window size (samples)
    
    # Frequency-domain filtering (newly added)
    enable_fft_filtering: bool = False
    fft_low_freq: float = 0.0  # low cutoff, in np.fft.fftfreq normalized units
    fft_high_freq: float = 0.5  # high cutoff, in np.fft.fftfreq normalized units
    
    # Time-window extraction (newly added)
    enable_window_extraction: bool = False
    window_start: int = 0
    window_end: Optional[int] = None


class TracePreprocessor:
    """Trace预Process器"""
    
    def __init__(self, config: Optional[PreprocessingConfig] = None):
        """
        Initialize预Process器
        
        Args:
            config: 预ProcessConfiguration
        """
        self.config = config if config is not None else PreprocessingConfig()
        self.logger = logging.getLogger("TracePreprocessor")
        
    def preprocess(self, traces: np.ndarray, 
                  plaintexts: Optional[np.ndarray] = None) -> Tuple[np.ndarray, dict]:
        """
        Complete的Trace预Process流水线
        
        Args:
            traces: 原始TraceData (n_traces, n_samples)
            plaintexts: PlaintextData（用于兴趣点选择）
            
        Returns:
            预Processed的TraceAnd统计Information
        """
        self.logger.info(f"Start预Process {traces.shape[0]} 条Trace")
        
        processed_traces = traces.copy()
        stats = {
            'original_shape': traces.shape,
            'preprocessing_steps': []
        }
        
        # 1. 基线校正
        if self.config.enable_baseline_correction:
            processed_traces = self._baseline_correction(processed_traces)
            stats['preprocessing_steps'].append('baseline_correction')
            self.logger.info("[OK] 基线校正Completed")
        
        # 2. TraceAlignment
        if self.config.enable_alignment:
            processed_traces = self._align_traces(processed_traces)
            stats['preprocessing_steps'].append('alignment')
            self.logger.info("[OK] TraceAlignmentCompleted")
        
        # 3. Filtering
        if self.config.enable_filtering:
            processed_traces = self._apply_filters(processed_traces)
            stats['preprocessing_steps'].append('filtering')
            self.logger.info("[OK] FilteringCompleted")
        
        # 4. Denoising
        if self.config.enable_denoising:
            processed_traces = self._denoise_traces(processed_traces)
            stats['preprocessing_steps'].append('denoising')
            self.logger.info("[OK] DenoisingCompleted")
        
        # 5. Normalization
        if self.config.enable_normalization:
            processed_traces = self._normalize_traces(processed_traces)
            stats['preprocessing_steps'].append('normalization')
            self.logger.info("[OK] NormalizationCompleted")
        
        # 6. OutlierRemove（新增）
        if self.config.enable_outlier_removal:
            processed_traces, outlier_indices = self._remove_outliers(processed_traces)
            stats['preprocessing_steps'].append('outlier_removal')
            stats['outliers_removed'] = len(outlier_indices)
            stats['outlier_indices'] = outlier_indices
            # CalculateHas效索引（保留的Trace索引）
            all_indices = np.arange(traces.shape[0])
            valid_indices = np.delete(all_indices, outlier_indices)
            stats['valid_indices'] = valid_indices
            self.logger.info(f"[OK] OutlierRemoveCompleted，Remove {len(outlier_indices)} 条Trace")
        
        # 7. 重Sampling
        if self.config.enable_resampling and self.config.resample_factor != 1.0:
            processed_traces = self._resample_traces(processed_traces)
            stats['preprocessing_steps'].append('resampling')
            self.logger.info(f"[OK] 重SamplingCompleted，因子: {self.config.resample_factor}")
        
        # 8. 频域Filtering（新增）
        if self.config.enable_fft_filtering:
            processed_traces = self._fft_filtering(processed_traces)
            stats['preprocessing_steps'].append('fft_filtering')
            self.logger.info("[OK] 频域FilteringCompleted")
        
        # 9. Trace池化（新增）
        if self.config.enable_trace_pooling:
            processed_traces = self._trace_pooling(processed_traces)
            stats['preprocessing_steps'].append('trace_pooling')
            self.logger.info(f"[OK] Trace池化Completed，Method: {self.config.pooling_method}")
        
        # 10. 时间窗口Extract（新增）
        if self.config.enable_window_extraction:
            processed_traces = self._window_extraction(processed_traces)
            stats['preprocessing_steps'].append('window_extraction')
            self.logger.info(f"[OK] 时间窗口ExtractCompleted，窗口: [{self.config.window_start}:{self.config.window_end}]")
        
        # 11. PCA降维（新增）
        if self.config.enable_pca:
            processed_traces, pca_info = self._apply_pca(processed_traces)
            stats['preprocessing_steps'].append('pca')
            stats['pca_explained_variance'] = pca_info['explained_variance_ratio']
            self.logger.info(f"[OK] PCA降维Completed，保留{self.config.pca_components}个成分，解释方差: {pca_info['total_variance']:.2%}")
        
        # 12. ICA信号分离（新增）
        if self.config.enable_ica:
            processed_traces = self._apply_ica(processed_traces)
            stats['preprocessing_steps'].append('ica')
            self.logger.info(f"[OK] ICA信号分离Completed，成分数: {self.config.ica_components}")
        
        # Calculate预Process效果统计
        stats.update(self._calculate_preprocessing_stats(traces, processed_traces))
        
        self.logger.info(f"预ProcessCompleted，信噪比提升: {stats['snr_improvement']:.2f}dB")
        
        return processed_traces, stats
    
    def _baseline_correction(self, traces: np.ndarray) -> np.ndarray:
        """基线校正"""
        corrected_traces = []
        
        for trace in traces:
            # UseTraceStartPartial作For基线
            baseline = np.mean(trace[:min(50, len(trace)//10)])
            corrected = trace - baseline
            corrected_traces.append(corrected)
        
        return np.array(corrected_traces)
    
    def _align_traces(self, traces: np.ndarray) -> np.ndarray:
        """TraceAlignment（改进版）"""
        if len(traces) == 0:
            return traces
        
        if self.config.alignment_method == 'correlation':
            return self._correlation_alignment(traces)
        elif self.config.alignment_method == 'dtw':
            return self._dtw_alignment(traces)
        else:
            self.logger.warning(f"未知AlignmentMethod: {self.config.alignment_method}，Usecorrelation")
            return self._correlation_alignment(traces)
    
    def _correlation_alignment(self, traces: np.ndarray) -> np.ndarray:
        """互相关Alignment（改进版）"""
        # Use平均Trace作For参考（更稳定）
        reference = np.mean(traces, axis=0)
        aligned_traces = []
        
        max_offset = self.config.max_alignment_offset
        
        for trace in traces:
            # 只InHas限窗口内Search，提高效率
            search_range = slice(len(reference)//2 - max_offset, 
                               len(reference)//2 + max_offset)
            
            # UseNormalization互相关
            correlation = np.correlate(trace, reference, mode='full')
            # 限制Search范围
            center = len(correlation) // 2
            correlation_window = correlation[center-max_offset:center+max_offset+1]
            
            if len(correlation_window) > 0:
                offset = np.argmax(correlation_window) - max_offset
            else:
                offset = 0
            
            aligned_trace = np.roll(trace, -offset)
            aligned_traces.append(aligned_trace)
        
        return np.array(aligned_traces)
    
    def _dtw_alignment(self, traces: np.ndarray) -> np.ndarray:
        """DTW动态时间规整Alignment"""
        try:
            from dtaidistance import dtw
            reference = np.mean(traces, axis=0)
            aligned_traces = []
            
            for trace in traces:
                # DTWAlignment
                path = dtw.warping_path(reference, trace)
                # 简化：Use线性插值Alignment
                aligned = np.interp(np.arange(len(reference)), 
                                  [p[1] for p in path], trace)
                aligned_traces.append(aligned)
            
            return np.array(aligned_traces)
        except ImportError:
            self.logger.warning("dtaidistance未安装，UsecorrelationAlignment")
            return self._correlation_alignment(traces)
        except Exception as e:
            self.logger.warning(f"DTWAlignmentFailed: {e}，UsecorrelationAlignment")
            return self._correlation_alignment(traces)
    
    def _apply_filters(self, traces: np.ndarray) -> np.ndarray:
        """应用Filtering器"""
        try:
            # 设计低通Filtering器
            b, a = signal.butter(self.config.filter_order, 
                               self.config.filter_cutoff, 'low')
            
            filtered_traces = []
            for trace in traces:
                filtered = signal.filtfilt(b, a, trace)
                filtered_traces.append(filtered)
            
            return np.array(filtered_traces)
        except Exception as e:
            self.logger.warning(f"FilteringFailed，Skip此Step: {e}")
            return traces
    
    def _denoise_traces(self, traces: np.ndarray) -> np.ndarray:
        """TraceDenoising"""
        if self.config.denoising_method == 'moving_average':
            return self._moving_average_denoise(traces)
        elif self.config.denoising_method == 'wavelet':
            return self._wavelet_denoise(traces)
        else:
            return traces
    
    def _moving_average_denoise(self, traces: np.ndarray) -> np.ndarray:
        """移动平均Denoising"""
        window_size = self.config.moving_avg_window
        denoised_traces = []
        
        for trace in traces:
            # 边界填充
            padded = np.pad(trace, window_size//2, mode='edge')
            # 移动平均
            kernel = np.ones(window_size) / window_size
            smoothed = np.convolve(padded, kernel, mode='valid')
            denoised_traces.append(smoothed)
        
        return np.array(denoised_traces)
    
    def _wavelet_denoise(self, traces: np.ndarray) -> np.ndarray:
        """小波Denoising"""
        try:
            import pywt
            denoised_traces = []
            
            for trace in traces:
                # 小波分解
                coeffs = pywt.wavedec(trace, 'db4', level=4)
                # 阈值Process
                threshold = self.config.wavelet_threshold * np.max(coeffs[0])
                coeffs_thresh = [pywt.threshold(c, threshold, 'soft') for c in coeffs]
                # 重构信号
                denoised = pywt.waverec(coeffs_thresh, 'db4')
                # Ensure长度一致
                if len(denoised) != len(trace):
                    denoised = denoised[:len(trace)]
                denoised_traces.append(denoised)
            
            return np.array(denoised_traces)
        except ImportError:
            self.logger.warning("PyWavelets未安装，Use移动平均Denoising")
            return self._moving_average_denoise(traces)
        except Exception as e:
            self.logger.warning(f"小波DenoisingFailed，Use移动平均: {e}")
            return self._moving_average_denoise(traces)
    
    def _normalize_traces(self, traces: np.ndarray) -> np.ndarray:
        """TraceNormalization（改进版）"""
        if self.config.normalization_method == 'zscore':
            # Z-score标准化
            mean = np.mean(traces, axis=1, keepdims=True)
            std = np.std(traces, axis=1, keepdims=True)
            return (traces - mean) / (std + 1e-8)
        
        elif self.config.normalization_method == 'minmax':
            # Min-MaxNormalization
            min_vals = np.min(traces, axis=1, keepdims=True)
            max_vals = np.max(traces, axis=1, keepdims=True)
            return (traces - min_vals) / (max_vals - min_vals + 1e-8)
        
        elif self.config.normalization_method == 'robust':
            # Robust Scaler（对Outlier更鲁棒）
            normalized_traces = []
            for trace in traces:
                median = np.median(trace)
                q1 = np.percentile(trace, 25)
                q3 = np.percentile(trace, 75)
                iqr = q3 - q1
                if iqr > 1e-8:
                    normalized = (trace - median) / iqr
                else:
                    normalized = trace - median
                normalized_traces.append(normalized)
            return np.array(normalized_traces)
        
        else:
            return traces
    
    def _remove_outliers(self, traces: np.ndarray) -> Tuple[np.ndarray, List[int]]:
        """
        Outlier检测AndRemove（新增）
        
        Returns:
            Filter后的TraceAndOutlier索引List
        """
        if self.config.outlier_method == 'zscore':
            return self._zscore_outlier_removal(traces)
        elif self.config.outlier_method == 'iqr':
            return self._iqr_outlier_removal(traces)
        else:
            return traces, []
    
    def _zscore_outlier_removal(self, traces: np.ndarray) -> Tuple[np.ndarray, List[int]]:
        """Based onZ-score的Outlier检测"""
        # Calculate每条Trace的特征（均值、方差etc.）
        trace_means = np.mean(traces, axis=1)
        trace_stds = np.std(traces, axis=1)
        
        # CalculateZ-score
        mean_of_means = np.mean(trace_means)
        std_of_means = np.std(trace_means)
        
        if std_of_means < 1e-10:
            return traces, []
        
        zscores = np.abs((trace_means - mean_of_means) / std_of_means)
        
        # 找ToOutlier
        outlier_mask = zscores > self.config.outlier_threshold
        outlier_indices = np.where(outlier_mask)[0].tolist()
        
        # RemoveOutlier
        filtered_traces = traces[~outlier_mask]
        
        return filtered_traces, outlier_indices
    
    def _iqr_outlier_removal(self, traces: np.ndarray) -> Tuple[np.ndarray, List[int]]:
        """Based onIQR（四分位距）的Outlier检测"""
        trace_means = np.mean(traces, axis=1)
        
        # CalculateIQR
        q1 = np.percentile(trace_means, 25)
        q3 = np.percentile(trace_means, 75)
        iqr = q3 - q1
        
        # 定义Outlier边界
        lower_bound = q1 - self.config.outlier_threshold * iqr
        upper_bound = q3 + self.config.outlier_threshold * iqr
        
        # 找ToOutlier
        outlier_mask = (trace_means < lower_bound) | (trace_means > upper_bound)
        outlier_indices = np.where(outlier_mask)[0].tolist()
        
        # RemoveOutlier
        filtered_traces = traces[~outlier_mask]
        
        return filtered_traces, outlier_indices
    
    def _resample_traces(self, traces: np.ndarray) -> np.ndarray:
        """
        Trace重Sampling
        
        resample_factor > 1: 上Sampling（增加Sampling点）
        resample_factor < 1: 下Sampling（减少Sampling点）
        """
        n_traces, n_samples = traces.shape
        new_n_samples = int(n_samples * self.config.resample_factor)
        
        resampled_traces = []
        for trace in traces:
            # Usescipy的resampleFunction
            resampled = signal.resample(trace, new_n_samples)
            resampled_traces.append(resampled)
        
        return np.array(resampled_traces)
    
    def _fft_filtering(self, traces: np.ndarray) -> np.ndarray:
        """
        频域Filtering（新增）
        
        In频域进行Filtering，去除指定频率范围外的频率成分
        """
        filtered_traces = []
        
        for trace in traces:
            # FFT变换
            fft_trace = np.fft.fft(trace)
            freqs = np.fft.fftfreq(len(trace))
            
            # Create频域Filtering器
            freq_mask = (np.abs(freqs) >= self.config.fft_low_freq) & \
                       (np.abs(freqs) <= self.config.fft_high_freq)
            
            # 应用Filtering器
            fft_filtered = fft_trace * freq_mask
            
            # 逆FFT
            filtered = np.fft.ifft(fft_filtered).real
            filtered_traces.append(filtered)
        
        return np.array(filtered_traces)
    
    def _trace_pooling(self, traces: np.ndarray) -> np.ndarray:
        """
        Trace池化（新增）
        
        Through池化减少Trace长度，同时保留主要特征
        """
        n_traces, n_samples = traces.shape
        pool_size = self.config.pooling_size
        
        # Calculate池化后的长度
        pooled_length = n_samples // pool_size
        
        pooled_traces = []
        for trace in traces:
            pooled = []
            for i in range(0, n_samples - pool_size + 1, pool_size):
                window = trace[i:i + pool_size]
                
                if self.config.pooling_method == 'average':
                    pooled.append(np.mean(window))
                elif self.config.pooling_method == 'max':
                    pooled.append(np.max(window))
                elif self.config.pooling_method == 'median':
                    pooled.append(np.median(window))
                else:
                    pooled.append(np.mean(window))
            
            pooled_traces.append(pooled)
        
        return np.array(pooled_traces)
    
    def _window_extraction(self, traces: np.ndarray) -> np.ndarray:
        """
        时间窗口Extract（新增）
        
        ExtractTrace的特定时间窗口
        """
        window_end = self.config.window_end
        if window_end is None:
            window_end = traces.shape[1]
        
        window_start = max(0, self.config.window_start)
        window_end = min(window_end, traces.shape[1])
        
        return traces[:, window_start:window_end]
    
    def _apply_pca(self, traces: np.ndarray) -> Tuple[np.ndarray, dict]:
        """
        主成分Analyze（PCA）降维（新增）
        
        Args:
            traces: InputTrace
        
        Returns:
            降维后的TraceAndPCAInformation
        """
        try:
            from sklearn.decomposition import PCA
            
            n_components = min(self.config.pca_components, 
                             traces.shape[0], traces.shape[1])
            
            pca = PCA(n_components=n_components)
            traces_pca = pca.fit_transform(traces)
            
            pca_info = {
                'explained_variance_ratio': pca.explained_variance_ratio_,
                'total_variance': np.sum(pca.explained_variance_ratio_),
                'n_components': n_components
            }
            
            return traces_pca, pca_info
            
        except ImportError:
            self.logger.warning("scikit-learn未安装，SkipPCA")
            return traces, {'explained_variance_ratio': [], 'total_variance': 0, 'n_components': 0}
        except Exception as e:
            self.logger.warning(f"PCAFailed: {e}")
            return traces, {'explained_variance_ratio': [], 'total_variance': 0, 'n_components': 0}
    
    def _apply_ica(self, traces: np.ndarray) -> np.ndarray:
        """
        独立成分Analyze（ICA）信号分离（新增）
        
        ICA用于分离独立的信号源，可以Has效分离Has用信号And噪声
        """
        try:
            from sklearn.decomposition import FastICA
            
            n_components = min(self.config.ica_components, 
                             traces.shape[0], traces.shape[1])
            
            ica = FastICA(n_components=n_components, random_state=0, max_iter=500)
            traces_ica = ica.fit_transform(traces)
            
            return traces_ica
            
        except ImportError:
            self.logger.warning("scikit-learn未安装，SkipICA")
            return traces
        except Exception as e:
            self.logger.warning(f"ICAFailed: {e}")
            return traces
    
    def select_points_of_interest(self, traces: np.ndarray, 
                                plaintexts: np.ndarray, 
                                num_poi: int = 100,
                                method: str = 'combined') -> np.ndarray:
        """
        选择兴趣点（改进版）
        
        Args:
            traces: TraceData
            plaintexts: PlaintextData
            num_poi: 兴趣点数量
            method: 选择Method ('variance', 'snr', 'sost', 'combined')
            
        Returns:
            兴趣点索引
        """
        self.logger.info(f"选择 {num_poi} 个兴趣点，Method: {method}")
        
        if method == 'variance':
            scores = np.var(traces, axis=0)
        elif method == 'snr':
            scores = self._calculate_poi_snr(traces)
        elif method == 'sost':
            scores = self._calculate_sost(traces, plaintexts)
        elif method == 'combined':
            # 结合多种Method
            variances = np.var(traces, axis=0)
            snr_scores = self._calculate_poi_snr(traces)
            sost_scores = self._calculate_sost(traces, plaintexts)
            
            # Normalization并组合
            variances_norm = (variances - np.min(variances)) / (np.max(variances) - np.min(variances) + 1e-10)
            snr_norm = (snr_scores - np.min(snr_scores)) / (np.max(snr_scores) - np.min(snr_scores) + 1e-10)
            sost_norm = (sost_scores - np.min(sost_scores)) / (np.max(sost_scores) - np.min(sost_scores) + 1e-10)
            
            scores = variances_norm * 0.3 + snr_norm * 0.3 + sost_norm * 0.4
        else:
            self.logger.warning(f"未知Method: {method}，Usevariance")
            scores = np.var(traces, axis=0)
        
        # 选择最高分数的点
        poi_indices = np.argsort(scores)[-num_poi:]
        
        self.logger.info(f"兴趣点选择Completed，平均得分: {np.mean(scores[poi_indices]):.4f}")
        
        return np.sort(poi_indices)
    
    def _calculate_poi_snr(self, traces: np.ndarray) -> np.ndarray:
        """CalculateEachSampling点的SNR"""
        mean_trace = np.mean(traces, axis=0)
        signal_power = mean_trace ** 2
        noise_power = np.var(traces, axis=0)
        
        snr = signal_power / (noise_power + 1e-10)
        return snr
    
    def _calculate_sost(self, traces: np.ndarray, plaintexts: np.ndarray) -> np.ndarray:
        """
        CalculateSOST (Sum of Squared T-statistics)
        改进：更健壮地ProcessNot同Class型的Plaintext
        """
        n_samples = traces.shape[1]
        t_statistics = np.zeros(n_samples)
        
        # ExtractFirst个字节的汉明权重
        hw_values = []
        for pt in plaintexts:
            if isinstance(pt, (bytes, bytearray)):
                # ProcessbytesClass型
                hw = bin(pt[0]).count('1')
            elif isinstance(pt, np.ndarray):
                # ProcessnumpyArray
                if pt.dtype == np.uint8:
                    hw = bin(int(pt[0])).count('1')
                else:
                    hw = bin(int(pt) & 0xFF).count('1')
            else:
                # ProcessInteger
                hw = bin(int(pt) & 0xFF).count('1')
            hw_values.append(hw)
        
        hw_values = np.array(hw_values)
        
        # 对EachSampling点Calculatet统计量
        for i in range(n_samples):
            # 按汉明权重分组
            groups_by_hw = {}
            for hw in range(9):  # 0-8的汉明权重
                mask = hw_values == hw
                if np.any(mask):
                    groups_by_hw[hw] = traces[mask, i]
            
            # CalculateAll组之间的t统计量
            if len(groups_by_hw) >= 2:
                groups = list(groups_by_hw.values())
                t_sum = 0.0
                count = 0
                
                # CalculateAll组对之间的t统计量
                for idx1 in range(len(groups)):
                    for idx2 in range(idx1+1, len(groups)):
                        if len(groups[idx1]) > 0 and len(groups[idx2]) > 0:
                            mean1 = np.mean(groups[idx1])
                            mean2 = np.mean(groups[idx2])
                            var1 = np.var(groups[idx1])
                            var2 = np.var(groups[idx2])
                            n1 = len(groups[idx1])
                            n2 = len(groups[idx2])
                            
                            # 两样本t检验统计量
                            pooled_var = var1/n1 + var2/n2
                            if pooled_var > 1e-10:
                                t_stat = (mean1 - mean2) / np.sqrt(pooled_var)
                                t_sum += t_stat ** 2
                                count += 1
                
                if count > 0:
                    t_statistics[i] = t_sum / count
        
        return t_statistics
    
    def _calculate_preprocessing_stats(self, original: np.ndarray, 
                                     processed: np.ndarray) -> dict:
        """Calculate预Process效果统计"""
        stats = {}
        
        # 信噪比改善
        original_snr = self._calculate_snr(original)
        processed_snr = self._calculate_snr(processed)
        stats['snr_improvement'] = processed_snr - original_snr
        stats['original_snr'] = original_snr
        stats['processed_snr'] = processed_snr
        
        # 方差变化
        stats['variance_reduction'] = (np.var(original) - np.var(processed)) / np.var(original)
        
        # 均值变化
        stats['mean_shift'] = np.abs(np.mean(processed) - np.mean(original))
        
        return stats
    
    def _calculate_snr(self, traces: np.ndarray) -> float:
        """
        Calculate信噪比（修复版）
        
        SNR = 10 * log10(Var(信号) / Var(噪声))
        信号：AllTrace的平均（跨Trace）
        噪声：每条Trace与平均信号的偏差
        """
        # Calculate平均Trace（信号）
        mean_trace = np.mean(traces, axis=0)
        
        # 信号功率 = 信号的方差
        signal_power = np.var(mean_trace)
        
        # 噪声功率 = 每条Trace与平均的方差的平均
        noise_power = np.mean(np.var(traces - mean_trace, axis=1))
        
        if noise_power > 1e-10 and signal_power > 0:
            snr_linear = signal_power / noise_power
            snr_db = 10 * np.log10(snr_linear)
        else:
            snr_db = 100  # 很高的SNR
        
        return snr_db
    
    def save_processed_traces(self, traces: np.ndarray, output_file: str,
                             plaintexts: Optional[np.ndarray] = None,
                             keys: Optional[np.ndarray] = None,
                             stats: Optional[dict] = None):
        """
        Save预Processed的TraceData（新增）
        
        Args:
            traces: TraceData
            output_file: Output filePath（.npzFormat）
            plaintexts: PlaintextData
            keys: KeyData
            stats: 预Process统计Information
        """
        save_data = {'traces': traces}
        
        if plaintexts is not None:
            save_data['plaintexts'] = plaintexts
        
        if keys is not None:
            save_data['keys'] = keys
        
        if stats is not None:
            # 将DictionaryConvertFor可Save的Format
            for key, value in stats.items():
                if isinstance(value, (list, tuple)):
                    save_data[f'stat_{key}'] = np.array(value)
                elif isinstance(value, (int, float, str)):
                    save_data[f'stat_{key}'] = value
        
        np.savez_compressed(output_file, **save_data)
        self.logger.info(f"[OK] Data已SaveTo: {output_file}")
    
    def load_processed_traces(self, input_file: str) -> Tuple[np.ndarray, dict]:
        """
        Load预Processed的TraceData（新增）
        
        Args:
            input_file: Input filePath（.npzFormat）
        
        Returns:
            TraceDataAnd元DataDictionary
        """
        data = np.load(input_file)
        traces = data['traces']
        
        metadata = {}
        for key in data.files:
            if key != 'traces':
                metadata[key] = data[key]
        
        self.logger.info(f"[OK] From {input_file} Load了 {len(traces)} 条Trace")
        return traces, metadata
    
    def compute_differential_traces(self, traces: np.ndarray, 
                                   plaintexts: np.ndarray,
                                   target_bit: int = 0) -> Tuple[np.ndarray, np.ndarray]:
        """
        差分TraceAnalyze（新增）
        
        Calculate差分PowerTrace，用于DPAAttack
        
        Args:
            traces: PowerTrace
            plaintexts: PlaintextData
            target_bit: 目标比特位
        
        Returns:
            (差分Trace, 比特值)
        """
        # Extract目标比特
        bit_values = []
        for pt in plaintexts:
            if isinstance(pt, (bytes, bytearray)):
                bit = (pt[0] >> target_bit) & 1
            elif isinstance(pt, np.ndarray):
                bit = (int(pt[0]) >> target_bit) & 1
            else:
                bit = (int(pt) >> target_bit) & 1
            bit_values.append(bit)
        
        bit_values = np.array(bit_values)
        
        # 按比特值分组
        traces_0 = traces[bit_values == 0]
        traces_1 = traces[bit_values == 1]
        
        # Calculate差分
        if len(traces_0) > 0 and len(traces_1) > 0:
            mean_0 = np.mean(traces_0, axis=0)
            mean_1 = np.mean(traces_1, axis=0)
            diff_trace = mean_1 - mean_0
        else:
            diff_trace = np.zeros(traces.shape[1])
        
        return diff_trace, bit_values
    
    def compute_power_spectrum(self, traces: np.ndarray, 
                              sampling_rate: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
        """
        Calculate功率谱密度（新增）
        
        Args:
            traces: PowerTrace
            sampling_rate: Sampling率（Hz）
        
        Returns:
            (频率, 功率谱密度)
        """
        # Calculate平均Trace
        mean_trace = np.mean(traces, axis=0)
        
        # Calculate功率谱
        freqs, psd = signal.welch(mean_trace, fs=sampling_rate, 
                                 nperseg=min(256, len(mean_trace)))
        
        return freqs, psd
    
    def compute_trace_snr_per_sample(self, traces: np.ndarray) -> np.ndarray:
        """
        CalculateEachSampling点的SNR（新增）
        
        对Each时间点CalculateSNR，用于识别高质量的Sampling点
        
        Returns:
            EachSampling点的SNR值
        """
        mean_trace = np.mean(traces, axis=0)
        signal_power = mean_trace ** 2
        noise_power = np.var(traces, axis=0)
        
        snr = signal_power / (noise_power + 1e-10)
        snr_db = 10 * np.log10(snr + 1e-10)
        
        return snr_db
    
    def visualize_preprocessing(self, original: np.ndarray, processed: np.ndarray,
                               num_traces: int = 3, save_path: Optional[str] = None):
        """
        可视化预Process效果（新增）
        
        Args:
            original: 原始Trace
            processed: 预Processed的Trace
            num_traces: Display的Trace数量
            save_path: Save图片的Path（Optional）
        """
        try:
            import matplotlib.pyplot as plt
            
            fig, axes = plt.subplots(2, 2, figsize=(15, 10))
            
            # 1. 原始TraceExample
            for i in range(min(num_traces, len(original))):
                axes[0, 0].plot(original[i], alpha=0.6, label=f'Trace {i}')
            axes[0, 0].set_title('Original Traces')
            axes[0, 0].set_xlabel('Sample')
            axes[0, 0].set_ylabel('Amplitude')
            axes[0, 0].legend()
            axes[0, 0].grid(True, alpha=0.3)
            
            # 2. 预Processed的Trace
            for i in range(min(num_traces, len(processed))):
                axes[0, 1].plot(processed[i], alpha=0.6, label=f'Trace {i}')
            axes[0, 1].set_title('Processed Traces')
            axes[0, 1].set_xlabel('Sample')
            axes[0, 1].set_ylabel('Amplitude')
            axes[0, 1].legend()
            axes[0, 1].grid(True, alpha=0.3)
            
            # 3. 平均Trace对比
            axes[1, 0].plot(np.mean(original, axis=0), label='Original Mean', alpha=0.8)
            axes[1, 0].plot(np.mean(processed, axis=0), label='Processed Mean', alpha=0.8)
            axes[1, 0].set_title('Mean Trace Comparison')
            axes[1, 0].set_xlabel('Sample')
            axes[1, 0].set_ylabel('Amplitude')
            axes[1, 0].legend()
            axes[1, 0].grid(True, alpha=0.3)
            
            # 4. 方差对比
            axes[1, 1].plot(np.var(original, axis=0), label='Original Variance', alpha=0.8)
            axes[1, 1].plot(np.var(processed, axis=0), label='Processed Variance', alpha=0.8)
            axes[1, 1].set_title('Variance Comparison')
            axes[1, 1].set_xlabel('Sample')
            axes[1, 1].set_ylabel('Variance')
            axes[1, 1].legend()
            axes[1, 1].grid(True, alpha=0.3)
            
            plt.tight_layout()
            
            if save_path:
                plt.savefig(save_path, dpi=300, bbox_inches='tight')
                self.logger.info(f"[OK] 可视化图片已SaveTo: {save_path}")
            else:
                plt.show()
            
            plt.close()
            
        except ImportError:
            self.logger.warning("Matplotlib未安装，No法可视化")
        except Exception as e:
            self.logger.warning(f"可视化Failed: {e}")


# 便捷Function
def quick_preprocess(traces: np.ndarray, 
                    plaintexts: Optional[np.ndarray] = None,
                    config: Optional[PreprocessingConfig] = None) -> np.ndarray:
    """Run the full preprocessing pipeline in a single call.

    Args:
        traces: Raw trace data, shape (n_traces, n_samples).
        plaintexts: Optional plaintext data associated with the traces.
        config: Preprocessing configuration; library defaults apply when None.

    Returns:
        The preprocessed traces (run statistics are discarded).
    """
    # Delegate to TracePreprocessor; keep only the traces, drop the stats dict.
    processed, _stats = TracePreprocessor(config).preprocess(traces, plaintexts)
    return processed


if __name__ == "__main__":
    # Smoke-test the preprocessor end-to-end on synthetic data.
    print("Testing Trace Preprocessor...")
    print("=" * 70)

    # Synthetic traces: a shared sine signal buried in Gaussian noise.
    n_traces, n_samples = 100, 1000
    traces = np.random.randn(n_traces, n_samples) * 0.1  # noise
    traces += np.sin(np.linspace(0, 10 * np.pi, n_samples))  # signal

    plaintexts = np.random.randint(0, 256, n_traces, dtype=np.uint8)

    # Default configuration; set enable_pca=True / pca_components=50
    # on PreprocessingConfig to also exercise PCA reduction.
    config = PreprocessingConfig()
    preprocessor = TracePreprocessor(config)

    processed_traces, stats = preprocessor.preprocess(traces, plaintexts)

    # Report run statistics returned by the pipeline.
    print(f"\nPreprocessing Statistics:")
    print(f"  Original shape: {stats['original_shape']}")
    print(f"  Preprocessing steps: {stats['preprocessing_steps']}")
    print(f"  SNR improvement: {stats['snr_improvement']:.2f} dB")
    print(f"  Variance reduction: {stats['variance_reduction']:.2%}")

    # Point-of-interest selection on the processed traces.
    poi_indices = preprocessor.select_points_of_interest(processed_traces, plaintexts, 50)
    print(f"  Points of Interest selected: {len(poi_indices)}")

    # Side-by-side visualization of raw vs. processed traces.
    print(f"\nGenerating visualization...")
    try:
        import matplotlib.pyplot as plt

        fig, axes = plt.subplots(1, 2, figsize=(14, 5))

        # Each panel: up to 5 individual traces plus the mean trace in red.
        panels = (
            (axes[0], traces, 'Original Traces'),
            (axes[1], processed_traces, 'Preprocessed Traces'),
        )
        for ax, data, title in panels:
            for trace in data[:5]:
                ax.plot(trace, alpha=0.5, linewidth=0.8)
            ax.plot(np.mean(data, axis=0), 'r-', linewidth=2, label='Mean Trace')
            ax.set_title(title, fontsize=14, fontweight='bold')
            ax.set_xlabel('Sample Index', fontsize=11)
            ax.set_ylabel('Amplitude', fontsize=11)
            ax.legend()
            ax.grid(True, alpha=0.3)

        # Overall title summarizing the SNR change.
        fig.suptitle(f'Trace Preprocessing Comparison (SNR: {stats["original_snr"]:.1f} dB → {stats["processed_snr"]:.1f} dB)', 
                     fontsize=15, fontweight='bold', y=1.02)

        plt.tight_layout()

        # Persist the figure, then display it.
        output_file = 'preprocessing_test_comparison.png'
        plt.savefig(output_file, dpi=150, bbox_inches='tight')
        print(f"[OK] Visualization saved to: {output_file}")

        plt.show()

    except ImportError:
        print("  Warning: Matplotlib not installed, skipping visualization")
    except Exception as e:
        print(f"  Warning: Visualization failed: {e}")

    print("\n" + "=" * 70)
    print("Test completed successfully!")
