import pandas as pd  # 数据处理和分析
import numpy as np  # 数值计算
from typing import Dict, List, Tuple, Optional  # 类型提示
from sklearn.preprocessing import StandardScaler  # 特征标准化
from scipy import stats  # 统计函数
from scipy import signal  # 信号处理


def _ricker_wavelet(points: int, width: float) -> np.ndarray:
    """Return a Ricker ("Mexican hat") wavelet sampled at ``points`` positions.

    Args:
        points: Number of samples in the returned wavelet window.
        width: Width (scale) parameter of the wavelet.

    Returns:
        1-D array of wavelet amplitudes centered on the sample window.
    """
    amplitude = 2.0 / (np.sqrt(3.0 * width) * np.pi ** 0.25)
    t = np.arange(points) - (points - 1.0) / 2.0
    return amplitude * (1.0 - (t / width) ** 2) * np.exp(-(t ** 2) / (2.0 * width ** 2))


def _ricker_cwt(data, widths) -> np.ndarray:
    """Continuous wavelet transform of ``data`` using Ricker wavelets.

    Drop-in replacement for ``scipy.signal.cwt(data, signal.ricker, widths)``,
    which was deprecated in SciPy 1.12 and removed in SciPy 1.15 — with the
    removed API the original code silently zeroed all wavelet features.

    Args:
        data: 1-D signal.
        widths: Sequence of wavelet widths; one output row per width.

    Returns:
        2-D array of shape ``(len(widths), len(data))``.
    """
    values = np.asarray(data, dtype=float)
    result = np.empty((len(widths), len(values)))
    for row, width in enumerate(widths):
        # Window length follows scipy's old convention: min(10*width, N).
        window = min(int(10 * width), len(values))
        result[row] = np.convolve(values, _ricker_wavelet(window, width), mode='same')
    return result


def extract_statistical_features(segment: pd.DataFrame, acc_sensitivity_factor: float = 2.5) -> Dict[str, float]:
    """
    Extract time- and frequency-domain features from a time series segment.

    Args:
        segment: DataFrame containing the sensor columns 'acc_x', 'acc_y',
            'acc_z', 'angle_x', 'angle_y', 'angle_z'.
        acc_sensitivity_factor: Multiplier applied to the accelerometer
            channels before feature extraction to increase their
            sensitivity (default: 2.5).

    Returns:
        Dictionary mapping feature names to values.

    Raises:
        ValueError: If a required sensor column is missing or the segment
            is empty.
    """
    features: Dict[str, float] = {}

    sensor_cols = ['acc_x', 'acc_y', 'acc_z', 'angle_x', 'angle_y', 'angle_z']
    acc_cols = ['acc_x', 'acc_y', 'acc_z']
    angle_cols = ['angle_x', 'angle_y', 'angle_z']

    # Validate input up front.
    for col in sensor_cols:
        if col not in segment.columns:
            raise ValueError(f"Column {col} not found in segment")
    if len(segment) == 0:
        # An empty segment would produce NaN statistics and divide-by-zero
        # errors (power, entropy) further down; fail fast instead.
        raise ValueError("Cannot extract features from an empty segment")

    # Copy of the segment with amplified accelerometer channels; all
    # accelerometer features are computed on this enhanced copy.
    segment_enhanced = segment.copy()
    for col in acc_cols:
        segment_enhanced[col] = segment[col] * acc_sensitivity_factor

    # Per-channel features.
    for col in sensor_cols:
        df_to_use = segment_enhanced if col in acc_cols else segment
        series = df_to_use[col]

        # Basic statistics
        features[f'{col}_mean'] = series.mean()
        features[f'{col}_std'] = series.std()
        features[f'{col}_min'] = series.min()
        features[f'{col}_max'] = series.max()
        features[f'{col}_range'] = features[f'{col}_max'] - features[f'{col}_min']
        features[f'{col}_median'] = series.median()

        # Percentiles
        features[f'{col}_25th'] = series.quantile(0.25)
        features[f'{col}_75th'] = series.quantile(0.75)
        features[f'{col}_iqr'] = features[f'{col}_75th'] - features[f'{col}_25th']

        # Higher order statistics
        features[f'{col}_skew'] = series.skew()
        features[f'{col}_kurtosis'] = series.kurtosis()

        # Energy (sum of squares) and mean power
        features[f'{col}_energy'] = np.sum(series ** 2)
        features[f'{col}_power'] = features[f'{col}_energy'] / len(df_to_use)

        # Number of sign changes in the signal
        zero_crossings = np.where(np.diff(np.signbit(series)))[0]
        features[f'{col}_zero_crossings'] = len(zero_crossings)

        # Mean absolute deviation
        features[f'{col}_mad'] = np.mean(np.abs(series - series.mean()))

        # Root mean square
        features[f'{col}_rms'] = np.sqrt(np.mean(series ** 2))

        # Peak-to-peak amplitude (kept alongside _range for backward
        # compatibility of the output feature set)
        features[f'{col}_p2p'] = features[f'{col}_max'] - features[f'{col}_min']

        # Signal magnitude area
        features[f'{col}_sma'] = np.sum(np.abs(series))

        # Shannon entropy of the 10-bin value histogram
        hist, _ = np.histogram(series, bins=10)
        prob = hist / np.sum(hist)
        prob = prob[prob > 0]  # log2(0) is undefined; drop empty bins
        features[f'{col}_entropy'] = -np.sum(prob * np.log2(prob))

        # Accelerometer-only features
        if col in acc_cols:
            # Jerk (first difference of acceleration)
            jerk = np.diff(series)
            if len(jerk) > 0:
                features[f'{col}_jerk_mean'] = np.mean(np.abs(jerk))
                features[f'{col}_jerk_std'] = np.std(jerk)
                features[f'{col}_jerk_max'] = np.max(np.abs(jerk))
            else:
                features[f'{col}_jerk_mean'] = 0
                features[f'{col}_jerk_std'] = 0
                features[f'{col}_jerk_max'] = 0

            # Peaks above zero in the accelerometer signal
            peaks, _ = signal.find_peaks(series, height=0)
            features[f'{col}_peak_count'] = len(peaks)
            features[f'{col}_peak_mean'] = np.mean(series.iloc[peaks]) if len(peaks) > 0 else 0

            # Autocorrelation at lag 1 (helps detect repeating patterns)
            if len(series) > 1:
                features[f'{col}_autocorr_lag1'] = series.autocorr(lag=1)
            else:
                features[f'{col}_autocorr_lag1'] = 0

            # Wavelet features. Defaults are set first so every segment —
            # including those shorter than 4 samples — yields the same
            # feature keys (the original omitted them for short segments).
            features[f'{col}_wavelet_mean'] = 0
            features[f'{col}_wavelet_std'] = 0
            if len(series) >= 4:  # minimum length for a meaningful CWT
                try:
                    coeffs = _ricker_cwt(series, np.arange(1, 5))
                    features[f'{col}_wavelet_mean'] = np.mean(np.abs(coeffs))
                    features[f'{col}_wavelet_std'] = np.std(coeffs)
                except Exception:
                    # Keep the zero defaults; never use a bare except.
                    pass

    # Cross-axis acceleration magnitude (computed once, reused below).
    acc_magnitude = np.sqrt(np.sum(segment_enhanced[acc_cols] ** 2, axis=1))
    features['acc_magnitude_mean'] = np.mean(acc_magnitude)
    features['acc_magnitude_std'] = np.std(acc_magnitude)
    features['acc_magnitude_max'] = np.max(acc_magnitude)
    features['acc_magnitude_median'] = np.median(acc_magnitude)
    features['acc_magnitude_iqr'] = np.percentile(acc_magnitude, 75) - np.percentile(acc_magnitude, 25)
    features['acc_magnitude_entropy'] = stats.entropy(np.histogram(acc_magnitude, bins=10)[0])

    # Jerk magnitude (rate of change of acceleration magnitude)
    jerk_magnitude = np.diff(acc_magnitude)
    if len(jerk_magnitude) > 0:
        features['acc_jerk_magnitude_mean'] = np.mean(np.abs(jerk_magnitude))
        features['acc_jerk_magnitude_std'] = np.std(jerk_magnitude)
        features['acc_jerk_magnitude_max'] = np.max(np.abs(jerk_magnitude))
    else:
        features['acc_jerk_magnitude_mean'] = 0
        features['acc_jerk_magnitude_std'] = 0
        features['acc_jerk_magnitude_max'] = 0

    # Cross-axis angle magnitude (computed once).
    angle_magnitude = np.sqrt(np.sum(segment[angle_cols] ** 2, axis=1))
    features['angle_magnitude_mean'] = np.mean(angle_magnitude)
    features['angle_magnitude_std'] = np.std(angle_magnitude)
    features['angle_magnitude_max'] = np.max(angle_magnitude)

    # Pairwise correlations between all sensor axes.
    for i, col1 in enumerate(sensor_cols):
        for j, col2 in enumerate(sensor_cols):
            if i < j:  # each unordered pair once
                # Enhanced data for accelerometer axes, raw otherwise.
                df1 = segment_enhanced if col1 in acc_cols else segment
                df2 = segment_enhanced if col2 in acc_cols else segment
                features[f'corr_{col1}_{col2}'] = df1[col1].corr(df2[col2])

    # Frequency-domain features (more detail for accelerometer channels).
    for col in sensor_cols:
        df_to_use = segment_enhanced if col in acc_cols else segment
        series = df_to_use[col]

        # Hann window reduces spectral leakage.
        windowed_data = series * signal.windows.hann(len(series))

        fft_values = np.abs(np.fft.fft(windowed_data))
        fft_freq = np.fft.fftfreq(len(windowed_data))  # cycles per sample

        # Keep only positive frequencies.
        positive_freq_idx = np.where(fft_freq > 0)[0]
        fft_values = fft_values[positive_freq_idx]
        fft_freq = fft_freq[positive_freq_idx]

        if len(fft_values) > 0:
            # Dominant frequencies (5 for accelerometer, 3 for angle).
            sorted_idx = np.argsort(fft_values)[::-1]
            for k in range(min(5 if col in acc_cols else 3, len(sorted_idx))):
                features[f'{col}_fft_freq_{k+1}'] = fft_freq[sorted_idx[k]]
                features[f'{col}_fft_amp_{k+1}'] = fft_values[sorted_idx[k]]

            # Spectral centroid and spread (amplitude sum hoisted).
            amp_sum = np.sum(fft_values)
            if amp_sum > 0:
                centroid = np.sum(fft_freq * fft_values) / amp_sum
                spread = np.sqrt(np.sum(((fft_freq - centroid) ** 2) * fft_values) / amp_sum)
            else:
                centroid = 0
                spread = 0
            features[f'{col}_spectral_centroid'] = centroid
            features[f'{col}_spectral_spread'] = spread

            # Frequency bands (accelerometer only).
            if col in acc_cols:
                # Normalized frequency bands (cycles/sample); adjust if the
                # sampling rate is known.
                low_band = (0.0, 0.1)
                mid_band = (0.1, 0.3)
                high_band = (0.3, 0.5)

                low_mask = (fft_freq >= low_band[0]) & (fft_freq < low_band[1])
                mid_mask = (fft_freq >= mid_band[0]) & (fft_freq < mid_band[1])
                high_mask = (fft_freq >= high_band[0]) & (fft_freq < high_band[1])

                features[f'{col}_low_band_energy'] = np.sum(fft_values[low_mask] ** 2) if np.any(low_mask) else 0
                features[f'{col}_mid_band_energy'] = np.sum(fft_values[mid_mask] ** 2) if np.any(mid_mask) else 0
                features[f'{col}_high_band_energy'] = np.sum(fft_values[high_mask] ** 2) if np.any(high_mask) else 0

                # Fraction of total spectral energy per band.
                total_energy = np.sum(fft_values ** 2)
                if total_energy > 0:
                    features[f'{col}_low_band_ratio'] = features[f'{col}_low_band_energy'] / total_energy
                    features[f'{col}_mid_band_ratio'] = features[f'{col}_mid_band_energy'] / total_energy
                    features[f'{col}_high_band_ratio'] = features[f'{col}_high_band_energy'] / total_energy
                else:
                    features[f'{col}_low_band_ratio'] = 0
                    features[f'{col}_mid_band_ratio'] = 0
                    features[f'{col}_high_band_ratio'] = 0

    return features


def normalize_features(features: pd.DataFrame) -> Tuple[pd.DataFrame, StandardScaler]:
    """
    Standardize features to zero mean and unit variance.

    Args:
        features: DataFrame of raw feature values, one row per segment.

    Returns:
        Tuple of (standardized DataFrame keeping the original column
        names, fitted StandardScaler for transforming future data).
    """
    scaler = StandardScaler()
    # fit_transform returns a plain ndarray; rebuild the DataFrame so the
    # column names survive the round trip.
    scaled_values = scaler.fit_transform(features)
    standardized = pd.DataFrame(scaled_values, columns=features.columns)
    return standardized, scaler


def extract_features_from_segments(segments: List[pd.DataFrame], acc_sensitivity_factor: float = 2.5) -> pd.DataFrame:
    """
    Extract features from a list of time series segments.

    Args:
        segments: List of DataFrames containing time series data.
        acc_sensitivity_factor: Factor to increase the sensitivity of
            accelerometer data (default: 2.5).

    Returns:
        DataFrame with one row of features per segment. NaN values
        (e.g. undefined correlations on constant signals) are replaced
        with 0 so downstream scalers and models receive finite values.
    """
    # NOTE: a previous revision selected a hand-picked "top features"
    # subset here; that selection was disabled, so all features are
    # returned and the dead code has been removed.
    feature_dicts = [
        extract_statistical_features(segment, acc_sensitivity_factor)
        for segment in segments
    ]

    return pd.DataFrame(feature_dicts).fillna(0)


if __name__ == "__main__":
    # Test the feature extraction functions
    from data_loader import prepare_dataset
    
    # Load and prepare data
    data_dir = "../data"
    segments, labels, classes = prepare_dataset(data_dir)
    
    # Extract features with enhanced accelerometer sensitivity
    features = extract_features_from_segments(segments, acc_sensitivity_factor=2.5)
    
    # Normalize features
    normalized_features, scaler = normalize_features(features)
    
    print(f"Extracted {features.shape[1]} features for {features.shape[0]} segments")
    print(f"Feature names: {list(features.columns)[:10]} ... (and {features.shape[1] - 10} more)")
    print(f"Normalized features shape: {normalized_features.shape}")
