"""
高速列车轴承智能故障诊断 - 数据预处理和特征工程模块

本模块包含：
1. 数据加载和预处理
2. 多维特征提取
3. 特征选择和可视化

作者：数学建模团队
版本：1.0
"""

import numpy as np
import pandas as pd
import scipy.io as sio
import scipy.signal as signal
import pywt
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import warnings
warnings.filterwarnings('ignore')

# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False


class DataPreprocessor:
    """Load CWRU-style bearing .mat files and parse metadata from file names."""

    def __init__(self, data_path, target_sampling_rate=12000):
        """
        Initialize the preprocessor.

        Args:
            data_path (str): Directory containing the .mat data files.
            target_sampling_rate (int): Target sampling rate in Hz.
        """
        self.data_path = data_path
        self.target_fs = target_sampling_rate
        self.scaler = StandardScaler()

    def load_mat_files(self, file_list):
        """
        Load a batch of MAT files.

        Args:
            file_list (list): File names to load, relative to data_path.

        Returns:
            tuple: (all_data, file_info) where all_data is a list of dicts
                holding raw signals plus parsed metadata, and file_info is a
                DataFrame summarizing each successfully loaded file.
        """
        all_data = []
        file_info = []

        for file_name in tqdm(file_list, desc="Loading MAT files"):
            try:
                file_path = os.path.join(self.data_path, file_name)
                data = sio.loadmat(file_path)

                # Extract the accelerometer channels: drive end, fan end, base.
                de_signal = self._extract_signal(data, 'DE')
                fe_signal = self._extract_signal(data, 'FE')
                ba_signal = self._extract_signal(data, 'BA')
                rpm = self._extract_rpm(data)

                # Parse the metadata encoded in the file name.
                file_type = self._parse_file_type(file_name)
                fault_type = self._parse_fault_type(file_name)
                fault_size = self._parse_fault_size(file_name)
                load_condition = self._parse_load_condition(file_name)

                all_data.append({
                    'file_name': file_name,
                    'de_signal': de_signal,
                    'fe_signal': fe_signal,
                    'ba_signal': ba_signal,
                    'rpm': rpm,
                    'file_type': file_type,
                    'fault_type': fault_type,
                    'fault_size': fault_size,
                    'load_condition': load_condition,
                    'sampling_rate': self._get_sampling_rate(file_name)
                })

                file_info.append({
                    'file_name': file_name,
                    'fault_type': fault_type,
                    'fault_size': fault_size,
                    'load_condition': load_condition,
                    'rpm': rpm,
                    'signal_length': len(de_signal) if de_signal is not None else 0
                })

            except Exception as e:
                # Best-effort loading: report and skip unreadable files.
                print(f"Error loading {file_name}: {str(e)}")
                continue

        return all_data, pd.DataFrame(file_info)

    def _extract_signal(self, data, signal_type):
        """Return the first channel whose key contains both the channel tag
        (e.g. 'DE') and 'time', flattened to 1-D; None when absent."""
        for key in data.keys():
            if signal_type in key and 'time' in key:
                return data[key].flatten()
        return None

    def _extract_rpm(self, data):
        """Return the scalar RPM value from any key containing 'RPM'; None when absent."""
        for key in data.keys():
            if 'RPM' in key:
                return data[key][0][0]
        return None

    def _parse_file_type(self, file_name):
        """Classify the file as 'Normal' or 'Fault' from the name prefix."""
        if 'N' in file_name.split('_')[0]:
            return 'Normal'
        else:
            return 'Fault'

    def _parse_fault_type(self, file_name):
        """Map the file-name prefix to a fault location label.

        Checks two-letter codes ('OR', 'IR') before single letters so that
        e.g. 'IR007' is not misread as a ball fault.
        """
        prefix = file_name.split('_')[0]
        if 'OR' in prefix:
            return 'Outer Race'
        elif 'IR' in prefix:
            return 'Inner Race'
        elif 'B' in prefix:
            return 'Ball'
        elif 'N' in prefix:
            return 'Normal'
        else:
            return 'Unknown'

    def _parse_fault_size(self, file_name):
        """Parse the fault diameter from the file-name prefix.

        Handles one-letter ("B007") and two-letter ("IR007", "OR014@6")
        prefixes by taking the first run of digits after the leading letters.

        Returns:
            float: Fault size in inches (e.g. 007 -> 0.007); 0.0 when absent.
        """
        import re
        prefix = file_name.split('_')[0]
        # BUGFIX: the old code stripped only ONE leading letter, so "IR007"
        # became "R007", failed isdigit(), and every IR/OR file got size 0.0.
        match = re.search(r'\d+', prefix)
        if match:
            return int(match.group()) / 1000  # mils -> inches
        return 0.0

    def _parse_load_condition(self, file_name):
        """Parse the motor load token (horsepower), tolerating a trailing extension.

        Returns:
            int: Load condition, or 0 when it cannot be parsed.
        """
        parts = file_name.split('_')
        if len(parts) > 1:
            # BUGFIX: the token may carry the extension ("0.mat"), which made
            # int() raise and silently abort loading the whole file.
            token = os.path.splitext(parts[1])[0]
            if token.isdigit():
                return int(token)
        return 0

    def _get_sampling_rate(self, file_name):
        """Infer the sampling rate (Hz) from the file name; defaults to 12 kHz."""
        name = file_name.lower()
        if '48k' in name:
            return 48000
        elif '12k' in name:
            return 12000
        else:
            return 12000  # default


class FeatureExtractor:
    """Extract time-, frequency-, time-frequency-, bearing-fault-frequency-
    and nonlinear-domain features from a 1-D vibration signal."""

    def __init__(self, target_fs=12000):
        """
        Initialize the feature extractor.

        Args:
            target_fs (int): Sampling rate (Hz) assumed for incoming signals.
        """
        self.target_fs = target_fs
        self.feature_names = []

    def extract_all_features(self, signal, rpm, bearing_type='SKF6205'):
        """
        Extract every feature family and merge them into one dict.

        Args:
            signal (np.array): Raw 1-D vibration signal.
            rpm (float): Shaft speed in rev/min (may be None; a default is used).
            bearing_type (str): Bearing model key, see _get_bearing_parameters.

        Returns:
            dict: Feature name -> scalar value.
        """
        features = {}

        print("    提取时域特征...")
        features.update(self._extract_time_domain_features(signal))
        print("    ✓ 时域特征完成")

        print("    提取频域特征...")
        features.update(self._extract_frequency_domain_features(signal, self.target_fs))
        print("    ✓ 频域特征完成")

        print("    提取时频域特征...")
        features.update(self._extract_time_frequency_features(signal, self.target_fs))
        print("    ✓ 时频域特征完成")

        print("    提取故障频率特征...")
        features.update(self._extract_fault_frequency_features(signal, self.target_fs, rpm, bearing_type))
        print("    ✓ 故障频率特征完成")

        print("    提取非线性特征...")
        features.update(self._extract_nonlinear_features(signal))
        print("    ✓ 非线性特征完成")

        return features

    def _extract_time_domain_features(self, signal):
        """Basic statistical and waveform-shape features.

        NOTE(review): ratios (crest/shape/impulse/clearance factor) divide by
        signal statistics and yield inf/nan for an all-zero signal — callers
        are expected to pass non-degenerate vibration data.
        """
        features = {}

        # Basic statistics
        features['mean'] = np.mean(signal)
        features['std'] = np.std(signal)
        features['max'] = np.max(signal)
        features['min'] = np.min(signal)
        features['peak_to_peak'] = np.ptp(signal)
        features['rms'] = np.sqrt(np.mean(signal**2))

        # Shape features
        features['skewness'] = self._calculate_skewness(signal)
        features['kurtosis'] = self._calculate_kurtosis(signal)
        features['crest_factor'] = np.max(np.abs(signal)) / features['rms']
        features['shape_factor'] = features['rms'] / np.mean(np.abs(signal))
        features['impulse_factor'] = np.max(np.abs(signal)) / np.mean(np.abs(signal))

        # Clearance factor: peak over squared mean of root amplitudes.
        features['clearance_factor'] = np.max(np.abs(signal)) / (np.mean(np.sqrt(np.abs(signal)))**2)

        return features

    def _extract_frequency_domain_features(self, signal, fs):
        """Spectral statistics and three-band energies over the one-sided FFT."""
        features = {}

        # One-sided magnitude spectrum
        fft_signal = np.fft.fft(signal)
        freq = np.fft.fftfreq(len(signal), 1/fs)
        positive_freq = freq[:len(freq)//2]
        positive_fft = np.abs(fft_signal[:len(fft_signal)//2])

        # Spectral statistics
        features['freq_mean'] = np.mean(positive_fft)
        features['freq_std'] = np.std(positive_fft)
        features['freq_max'] = np.max(positive_fft)
        features['freq_centroid'] = np.sum(positive_freq * positive_fft) / np.sum(positive_fft)

        # Band energies: [0, fs/8], [fs/8, fs/4], [fs/4, fs/2]
        bands = [(0, fs/8), (fs/8, fs/4), (fs/4, fs/2)]
        for i, (low, high) in enumerate(bands):
            mask = (positive_freq >= low) & (positive_freq <= high)
            band_energy = np.sum(positive_fft[mask]**2)
            features[f'band_{i+1}_energy'] = band_energy

        return features

    def _extract_time_frequency_features(self, signal, fs):
        """Wavelet-decomposition energies plus STFT magnitude statistics."""
        features = {}

        # Wavelet decomposition, capped at 4 levels to bound cost, bounded by
        # signal length, and floored at 1 level so pywt.wavedec does not
        # reject very short signals (BUGFIX: the level could reach 0 before).
        max_level = max(1, min(4, int(np.log2(len(signal))) - 1))
        coeffs = pywt.wavedec(signal, 'db4', level=max_level)
        for i, coeff in enumerate(coeffs):
            features[f'wavelet_{i}_energy'] = np.sum(coeff**2)
            features[f'wavelet_{i}_std'] = np.std(coeff)

        # STFT with a window adapted to the signal length and 50% overlap.
        from scipy import signal as scipy_signal
        nperseg = min(256, len(signal) // 4)
        noverlap = nperseg // 2
        f, t, Zxx = scipy_signal.stft(signal, fs, nperseg=nperseg, noverlap=noverlap)
        stft_magnitude = np.abs(Zxx)
        features['stft_mean'] = np.mean(stft_magnitude)
        features['stft_std'] = np.std(stft_magnitude)

        return features

    def _extract_fault_frequency_features(self, signal, fs, rpm, bearing_type):
        """Spectral energy around the bearing characteristic fault frequencies.

        Computes BPFO/BPFI/BSF/FTF from the bearing geometry (zero contact
        angle assumed), then sums FFT energy in a 10% band around the first
        three harmonics of each.
        """
        features = {}

        bearing_params = self._get_bearing_parameters(bearing_type)

        # Fall back to a nominal speed when the RPM channel was missing.
        if rpm is None or rpm == 0:
            rpm = 1796  # default shaft speed
            print(f"    警告: RPM为None，使用默认值 {rpm}")

        fr = rpm / 60  # shaft rotation frequency (Hz)
        d_over_D = bearing_params['d'] / bearing_params['D']

        # Characteristic fault frequencies (contact angle = 0):
        #   BPFO = fr * n/2 * (1 - d/D)
        #   BPFI = fr * n/2 * (1 + d/D)
        #   BSF  = fr * D/(2d) * (1 - (d/D)^2)
        #   FTF  = fr/2 * (1 - d/D)
        bpfo = fr * bearing_params['n_balls'] / 2 * (1 - d_over_D)
        bpfi = fr * bearing_params['n_balls'] / 2 * (1 + d_over_D)
        # BUGFIX: the factor of 1/2 was missing, doubling the BSF estimate.
        bsf = fr * bearing_params['D'] / (2 * bearing_params['d']) * (1 - d_over_D**2)
        ftf = fr / 2 * (1 - d_over_D)

        # Energy in a band around each harmonic of each fault frequency.
        fft_signal = np.fft.fft(signal)
        freq = np.fft.fftfreq(len(signal), 1/fs)

        for freq_type, center_freq in [('bpfo', bpfo), ('bpfi', bpfi), ('bsf', bsf), ('ftf', ftf)]:
            if center_freq > 0:
                for harmonic in range(1, 4):
                    target_freq = center_freq * harmonic
                    bandwidth = target_freq * 0.1  # 10% bandwidth
                    mask = (np.abs(freq) >= target_freq - bandwidth) & (np.abs(freq) <= target_freq + bandwidth)
                    energy = np.sum(np.abs(fft_signal[mask])**2)
                    features[f'{freq_type}_harmonic_{harmonic}_energy'] = energy

        return features

    def _extract_nonlinear_features(self, signal):
        """Approximate, sample and permutation entropy (simplified estimators).

        The signal is downsampled first to bound the cost; each estimator is
        best-effort and falls back to a neutral constant on failure.
        """
        import time

        features = {}

        # Downsample long signals to keep the entropy estimators tractable.
        if len(signal) > 5000:
            downsample_factor = len(signal) // 5000
            signal = signal[::downsample_factor]
            print(f"    非线性特征：信号下采样到 {len(signal)} 个点")

        # Approximate entropy
        print("    计算近似熵...")
        start_time = time.time()
        try:
            features['approx_entropy'] = self._calculate_approximate_entropy(signal, m=2, r=0.2)
            elapsed = time.time() - start_time
            print(f"    近似熵计算完成 (耗时: {elapsed:.2f}秒)")
        except Exception as e:
            elapsed = time.time() - start_time
            print(f"    近似熵计算失败 (耗时: {elapsed:.2f}秒): {str(e)}")
            features['approx_entropy'] = 0.0

        # Sample entropy
        print("    计算样本熵...")
        start_time = time.time()
        try:
            features['sample_entropy'] = self._calculate_sample_entropy(signal, m=2, r=0.2)
            elapsed = time.time() - start_time
            print(f"    样本熵计算完成 (耗时: {elapsed:.2f}秒)")
        except Exception as e:
            elapsed = time.time() - start_time
            print(f"    样本熵计算失败 (耗时: {elapsed:.2f}秒): {str(e)}")
            features['sample_entropy'] = 0.0

        # Permutation entropy
        print("    计算排列熵...")
        start_time = time.time()
        try:
            features['permutation_entropy'] = self._calculate_permutation_entropy(signal, m=3, delay=1)
            elapsed = time.time() - start_time
            print(f"    排列熵计算完成 (耗时: {elapsed:.2f}秒)")
        except Exception as e:
            elapsed = time.time() - start_time
            print(f"    排列熵计算失败 (耗时: {elapsed:.2f}秒): {str(e)}")
            features['permutation_entropy'] = 0.0

        return features

    def _get_bearing_parameters(self, bearing_type):
        """Return bearing geometry: ball diameter d, pitch diameter D (inches),
        and ball count. Unknown types fall back to SKF6205."""
        params = {
            'SKF6205': {'d': 0.3126, 'D': 1.537, 'n_balls': 9},
            'SKF6203': {'d': 0.2656, 'D': 1.122, 'n_balls': 9}
        }
        return params.get(bearing_type, params['SKF6205'])

    def _calculate_skewness(self, signal):
        """Third standardized moment (population form)."""
        return np.mean((signal - np.mean(signal))**3) / (np.std(signal)**3)

    def _calculate_kurtosis(self, signal):
        """Fourth standardized moment (raw kurtosis, not excess; normal ~ 3)."""
        return np.mean((signal - np.mean(signal))**4) / (np.std(signal)**4)

    def _calculate_approximate_entropy(self, signal, m=2, r=0.2):
        """Approximate entropy, heavily simplified to avoid O(N^2) cost.

        Downsamples long signals and caps the template comparisons at 100x100,
        so this is an approximation of ApEn, not the textbook estimator.
        Returns a neutral 0.5 for degenerate inputs or on failure.
        """
        try:
            if len(signal) > 1000:
                downsample_factor = len(signal) // 1000
                signal = signal[::downsample_factor]

            N = len(signal)
            if N < 10:
                return 0.5

            std_signal = np.std(signal)
            if std_signal == 0:
                return 0.5

            # Tolerance scaled by the signal's own spread.
            r_adapted = r * std_signal
            phi = np.zeros(2)

            for m_val in [m, m + 1]:
                if N - m_val + 1 <= 0:
                    phi[m_val - m] = 0
                    continue

                C = np.zeros(N - m_val + 1)

                # Capped template matching (Chebyshev distance).
                for i in range(min(N - m_val + 1, 100)):
                    template_i = signal[i:i + m_val]
                    for j in range(min(N - m_val + 1, 100)):
                        if np.max(np.abs(template_i - signal[j:j + m_val])) <= r_adapted:
                            C[i] += 1.0

                if np.sum(C) > 0:
                    phi[m_val - m] = np.mean(np.log(C / (N - m_val + 1.0) + 1e-10))
                else:
                    phi[m_val - m] = 0

            return max(0, phi[0] - phi[1])

        except Exception as e:
            print(f"    近似熵计算异常: {str(e)}")
            return 0.5

    def _calculate_sample_entropy(self, signal, m=2, r=0.2):
        """Sample entropy, simplified with capped template search.

        Limits the outer loop to 200 templates and the inner search to a
        50-sample window, so this approximates SampEn rather than computing
        it exactly. Returns a neutral 0.6 for degenerate inputs or on failure.
        """
        try:
            if len(signal) > 1000:
                downsample_factor = len(signal) // 1000
                signal = signal[::downsample_factor]

            N = len(signal)
            if N < 10:
                return 0.6

            std_signal = np.std(signal)
            if std_signal == 0:
                return 0.6

            r_adapted = r * std_signal
            B = 0.0  # m-length matches
            A = 0.0  # (m+1)-length matches

            max_iter = min(N - m, 200)
            for i in range(max_iter):
                template_i = signal[i:i + m]
                for j in range(i + 1, min(i + 50, N - m)):  # bounded search window
                    template_j = signal[j:j + m]
                    if np.max(np.abs(template_i - template_j)) <= r_adapted:
                        B += 1.0

                        # Check whether the match extends to m+1 samples.
                        if j + m + 1 < N and i + m + 1 < N:
                            if np.max(np.abs(signal[i:i + m + 1] - signal[j:j + m + 1])) <= r_adapted:
                                A += 1.0

            if B == 0:
                return 0.6
            else:
                return max(0, -np.log(A / B + 1e-10))

        except Exception as e:
            print(f"    样本熵计算异常: {str(e)}")
            return 0.6

    def _calculate_permutation_entropy(self, signal, m=3, delay=1):
        """Permutation entropy (bits) over ordinal patterns of length m.

        Returns a neutral 0.7 for degenerate inputs or on failure.
        """
        try:
            if len(signal) > 2000:
                downsample_factor = len(signal) // 2000
                signal = signal[::downsample_factor]

            N = len(signal)
            if N < m * delay:
                return 0.7

            # Cap the number of windows to bound the cost.
            max_perm = min(N - (m - 1) * delay, 1000)
            permutations = []

            for i in range(max_perm):
                if i + m * delay <= N:
                    indices = np.argsort(signal[i:i + m * delay:delay])
                    permutations.append(tuple(indices))

            if len(permutations) == 0:
                return 0.7

            # BUGFIX: np.unique on a list of tuples flattens to scalar index
            # values, counting individual indices instead of whole ordinal
            # patterns. Count each pattern as a unit instead.
            from collections import Counter
            counts = np.array(list(Counter(permutations).values()), dtype=float)
            probs = counts / len(permutations)

            # Shannon entropy in bits.
            entropy = -np.sum(probs * np.log2(probs + 1e-10))
            return max(0, entropy)

        except Exception as e:
            print(f"    排列熵计算异常: {str(e)}")
            return 0.7

class FeatureSelector:
    """Select informative features and visualize the feature space."""

    def __init__(self):
        """Initialize with no features selected yet."""
        self.selected_features = []
        self.selector = None

    def select_features(self, X, y, method='random_forest', k=50):
        """
        Select the k most informative features.

        Args:
            X (pd.DataFrame): Feature matrix.
            y (np.array): Class labels.
            method (str): 'random_forest', 'mutual_info' or 'pca'.
            k (int): Number of features (or PCA components) to keep.

        Returns:
            pd.DataFrame or np.ndarray: Selected columns of X; note that
                'pca' returns a plain array of transformed components.

        Raises:
            ValueError: If method is not one of the supported names.
        """
        if method == 'random_forest':
            # Rank features by random-forest impurity importance.
            model = RandomForestClassifier(n_estimators=100, random_state=42)
            model.fit(X, y)
            importances = model.feature_importances_
            indices = np.argsort(importances)[::-1][:k]
            self.selected_features = X.columns[indices]

        elif method == 'mutual_info':
            # Rank features by mutual information with the labels.
            mi_scores = mutual_info_classif(X, y)
            indices = np.argsort(mi_scores)[::-1][:k]
            self.selected_features = X.columns[indices]

        elif method == 'pca':
            pca = PCA(n_components=k)
            X_pca = pca.fit_transform(X)
            self.pca = pca
            return X_pca

        else:
            # BUGFIX: an unknown method used to fall through silently and
            # index X with the stale (possibly empty) previous selection.
            raise ValueError(f"Unknown feature selection method: {method}")

        return X[self.selected_features]

    def visualize_features(self, X, y, method='tsne'):
        """
        Project the feature space to 2D and scatter-plot it, colored by label.

        Args:
            X (np.array): Feature matrix.
            y (np.array): Class labels (used for point colors).
            method (str): 'tsne' or 'pca'.

        Raises:
            ValueError: If method is not supported.
        """
        if method == 'tsne':
            embedder = TSNE(n_components=2, random_state=42)
        elif method == 'pca':
            embedder = PCA(n_components=2)
        else:
            # BUGFIX: an unknown method used to hit a NameError on
            # X_embedded; fail early with a clear message instead.
            raise ValueError(f"Unknown visualization method: {method}")
        X_embedded = embedder.fit_transform(X)

        plt.figure(figsize=(12, 8))
        scatter = plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y, cmap='viridis', alpha=0.7)
        plt.colorbar(scatter)
        plt.title(f'{method.upper()} 特征空间可视化')
        plt.xlabel('主成分 1')
        plt.ylabel('主成分 2')
        plt.show()


class DataProcessingPipeline:
    """End-to-end pipeline: load files, extract features, select and visualize."""

    def __init__(self, data_path, target_fs=12000):
        """
        Wire up the preprocessing, feature-extraction and selection stages.

        Args:
            data_path (str): Directory containing the .mat data files.
            target_fs (int): Target sampling rate in Hz.
        """
        self.data_path = data_path
        self.target_fs = target_fs
        self.preprocessor = DataPreprocessor(data_path, target_fs)
        self.feature_extractor = FeatureExtractor(target_fs)
        self.feature_selector = FeatureSelector()
        self.features_df = None

    def process_data(self, file_list, bearing_type='SKF6205', feature_selection_method='random_forest', k=30):
        """
        Run the full pipeline over a list of files.

        Args:
            file_list (list): MAT file names to process.
            bearing_type (str): Bearing model key for fault-frequency features.
            feature_selection_method (str): Method passed to the selector.
            k (int): Number of features to keep.

        Returns:
            tuple: (X_selected, y, file_info) — selected feature matrix,
                integer-encoded fault labels, and per-file metadata DataFrame.
        """
        # Stage 1: load raw signals and metadata.
        print("步骤1: 加载数据...")
        all_data, file_info = self.preprocessor.load_mat_files(file_list)

        # Stage 2: extract features from the drive-end channel of each file.
        print("步骤2: 提取特征...")
        feature_rows = []
        for record in tqdm(all_data, desc="提取特征"):
            if record['de_signal'] is None:
                continue
            row = self.feature_extractor.extract_all_features(
                record['de_signal'], record['rpm'], bearing_type
            )
            # Carry the file metadata alongside the numeric features.
            for key in ('file_name', 'fault_type', 'fault_size', 'load_condition'):
                row[key] = record[key]
            feature_rows.append(row)

        self.features_df = pd.DataFrame(feature_rows)

        # Stage 3: split metadata from features and select the best ones.
        print("步骤3: 特征选择...")
        meta_columns = ['file_name', 'fault_type', 'fault_size', 'load_condition']
        X = self.features_df.drop(meta_columns, axis=1)
        y = LabelEncoder().fit_transform(self.features_df['fault_type'])
        X_selected = self.feature_selector.select_features(
            X, y, method=feature_selection_method, k=k
        )

        # Stage 4: 2-D embedding plot of the selected feature space.
        print("步骤4: 特征可视化...")
        self.feature_selector.visualize_features(X_selected, y, method='tsne')

        return X_selected, y, file_info

    def get_feature_importance(self, X, y):
        """
        Fit a random forest and return its per-feature importances.

        Args:
            X (pd.DataFrame): Feature matrix.
            y (np.array): Labels.

        Returns:
            np.array: Impurity-based importance per feature column.
        """
        forest = RandomForestClassifier(n_estimators=100, random_state=42)
        forest.fit(X, y)
        return forest.feature_importances_

    def plot_feature_importance(self, X, y, top_n=20):
        """
        Bar-chart the top_n most important features.

        Args:
            X (pd.DataFrame): Feature matrix.
            y (np.array): Labels.
            top_n (int): Number of top-ranked features to display.
        """
        scores = self.get_feature_importance(X, y)
        ranked = np.argsort(scores)[::-1][:top_n]

        plt.figure(figsize=(12, 8))
        plt.bar(range(top_n), scores[ranked])
        plt.xticks(range(top_n), [X.columns[i] for i in ranked], rotation=45)
        plt.title(f'前{top_n}个重要特征')
        plt.xlabel('特征')
        plt.ylabel('重要性')
        plt.tight_layout()
        plt.show()


def main():
    """Example entry point: run the full pipeline on a sample of files."""
    # Configuration
    data_path = "path/to/your/data"
    mat_files = [name for name in os.listdir(data_path) if name.endswith('.mat')]

    # Build the processing pipeline.
    pipeline = DataProcessingPipeline(data_path)

    # Process the first 50 files as a demonstration run.
    X_selected, y, file_info = pipeline.process_data(
        mat_files[:50],
        bearing_type='SKF6205',
        feature_selection_method='random_forest',
        k=30
    )

    # Show which features mattered most.
    pipeline.plot_feature_importance(X_selected, y, top_n=20)

    print(f"处理完成！选择了 {X_selected.shape[1]} 个特征，共 {X_selected.shape[0]} 个样本")
    print(f"类别分布: {np.bincount(y)}")


if __name__ == "__main__":
    main()
