import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import signal
from scipy.io import loadmat
from scipy.stats import kurtosis, skew, entropy
from scipy.fftpack import fft, fftfreq
import os
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import warnings

warnings.filterwarnings('ignore')

# Matplotlib configuration: SimHei supplies the CJK glyphs used in plot
# titles/labels, and unicode_minus=False keeps the minus sign renderable
# under that non-ASCII font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


class EnhancedBearingFeatureAnalyzer:
    def __init__(self, data_path):
        self.data_path = data_path
        self.features_df = None
        self.raw_signals = {}
        self.bearing_params = {
            'SKF6205': {'n': 9, 'd': 0.3126, 'D': 1.537},  # 驱动端
            'SKF6203': {'n': 9, 'd': 0.2656, 'D': 1.122}  # 风扇端
        }

        # 创建结果文件夹
        self.results_dir = "results"
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)
            print(f"创建结果文件夹: {self.results_dir}")

    def calculate_fault_frequencies(self, rpm, bearing_type='SKF6205'):
        """Compute the theoretical bearing fault characteristic frequencies.

        Args:
            rpm: Shaft speed in revolutions per minute.
            bearing_type: Key into self.bearing_params ('SKF6205' or 'SKF6203').

        Returns:
            dict with BPFO (outer race), BPFI (inner race), BSF (ball spin),
            FTF (cage) and FR (shaft rotation) frequencies, all in Hz.
        """
        fr = rpm / 60  # shaft rotation frequency (Hz)
        params = self.bearing_params[bearing_type]
        n, d, D = params['n'], params['d'], params['D']

        # Ball pass frequency, outer race
        bpfo = fr * n / 2 * (1 - d / D)
        # Ball pass frequency, inner race
        bpfi = fr * n / 2 * (1 + d / D)
        # Ball spin frequency.  Standard formula: fr * D / (2d) * (1 - (d/D)^2);
        # the previous version omitted the factor 1/2 and returned twice the BSF.
        bsf = fr * D / (2 * d) * (1 - (d / D) ** 2)
        # Fundamental train (cage) frequency
        ftf = fr / 2 * (1 - d / D)

        return {'BPFO': bpfo, 'BPFI': bpfi, 'BSF': bsf, 'FTF': ftf, 'FR': fr}

    def extract_comprehensive_features(self, signal_data, fs, rpm, bearing_type='SKF6205'):
        """提取全面的特征集合"""
        features = {}

        # 1. 基本时域特征
        features.update(self.extract_time_domain_features(signal_data))

        # 2. 频域特征
        freq_features, freqs, magnitude = self.extract_frequency_domain_features(signal_data, fs)
        features.update(freq_features)

        # 3. 时频域特征
        features.update(self.extract_time_frequency_features(signal_data, fs))

        # 4. 包络分析特征
        env_features, envelope = self.extract_envelope_features(signal_data, fs)
        features.update(env_features)

        # 5. 故障特征频率相关特征
        fault_freq_features = self.extract_fault_frequency_features(signal_data, fs, rpm, bearing_type)
        features.update(fault_freq_features)

        # 6. 高阶统计特征
        features.update(self.extract_higher_order_features(signal_data))

        # 7. 非线性特征
        features.update(self.extract_nonlinear_features(signal_data))

        # 8. 能量特征
        features.update(self.extract_energy_features(signal_data, fs))

        return features

    def extract_time_domain_features(self, signal_data):
        """提取时域特征"""
        features = {}

        # 基本统计特征
        features['mean'] = np.mean(signal_data)
        features['std'] = np.std(signal_data)
        features['var'] = np.var(signal_data)
        features['rms'] = np.sqrt(np.mean(signal_data ** 2))
        features['max'] = np.max(signal_data)
        features['min'] = np.min(signal_data)
        features['peak_to_peak'] = features['max'] - features['min']
        features['abs_mean'] = np.mean(np.abs(signal_data))

        # 高阶统计特征
        features['skewness'] = skew(signal_data)
        features['kurtosis'] = kurtosis(signal_data)

        # 形状因子
        if features['abs_mean'] != 0:
            features['shape_factor'] = features['rms'] / features['abs_mean']
            features['impulse_factor'] = features['max'] / features['abs_mean']
        else:
            features['shape_factor'] = 0
            features['impulse_factor'] = 0

        if features['rms'] != 0:
            features['crest_factor'] = features['max'] / features['rms']
            features['clearance_factor'] = features['max'] / (np.mean(np.sqrt(np.abs(signal_data))) ** 2)
        else:
            features['crest_factor'] = 0
            features['clearance_factor'] = 0

        # 能量特征
        features['energy'] = np.sum(signal_data ** 2)
        features['log_energy'] = np.log(features['energy'] + 1e-8)

        # 零交叉率
        zero_crossings = np.where(np.diff(np.sign(signal_data)))[0]
        features['zero_crossing_rate'] = len(zero_crossings) / len(signal_data)

        # 波峰因子
        features['waveform_factor'] = features['rms'] / features['abs_mean'] if features['abs_mean'] != 0 else 0

        return features

    def extract_frequency_domain_features(self, signal_data, fs):
        """提取频域特征"""
        # 计算FFT
        fft_data = np.fft.fft(signal_data)
        fft_magnitude = np.abs(fft_data)
        freqs = np.fft.fftfreq(len(signal_data), 1 / fs)

        # 只取正频率部分
        positive_freqs = freqs[:len(freqs) // 2]
        positive_magnitude = fft_magnitude[:len(fft_magnitude) // 2]

        features = {}

        # 基本频域统计特征
        features['freq_mean'] = np.mean(positive_magnitude)
        features['freq_std'] = np.std(positive_magnitude)
        features['freq_var'] = np.var(positive_magnitude)
        features['freq_max'] = np.max(positive_magnitude)
        features['freq_min'] = np.min(positive_magnitude)
        features['freq_rms'] = np.sqrt(np.mean(positive_magnitude ** 2))

        # 频谱重心和其他谱特征
        total_power = np.sum(positive_magnitude)
        if total_power > 0:
            features['spectral_centroid'] = np.sum(positive_freqs * positive_magnitude) / total_power
            features['spectral_spread'] = np.sqrt(
                np.sum(((positive_freqs - features['spectral_centroid']) ** 2) * positive_magnitude) / total_power)
            features['spectral_skewness'] = np.sum(
                ((positive_freqs - features['spectral_centroid']) ** 3) * positive_magnitude) / (
                                                        total_power * features['spectral_spread'] ** 3)
            features['spectral_kurtosis'] = np.sum(
                ((positive_freqs - features['spectral_centroid']) ** 4) * positive_magnitude) / (
                                                        total_power * features['spectral_spread'] ** 4)
        else:
            features['spectral_centroid'] = 0
            features['spectral_spread'] = 0
            features['spectral_skewness'] = 0
            features['spectral_kurtosis'] = 0

        # 频谱滚降点
        cumulative_energy = np.cumsum(positive_magnitude ** 2)
        total_energy = cumulative_energy[-1]
        rolloff_idx = np.where(cumulative_energy >= 0.85 * total_energy)[0]
        features['spectral_rolloff'] = positive_freqs[rolloff_idx[0]] if len(rolloff_idx) > 0 else fs / 2

        # 频谱熵
        power_spectrum = positive_magnitude ** 2
        power_spectrum_normalized = power_spectrum / np.sum(power_spectrum)
        features['spectral_entropy'] = entropy(power_spectrum_normalized + 1e-8)

        # 峰值频率
        peak_idx = np.argmax(positive_magnitude)
        features['peak_frequency'] = positive_freqs[peak_idx]
        features['peak_magnitude'] = positive_magnitude[peak_idx]

        # 频带能量分布
        total_energy = np.sum(positive_magnitude ** 2)
        freq_bands = [
            (0, fs / 16), (fs / 16, fs / 8), (fs / 8, fs / 4),
            (fs / 4, 3 * fs / 8), (3 * fs / 8, fs / 2), (fs / 2, 5 * fs / 8),
            (5 * fs / 8, 3 * fs / 4), (3 * fs / 4, 7 * fs / 8), (7 * fs / 8, fs)
        ]

        for i, (f_low, f_high) in enumerate(freq_bands):
            band_mask = (positive_freqs >= f_low) & (positive_freqs < f_high)
            band_energy = np.sum(positive_magnitude[band_mask] ** 2)
            features[f'band_{i + 1}_energy'] = band_energy
            features[f'band_{i + 1}_energy_ratio'] = band_energy / total_energy if total_energy > 0 else 0

        return features, positive_freqs, positive_magnitude

    def extract_time_frequency_features(self, signal_data, fs):
        """提取时频域特征"""
        features = {}

        # 短时傅里叶变换
        nperseg = min(1024, len(signal_data) // 4)
        f, t, Zxx = signal.stft(signal_data, fs, nperseg=nperseg)

        # 时频谱的统计特征
        magnitude_spectrogram = np.abs(Zxx)
        features['stft_mean'] = np.mean(magnitude_spectrogram)
        features['stft_std'] = np.std(magnitude_spectrogram)
        features['stft_max'] = np.max(magnitude_spectrogram)
        features['stft_energy'] = np.sum(magnitude_spectrogram ** 2)

        # 瞬时频率特征
        instantaneous_freq = np.diff(np.unwrap(np.angle(Zxx)), axis=0) * fs / (2 * np.pi)
        features['inst_freq_mean'] = np.mean(instantaneous_freq)
        features['inst_freq_std'] = np.std(instantaneous_freq)

        return features

    def extract_envelope_features(self, signal_data, fs):
        """提取包络分析特征"""
        # Hilbert变换获取包络
        analytic_signal = signal.hilbert(signal_data)
        envelope = np.abs(analytic_signal)
        instantaneous_phase = np.angle(analytic_signal)
        instantaneous_frequency = np.diff(np.unwrap(instantaneous_phase)) * fs / (2 * np.pi)

        features = {}

        # 包络统计特征
        features['envelope_max'] = np.max(envelope)
        features['envelope_min'] = np.min(envelope)
        features['envelope_mean'] = np.mean(envelope)
        features['envelope_std'] = np.std(envelope)
        features['envelope_var'] = np.var(envelope)
        features['envelope_rms'] = np.sqrt(np.mean(envelope ** 2))
        features['envelope_skewness'] = skew(envelope)
        features['envelope_kurtosis'] = kurtosis(envelope)

        # 包络形状因子
        envelope_abs_mean = np.mean(np.abs(envelope))
        if envelope_abs_mean != 0:
            features['envelope_shape_factor'] = features['envelope_rms'] / envelope_abs_mean
            features['envelope_crest_factor'] = features['envelope_max'] / features['envelope_rms']
        else:
            features['envelope_shape_factor'] = 0
            features['envelope_crest_factor'] = 0

        # 包络FFT
        envelope_fft = np.fft.fft(envelope)
        envelope_magnitude = np.abs(envelope_fft)
        envelope_freqs = np.fft.fftfreq(len(envelope), 1 / fs)

        positive_env_freqs = envelope_freqs[:len(envelope_freqs) // 2]
        positive_env_magnitude = envelope_magnitude[:len(envelope_magnitude) // 2]

        # 包络谱特征
        if len(positive_env_magnitude) > 0:
            env_peak_idx = np.argmax(positive_env_magnitude)
            features['envelope_peak_freq'] = positive_env_freqs[env_peak_idx]
            features['envelope_peak_magnitude'] = positive_env_magnitude[env_peak_idx]

            # 包络谱重心
            env_total_power = np.sum(positive_env_magnitude)
            if env_total_power > 0:
                features['envelope_spectral_centroid'] = np.sum(
                    positive_env_freqs * positive_env_magnitude) / env_total_power
            else:
                features['envelope_spectral_centroid'] = 0
        else:
            features['envelope_peak_freq'] = 0
            features['envelope_peak_magnitude'] = 0
            features['envelope_spectral_centroid'] = 0

        # 瞬时频率特征
        if len(instantaneous_frequency) > 0:
            features['inst_freq_mean'] = np.mean(instantaneous_frequency)
            features['inst_freq_std'] = np.std(instantaneous_frequency)
            features['inst_freq_max'] = np.max(instantaneous_frequency)
        else:
            features['inst_freq_mean'] = 0
            features['inst_freq_std'] = 0
            features['inst_freq_max'] = 0

        return features, envelope

    def extract_fault_frequency_features(self, signal_data, fs, rpm, bearing_type='SKF6205'):
        """提取故障特征频率相关特征"""
        features = {}

        # 计算故障特征频率
        fault_freqs = self.calculate_fault_frequencies(rpm, bearing_type)

        # 计算FFT
        fft_data = np.fft.fft(signal_data)
        fft_magnitude = np.abs(fft_data)
        freqs = np.fft.fftfreq(len(signal_data), 1 / fs)
        positive_freqs = freqs[:len(freqs) // 2]
        positive_magnitude = fft_magnitude[:len(fft_magnitude) // 2]

        # 在故障特征频率及其谐波处提取能量
        for fault_type, fault_freq in fault_freqs.items():
            # 基频及前5个谐波
            for harmonic in range(1, 6):
                target_freq = fault_freq * harmonic
                freq_tolerance = 5  # Hz

                # 查找目标频率附近的能量
                freq_mask = (positive_freqs >= target_freq - freq_tolerance) & (
                            positive_freqs <= target_freq + freq_tolerance)

                if np.any(freq_mask):
                    energy = np.sum(positive_magnitude[freq_mask] ** 2)
                    peak_magnitude = np.max(positive_magnitude[freq_mask])
                    features[f'{fault_type}_{harmonic}H_energy'] = energy
                    features[f'{fault_type}_{harmonic}H_peak'] = peak_magnitude
                else:
                    features[f'{fault_type}_{harmonic}H_energy'] = 0
                    features[f'{fault_type}_{harmonic}H_peak'] = 0

            # 总体故障频率能量（所有谐波之和）
            total_energy = sum([features[f'{fault_type}_{h}H_energy'] for h in range(1, 6)])
            features[f'{fault_type}_total_energy'] = total_energy

        return features

    def extract_higher_order_features(self, signal_data):
        """提取高阶统计特征"""
        features = {}

        # 高阶矩
        for order in range(3, 7):
            moment = np.mean(signal_data ** order)
            features[f'moment_{order}'] = moment

            # 中心矩
            central_moment = np.mean((signal_data - np.mean(signal_data)) ** order)
            features[f'central_moment_{order}'] = central_moment

        # 累积量
        mean_val = np.mean(signal_data)
        std_val = np.std(signal_data)

        if std_val > 0:
            # 标准化数据
            normalized_signal = (signal_data - mean_val) / std_val

            # 三阶和四阶累积量
            features['cumulant_3'] = np.mean(normalized_signal ** 3)
            features['cumulant_4'] = np.mean(normalized_signal ** 4) - 3
        else:
            features['cumulant_3'] = 0
            features['cumulant_4'] = 0

        return features

    def extract_nonlinear_features(self, signal_data):
        """Extract nonlinear complexity features: sample entropy, approximate
        entropy and a box-counting fractal dimension.

        All three use simplified O(n^2) implementations, so they only operate
        on a truncated prefix of the signal (1000 or 2000 samples).
        """
        features = {}

        # Sample entropy (simplified version).
        # NOTE(review): this counts self-matches (j == i) and averages
        # log(C_i/N) per template, which differs from the canonical SampEn
        # definition — treat the value as a relative complexity score only.
        def sample_entropy(data, m=2, r=None):
            if r is None:
                r = 0.2 * np.std(data)  # common tolerance choice: 0.2 * sigma

            def _maxdist(xi, xj, m):
                # Chebyshev distance between two m-length templates
                return max([abs(ua - va) for ua, va in zip(xi, xj)])

            def _phi(m):
                # All overlapping m-length templates
                patterns = np.array([data[i:i + m] for i in range(len(data) - m + 1)])
                C = np.zeros(len(patterns))
                for i in range(len(patterns)):
                    template_i = patterns[i]
                    for j in range(len(patterns)):
                        if _maxdist(template_i, patterns[j], m) <= r:
                            C[i] += 1.0
                phi = np.mean(np.log(C / len(patterns)))
                return phi

            try:
                return _phi(m) - _phi(m + 1)
            except:  # noqa: E722 — any numerical failure degrades to 0
                return 0

        # Quadratic cost, so cap at 1000 samples
        features['sample_entropy'] = sample_entropy(signal_data[:min(1000, len(signal_data))])

        # Approximate entropy — same structure as sample_entropy above, but
        # with a vectorized Chebyshev distance between templates.
        def approximate_entropy(data, m=2, r=None):
            if r is None:
                r = 0.2 * np.std(data)

            def _patterns(m):
                # All overlapping m-length templates
                return np.array([data[i:i + m] for i in range(len(data) - m + 1)])

            def _phi(m):
                patterns = _patterns(m)
                C = np.zeros(len(patterns))
                for i in range(len(patterns)):
                    template_i = patterns[i]
                    for j in range(len(patterns)):
                        if np.max(np.abs(template_i - patterns[j])) <= r:
                            C[i] += 1.0
                phi = np.mean(np.log(C / len(patterns)))
                return phi

            try:
                return _phi(m) - _phi(m + 1)
            except:  # noqa: E722 — any numerical failure degrades to 0
                return 0

        features['approximate_entropy'] = approximate_entropy(signal_data[:min(1000, len(signal_data))])

        # Fractal dimension (simplified box-counting over 1-D segments).
        # A "box" here is a fixed-length segment with non-zero amplitude range.
        def fractal_dimension(data, max_box_size=None):
            if max_box_size is None:
                max_box_size = len(data) // 10

            # NOTE(review): integer-cast logspace can produce duplicate scales,
            # and a zero box count would make np.log emit -inf and degrade the
            # polyfit below — appears unhandled; confirm acceptable for this data.
            scales = np.logspace(0.5, np.log10(max_box_size), num=10, dtype=int)
            counts = []

            for scale in scales:
                if scale >= len(data):
                    continue
                boxes = len(data) // scale
                count = 0
                for i in range(boxes):
                    segment = data[i * scale:(i + 1) * scale]
                    if len(segment) > 1 and np.max(segment) - np.min(segment) > 0:
                        count += 1
                counts.append(count)

            # Slope of log(count) vs log(scale) estimates the dimension
            if len(counts) > 1 and len(scales[:len(counts)]) > 1:
                scales = scales[:len(counts)]
                coeffs = np.polyfit(np.log(scales), np.log(counts), 1)
                return -coeffs[0]
            else:
                return 1.5  # fallback when too few usable scales

        features['fractal_dimension'] = fractal_dimension(signal_data[:min(2000, len(signal_data))])

        return features

    def extract_energy_features(self, signal_data, fs):
        """提取能量相关特征"""
        features = {}

        # 总能量
        total_energy = np.sum(signal_data ** 2)
        features['total_energy'] = total_energy
        features['normalized_energy'] = total_energy / len(signal_data)

        # 相对变异指数（RVI）
        rms_val = np.sqrt(np.mean(signal_data ** 2))
        features['relative_variance_index'] = np.var(signal_data) / (rms_val ** 2) if rms_val > 0 else 0

        # Teager-Kaiser能量算子
        def teager_kaiser_energy(x):
            if len(x) < 3:
                return 0
            tke = x[1:-1] ** 2 - x[:-2] * x[2:]
            return np.mean(tke)

        features['teager_kaiser_energy'] = teager_kaiser_energy(signal_data)

        # 小波能量（简化版）
        try:
            from scipy.signal import cwt, ricker
            scales = np.arange(1, 32)
            coefficients = cwt(signal_data[:min(1024, len(signal_data))], ricker, scales)
            features['wavelet_energy'] = np.sum(coefficients ** 2)
        except:
            features['wavelet_energy'] = 0

        return features

    def load_and_process_data(self, max_files_per_category=25):
        """Walk self.data_path, load .mat vibration records, extract the full
        feature set for each file, and collect everything into self.features_df.

        Args:
            max_files_per_category: Cap on files loaded per fault class
                (N / B / IR / OR).

        Returns:
            pandas.DataFrame with one row per file (features + label +
            filename); also written to <results_dir>/bearing_features.csv.
        """
        print("开始加载和处理源域数据...")

        all_features = []
        all_labels = []
        all_filenames = []
        self.raw_signals = {}

        file_count = 0
        category_counts = {'N': 0, 'B': 0, 'IR': 0, 'OR': 0}

        # Recursively scan the data directory for MATLAB files
        for root, dirs, files in os.walk(self.data_path):
            for file in files:
                if file.endswith('.mat') and file_count < max_files_per_category * 4:
                    file_path = os.path.join(root, file)

                    try:
                        # Infer the fault label from the file/folder name.
                        # Order matters: the specific 'OR'/'IR' markers are
                        # tested before the broader 'B'/'N' single-letter tests.
                        if 'OR' in file or 'OR' in root:
                            label = 'OR'  # outer race fault
                        elif 'IR' in file or 'IR' in root:
                            label = 'IR'  # inner race fault
                        elif 'B' in file or ('B' in root and 'BA' not in root):
                            label = 'B'  # rolling element (ball) fault
                        elif 'N' in file or 'Normal' in root:
                            label = 'N'  # healthy baseline
                        else:
                            continue

                        # Respect the per-class cap
                        if category_counts[label] >= max_files_per_category:
                            continue

                        mat_data = loadmat(file_path)

                        # Pick the vibration channel.  Prefer the drive-end
                        # (DE) accelerometer; fall back to the fan-end (FE)
                        # channel only when no DE key exists.  (The previous
                        # single if/elif loop took whichever key iterated first
                        # and did not actually prioritize DE.)
                        signal_data = None
                        rpm = 1797  # default shaft speed when the file has no RPM key

                        for key in mat_data.keys():
                            if 'DE_time' in key:
                                signal_data = mat_data[key].flatten()
                                break
                        if signal_data is None:
                            for key in mat_data.keys():
                                if 'FE_time' in key:
                                    signal_data = mat_data[key].flatten()
                                    break

                        # Recorded shaft speed, if present.  np.ravel(...)[0]
                        # extracts the scalar explicitly; float() on a non-0d
                        # ndarray is deprecated in NumPy >= 1.25.
                        for key in mat_data.keys():
                            if 'RPM' in key:
                                rpm = float(np.ravel(mat_data[key])[0])
                                break

                        if signal_data is not None and len(signal_data) > 1000:
                            # Fixed-length prefix (~10 s at 12 kHz)
                            signal_length = min(120000, len(signal_data))
                            signal_data = signal_data[:signal_length]

                            # Sampling rate is encoded in the folder name
                            fs = 12000 if '12k' in root else 48000

                            # Bearing model depends on the sensor position
                            bearing_type = 'SKF6205' if 'DE' in root else 'SKF6203'

                            # Full feature extraction for this record
                            features = self.extract_comprehensive_features(signal_data, fs, rpm, bearing_type)

                            # Attach metadata alongside the features
                            features['rpm'] = rpm
                            features['filename'] = file
                            features['sampling_rate'] = fs
                            features['bearing_type'] = bearing_type
                            features['signal_length'] = len(signal_data)

                            all_features.append(features)
                            all_labels.append(label)
                            all_filenames.append(file)

                            # Keep the raw signal for the plotting helpers
                            self.raw_signals[file] = {
                                'signal': signal_data,
                                'fs': fs,
                                'label': label,
                                'rpm': rpm
                            }

                            category_counts[label] += 1
                            file_count += 1
                            print(f"处理文件: {file}, 标签: {label}, 特征数: {len(features)}")

                    except Exception as e:
                        # Best effort: a corrupt file must not abort the scan
                        print(f"处理文件 {file} 时出错: {e}")
                        continue

        # Assemble the feature table
        self.features_df = pd.DataFrame(all_features)
        self.features_df['label'] = all_labels
        self.features_df['filename'] = all_filenames

        print(f"\n数据加载完成！")
        print(f"总样本数: {len(self.features_df)}")
        print(f"标签分布:\n{pd.Series(all_labels).value_counts()}")

        # Persist the feature table (utf-8-sig BOM keeps Excel happy with CJK)
        self.features_df.to_csv(f'{self.results_dir}/bearing_features.csv', index=False, encoding='utf-8-sig')
        print(f"特征数据已保存到 {self.results_dir}/bearing_features.csv")

        return self.features_df

    def plot_signal_overview(self):
        """Plot a 4x4 overview (waveform, spectrum, envelope, spectrogram) for
        one representative signal per fault class and save it under results/.

        Relies on self.raw_signals being populated by load_and_process_data().
        """
        fig = plt.figure(figsize=(20, 16))

        # Pick the first stored file of each class as its representative sample
        sample_files = {}
        for label in ['N', 'B', 'IR', 'OR']:
            label_files = [f for f, info in self.raw_signals.items() if info['label'] == label]
            if label_files:
                sample_files[label] = label_files[0]

        for i, (label, filename) in enumerate(sample_files.items()):
            if filename not in self.raw_signals:
                continue

            signal_info = self.raw_signals[filename]
            signal_data = signal_info['signal']
            fs = signal_info['fs']

            # Only show a short leading segment so individual impacts stay visible
            display_length = min(4000, len(signal_data))
            signal_segment = signal_data[:display_length]
            time_axis = np.arange(len(signal_segment)) / fs

            # Column 1: time-domain waveform
            ax1 = plt.subplot(4, 4, i * 4 + 1)
            plt.plot(time_axis, signal_segment, linewidth=0.8)
            plt.title(f'{label} - 时域信号', fontsize=10)
            plt.xlabel('时间 (s)')
            plt.ylabel('幅值')
            plt.grid(True, alpha=0.3)

            # Column 2: one-sided magnitude spectrum (log scale)
            ax2 = plt.subplot(4, 4, i * 4 + 2)
            freqs = np.fft.fftfreq(len(signal_segment), 1 / fs)
            fft_magnitude = np.abs(np.fft.fft(signal_segment))
            positive_freqs = freqs[:len(freqs) // 2]
            positive_magnitude = fft_magnitude[:len(fft_magnitude) // 2]

            plt.semilogy(positive_freqs, positive_magnitude, linewidth=0.8)
            plt.title(f'{label} - 频域信号', fontsize=10)
            plt.xlabel('频率 (Hz)')
            plt.ylabel('幅值')
            plt.xlim(0, min(3000, fs / 2))
            plt.grid(True, alpha=0.3)

            # Column 3: Hilbert envelope mirrored over the raw waveform
            ax3 = plt.subplot(4, 4, i * 4 + 3)
            analytic_signal = signal.hilbert(signal_segment)
            envelope = np.abs(analytic_signal)
            plt.plot(time_axis, signal_segment, alpha=0.5, label='原信号', linewidth=0.6)
            plt.plot(time_axis, envelope, 'r-', label='包络', linewidth=1.2)
            plt.plot(time_axis, -envelope, 'r-', linewidth=1.2)
            plt.title(f'{label} - 包络分析', fontsize=10)
            plt.xlabel('时间 (s)')
            plt.ylabel('幅值')
            plt.legend(fontsize=8)
            plt.grid(True, alpha=0.3)

            # Column 4: STFT spectrogram
            ax4 = plt.subplot(4, 4, i * 4 + 4)
            nperseg = min(256, len(signal_segment) // 4)
            f, t, Zxx = signal.stft(signal_segment, fs, nperseg=nperseg)
            plt.pcolormesh(t, f, np.abs(Zxx), shading='gouraud', cmap='viridis')
            plt.title(f'{label} - 时频图', fontsize=10)
            plt.xlabel('时间 (s)')
            plt.ylabel('频率 (Hz)')
            plt.ylim(0, min(2000, fs / 2))
            plt.colorbar(label='幅值')

        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/signal_overview.png', dpi=300, bbox_inches='tight')
        plt.show()

    def plot_fault_frequency_analysis(self):
        """Plot the theoretical fault frequencies versus shaft speed, plus the
        measured spectrum of one sample per fault class with that class's
        theoretical fault-frequency harmonics marked.

        Relies on self.raw_signals being populated by load_and_process_data().
        """
        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle('轴承故障特征频率分析', fontsize=16)

        # Theoretical fault frequencies over a 1000-2000 RPM sweep
        rpm_range = np.linspace(1000, 2000, 100)
        fault_freqs = {fault_type: [] for fault_type in ['BPFO', 'BPFI', 'BSF', 'FTF']}

        for rpm in rpm_range:
            freqs = self.calculate_fault_frequencies(rpm)
            for fault_type in fault_freqs.keys():
                fault_freqs[fault_type].append(freqs[fault_type])

        # Top-left panel: fault frequency vs. RPM curves
        ax = axes[0, 0]
        colors = ['blue', 'red', 'green', 'orange']
        for i, (fault_type, freq_list) in enumerate(fault_freqs.items()):
            ax.plot(rpm_range, freq_list, color=colors[i], label=fault_type, linewidth=2)

        ax.set_xlabel('转速 (RPM)')
        ax.set_ylabel('故障特征频率 (Hz)')
        ax.set_title('故障特征频率随转速变化')
        ax.legend()
        ax.grid(True, alpha=0.3)

        # Remaining panels: measured spectra with theoretical harmonics marked
        for idx, (label, ax) in enumerate(zip(['B', 'IR', 'OR'], axes.flat[1:])):
            label_files = [f for f, info in self.raw_signals.items() if info['label'] == label]
            if not label_files:
                continue

            filename = label_files[0]
            signal_info = self.raw_signals[filename]
            signal_data = signal_info['signal']
            fs = signal_info['fs']
            rpm = signal_info['rpm']

            # Theoretical frequencies at this file's recorded shaft speed
            theoretical_freqs = self.calculate_fault_frequencies(rpm)

            # One-sided magnitude spectrum
            freqs = np.fft.fftfreq(len(signal_data), 1 / fs)
            fft_magnitude = np.abs(np.fft.fft(signal_data))
            positive_freqs = freqs[:len(freqs) // 2]
            positive_magnitude = fft_magnitude[:len(fft_magnitude) // 2]

            ax.semilogy(positive_freqs, positive_magnitude, alpha=0.7, linewidth=0.8)

            # Mark the first three harmonics of the fault type for this class
            fault_type_map = {'B': 'BSF', 'IR': 'BPFI', 'OR': 'BPFO'}
            target_fault = fault_type_map.get(label)

            if target_fault:
                target_freq = theoretical_freqs[target_fault]
                for harmonic in range(1, 4):
                    harmonic_freq = target_freq * harmonic
                    if harmonic_freq < max(positive_freqs):
                        # Label only the first harmonic to keep the legend compact
                        ax.axvline(x=harmonic_freq, color='red', linestyle='--',
                                   alpha=0.8, label=f'{target_fault} {harmonic}H' if harmonic == 1 else None)

            ax.set_xlabel('频率 (Hz)')
            ax.set_ylabel('幅值')
            ax.set_title(f'{label}类故障频谱分析 (RPM={rpm})')
            ax.set_xlim(0, min(1000, fs / 2))
            ax.grid(True, alpha=0.3)
            if target_fault:
                ax.legend()

        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/fault_frequency_analysis.png', dpi=300, bbox_inches='tight')
        plt.show()
    def plot_envelope_analysis(self):
        """Plot envelope analysis (waveform + envelope, envelope spectrum and
        envelope statistics) for one sample per fault class; save to results/.

        Relies on self.raw_signals being populated by load_and_process_data().
        """
        fig, axes = plt.subplots(3, 4, figsize=(20, 15))
        fig.suptitle('轴承故障包络分析', fontsize=16)

        labels = ['N', 'B', 'IR', 'OR']
        for i, label in enumerate(labels):
            label_files = [f for f, info in self.raw_signals.items() if info['label'] == label]
            if not label_files:
                continue

            filename = label_files[0]
            signal_info = self.raw_signals[filename]
            signal_data = signal_info['signal'][:8000]  # short prefix keeps plots readable
            fs = signal_info['fs']
            time_axis = np.arange(len(signal_data)) / fs

            # Row 1: raw waveform with its mirrored Hilbert envelope
            ax1 = axes[0, i]
            analytic_signal = signal.hilbert(signal_data)
            envelope = np.abs(analytic_signal)

            ax1.plot(time_axis, signal_data, alpha=0.6, linewidth=0.8, label='原信号')
            ax1.plot(time_axis, envelope, 'r-', linewidth=1.5, label='包络')
            ax1.plot(time_axis, -envelope, 'r-', linewidth=1.5)
            ax1.set_title(f'{label} - 原信号与包络')
            ax1.set_xlabel('时间 (s)')
            ax1.set_ylabel('幅值')
            ax1.legend()
            ax1.grid(True, alpha=0.3)

            # Row 2: envelope spectrum (fault signatures live at low frequencies)
            ax2 = axes[1, i]
            envelope_fft = np.fft.fft(envelope)
            envelope_freqs = np.fft.fftfreq(len(envelope), 1 / fs)
            envelope_magnitude = np.abs(envelope_fft)

            positive_env_freqs = envelope_freqs[:len(envelope_freqs) // 2]
            positive_env_magnitude = envelope_magnitude[:len(envelope_magnitude) // 2]

            ax2.semilogy(positive_env_freqs, positive_env_magnitude, linewidth=1.0)
            ax2.set_title(f'{label} - 包络频谱')
            ax2.set_xlabel('频率 (Hz)')
            ax2.set_ylabel('幅值')
            ax2.set_xlim(0, min(500, fs / 2))
            ax2.grid(True, alpha=0.3)

            # Row 3: bar chart of basic envelope statistics
            ax3 = axes[2, i]
            env_stats = {
                'RMS': np.sqrt(np.mean(envelope ** 2)),
                '峰度': kurtosis(envelope),
                '偏度': skew(envelope),
                '峰值因子': np.max(envelope) / np.sqrt(np.mean(envelope ** 2)) if np.sqrt(
                    np.mean(envelope ** 2)) > 0 else 0
            }

            features = list(env_stats.keys())
            values = list(env_stats.values())

            bars = ax3.bar(features, values, alpha=0.7)
            ax3.set_title(f'{label} - 包络统计特征')
            ax3.set_ylabel('数值')
            ax3.tick_params(axis='x', rotation=45)

            # Annotate each bar with its numeric value
            for bar, value in zip(bars, values):
                ax3.text(bar.get_x() + bar.get_width() / 2., bar.get_height() + 0.01,
                         f'{value:.2f}', ha='center', va='bottom')

        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/envelope_analysis.png', dpi=300, bbox_inches='tight')
        plt.show()

    def plot_feature_distributions(self):
        """Box-plot six key statistical features per fault class, overlay the
        class means, and save the figure under results/.

        Relies on self.features_df being populated by load_and_process_data().
        """
        key_features = ['mean', 'std', 'skewness', 'kurtosis', 'rms', 'crest_factor']

        fig, axes = plt.subplots(2, 3, figsize=(18, 12))
        axes = axes.flatten()
        fig.suptitle('轴承信号统计特性分析', fontsize=16)

        for ax, feature in zip(axes, key_features):
            if feature not in self.features_df.columns:
                continue

            # Collect the feature values per class, skipping empty classes
            grouped, present_labels = [], []
            for label in ('N', 'B', 'IR', 'OR'):
                values = self.features_df[self.features_df['label'] == label][feature]
                if len(values) > 0:
                    grouped.append(values)
                    present_labels.append(label)

            if not grouped:
                continue

            boxes = ax.boxplot(grouped, labels=present_labels, patch_artist=True)

            # Color each class's box consistently across subplots
            for patch, color in zip(boxes['boxes'], ['green', 'blue', 'red', 'orange']):
                patch.set_facecolor(color)
                patch.set_alpha(0.7)

            ax.set_title(f'{feature} 分布对比')
            ax.set_ylabel(feature)
            ax.grid(True, alpha=0.3)

            # Overlay the class means as red diamonds
            class_means = [np.mean(g) for g in grouped]
            ax.scatter(range(1, len(class_means) + 1), class_means, color='red',
                       marker='D', s=50, zorder=3, label='均值')
            ax.legend()

        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/feature_distributions.png', dpi=300, bbox_inches='tight')
        plt.show()

    def plot_correlation_analysis(self):
        """Plot a lower-triangle heatmap of Pearson correlations between features.

        Only features actually present in ``self.features_df`` are included;
        the figure is skipped entirely when fewer than two are available.
        """
        candidate_cols = ['mean', 'std', 'skewness', 'kurtosis', 'rms', 'crest_factor',
                          'spectral_centroid', 'envelope_max', 'BPFO_total_energy',
                          'BPFI_total_energy', 'BSF_total_energy']
        usable = [c for c in candidate_cols if c in self.features_df.columns]

        # Need at least a pair of features to correlate.
        if len(usable) <= 1:
            return

        plt.figure(figsize=(14, 10))
        corr = self.features_df[usable].corr()

        # Mask the upper triangle so each feature pair is annotated once.
        upper = np.triu(np.ones_like(corr, dtype=bool))
        sns.heatmap(corr, mask=upper, annot=True, cmap='coolwarm', center=0,
                    square=True, fmt='.2f', cbar_kws={"shrink": .8})
        plt.title('特征相关性分析', fontsize=16)
        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/correlation_analysis.png', dpi=300, bbox_inches='tight')
        plt.show()

    def plot_feature_space_analysis(self):
        """Visualize the multi-dimensional feature space in a 2x2 figure.

        Panels:
          1. PCA projection (PC1/PC2) colored by fault label.
          2. Per-component explained variance with a cumulative curve.
          3. t-SNE embedding of a reproducible subsample (at most 500 rows).
          4. K-means (k=4) clustering in the PC1/PC2 plane.

        Skips with a console message when fewer than two usable feature
        columns exist. Saves to ``results/feature_space_analysis.png``.
        """
        main_features = [col for col in self.features_df.columns
                         if col not in ['label', 'filename', 'rpm', 'sampling_rate', 'bearing_type', 'signal_length']]

        if len(main_features) < 2:
            print("特征数量不足，跳过特征空间分析")
            return

        # Sanitize: NaN/inf would break the scaler and the decompositions.
        feature_data = self.features_df[main_features].fillna(0)
        feature_data = feature_data.replace([np.inf, -np.inf], np.nan).fillna(0)

        scaler = StandardScaler()
        scaled_features = scaler.fit_transform(feature_data)

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle('多维特征空间分析', fontsize=16)

        # --- 1) PCA projection colored by label ---------------------------
        pca = PCA(n_components=min(10, len(main_features)))
        pca_result = pca.fit_transform(scaled_features)

        ax1 = axes[0, 0]
        colors = {'N': 'green', 'B': 'blue', 'IR': 'red', 'OR': 'orange'}

        for label, color in colors.items():
            mask = (self.features_df['label'] == label).values
            if np.any(mask):
                ax1.scatter(pca_result[mask, 0], pca_result[mask, 1],
                            c=color, label=label, alpha=0.7, s=30)

        ax1.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%})')
        ax1.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%})')
        ax1.set_title('PCA特征空间分布')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # --- 2) Explained variance per component --------------------------
        ax2 = axes[0, 1]
        explained_variance = pca.explained_variance_ratio_[:min(10, len(pca.explained_variance_ratio_))]
        component_names = [f'PC{i + 1}' for i in range(len(explained_variance))]

        bars = ax2.bar(component_names, explained_variance, alpha=0.7)
        ax2.set_title('主成分贡献度')
        ax2.set_ylabel('解释方差比')
        ax2.tick_params(axis='x', rotation=45)

        cumulative_variance = np.cumsum(explained_variance)
        ax2_twin = ax2.twinx()
        # The 'ro-' format string already encodes red circles; a separate
        # color kwarg would be redundant/conflicting, so it is not repeated.
        ax2_twin.plot(component_names, cumulative_variance, 'ro-', linewidth=2)
        ax2_twin.set_ylabel('累积贡献度', color='red')
        ax2_twin.tick_params(axis='y', labelcolor='red')

        for bar, value in zip(bars, explained_variance):
            ax2.text(bar.get_x() + bar.get_width() / 2., bar.get_height() + 0.005,
                     f'{value:.3f}', ha='center', va='bottom')

        # --- 3) t-SNE embedding -------------------------------------------
        ax3 = axes[1, 0]

        # Subsample large datasets for t-SNE speed; seed the choice so the
        # figure is reproducible run-to-run (consistent with random_state=42
        # used everywhere else in this method).
        if len(scaled_features) > 500:
            rng = np.random.RandomState(42)
            sample_indices = rng.choice(len(scaled_features), 500, replace=False)
            sample_features = scaled_features[sample_indices]
            sample_labels = self.features_df.iloc[sample_indices]['label'].values
        else:
            sample_features = scaled_features
            sample_labels = self.features_df['label'].values

        # perplexity must lie in [1, n_samples - 1]; clamp so tiny datasets
        # (fewer than 4 rows) don't make TSNE raise.
        perplexity = min(30, len(sample_features) // 4)
        perplexity = max(1, min(perplexity, len(sample_features) - 1))
        tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
        tsne_result = tsne.fit_transform(sample_features)

        for label, color in colors.items():
            mask = sample_labels == label
            if np.any(mask):
                ax3.scatter(tsne_result[mask, 0], tsne_result[mask, 1],
                            c=color, label=label, alpha=0.7, s=30)

        ax3.set_xlabel('t-SNE 第一维')
        ax3.set_ylabel('t-SNE 第二维')
        ax3.set_title('t-SNE特征空间分布')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # --- 4) K-means clustering in the PC1/PC2 plane -------------------
        ax4 = axes[1, 1]

        kmeans = KMeans(n_clusters=4, random_state=42)
        cluster_labels = kmeans.fit_predict(pca_result[:, :2])

        scatter = ax4.scatter(pca_result[:, 0], pca_result[:, 1],
                              c=cluster_labels, cmap='viridis', alpha=0.7, s=30)

        # Centers are already 2-D because the fit used only PC1/PC2.
        centers_2d = kmeans.cluster_centers_
        ax4.scatter(centers_2d[:, 0], centers_2d[:, 1],
                    c='red', marker='x', s=200, linewidths=3, label='聚类中心')

        ax4.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%})')
        ax4.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%})')
        ax4.set_title('K-means聚类结果')
        ax4.legend()
        ax4.grid(True, alpha=0.3)
        plt.colorbar(scatter, ax=ax4, label='聚类标签')

        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/feature_space_analysis.png', dpi=300, bbox_inches='tight')
        plt.show()

    def plot_feature_importance(self):
        """Rank features by variance, class separability, and a combined score.

        Produces three horizontal bar charts (top 20 features each):
          1. Each standardized feature's share of total variance.
          2. Fisher-style ratio of between-class to within-class variance.
          3. Equal-weight combination of the two min-max-normalized scores.

        The figure is saved to ``results/feature_importance.png``.
        """
        main_features = [col for col in self.features_df.columns
                         if col not in ['label', 'filename', 'rpm', 'sampling_rate', 'bearing_type', 'signal_length']]

        # Sanitize NaN/inf before standardizing.
        feature_data = self.features_df[main_features].fillna(0)
        feature_data = feature_data.replace([np.inf, -np.inf], np.nan).fillna(0)

        scaler = StandardScaler()
        scaled_features = scaler.fit_transform(feature_data)

        fig, axes = plt.subplots(1, 3, figsize=(18, 6))
        fig.suptitle('特征重要性分析', fontsize=16)

        def _plot_top20(ax, scores, xlabel, title):
            # Shared renderer for the three ranking panels: a descending
            # horizontal bar chart of the 20 highest-scoring features.
            scores = np.asarray(scores)
            order = np.argsort(scores)[::-1][:20]
            names = [main_features[i] for i in order]
            ax.barh(range(len(names)), scores[order])
            ax.set_yticks(range(len(names)))
            ax.set_yticklabels(names, fontsize=8)
            ax.set_xlabel(xlabel)
            ax.set_title(title)
            ax.invert_yaxis()

        # 1) Variance-based importance: normalized to sum to 1.
        feature_variances = np.var(scaled_features, axis=0)
        variance_importance = feature_variances / np.sum(feature_variances)
        _plot_top20(axes[0], variance_importance, '归一化方差', '基于方差的特征重要性')

        # 2) Class-separation importance: size-weighted between-class variance
        #    over size-weighted within-class variance (Fisher-criterion style).
        labels = self.features_df['label'].values
        unique_labels = np.unique(labels)
        class_separation_scores = []

        for i in range(len(main_features)):
            feature_values = scaled_features[:, i]
            overall_mean = np.mean(feature_values)

            between_class_var = 0.0
            within_class_var = 0.0
            for label in unique_labels:
                class_values = feature_values[labels == label]
                if len(class_values) > 0:
                    class_size = len(class_values)
                    between_class_var += class_size * (np.mean(class_values) - overall_mean) ** 2
                    within_class_var += class_size * np.var(class_values)

            # Epsilon guards against division by zero for constant features.
            class_separation_scores.append(between_class_var / (within_class_var + 1e-8))

        _plot_top20(axes[1], class_separation_scores, '类别分离度', '基于类别分离度的特征重要性')

        # 3) Combined score: equal-weight sum of the two min-max-normalized scores.
        norm_variance = (variance_importance - np.min(variance_importance)) / (
                np.max(variance_importance) - np.min(variance_importance) + 1e-8)
        sep = np.array(class_separation_scores)
        norm_separation = (sep - np.min(sep)) / (np.max(sep) - np.min(sep) + 1e-8)
        combined_scores = 0.5 * norm_variance + 0.5 * norm_separation

        _plot_top20(axes[2], combined_scores, '组合重要性评分', '组合特征重要性排序')

        plt.tight_layout()
        plt.savefig(f'{self.results_dir}/feature_importance.png', dpi=300, bbox_inches='tight')
        plt.show()

    def run_complete_analysis(self):
        """Execute the full analysis pipeline end to end.

        Loads and processes the data, then runs each plotting/analysis stage
        in order, announcing every stage on stdout. Returns the populated
        feature DataFrame so callers can continue working with it.
        """
        print("=== 开始完整特征分析 ===")

        # 1. Load the raw signals and build the feature table first; every
        #    subsequent stage reads self.features_df / self.raw_signals.
        self.load_and_process_data()

        # Remaining stages as (progress message, bound method) pairs, run in
        # the original fixed order.
        stages = [
            ("\n绘制信号概览...", self.plot_signal_overview),
            ("\n进行故障频率分析...", self.plot_fault_frequency_analysis),
            ("\n进行包络分析...", self.plot_envelope_analysis),
            ("\n分析特征分布...", self.plot_feature_distributions),
            ("\n进行相关性分析...", self.plot_correlation_analysis),
            ("\n分析特征空间...", self.plot_feature_space_analysis),
            ("\n分析特征重要性...", self.plot_feature_importance),
        ]
        for message, stage in stages:
            print(message)
            stage()

        print(f"\n=== 分析完成！所有结果已保存到 {self.results_dir} 文件夹 ===")

        return self.features_df


def main(data_path=None):
    """Build the analyzer and run the complete feature-analysis pipeline.

    Args:
        data_path: Root directory of the source-domain dataset. When omitted,
            falls back to the original hard-coded location so existing
            zero-argument callers keep working.

    Returns:
        Tuple of ``(analyzer, features_df)`` — the configured analyzer and
        the feature DataFrame produced by the run.
    """
    if data_path is None:
        # Historical default kept for backward compatibility.
        data_path = r"C:\Users\18344\Desktop\中文ACDEF题\中文赛题ACDEF题\E题\数据集\源域数据集"

    # Create the analyzer and run every analysis stage.
    analyzer = EnhancedBearingFeatureAnalyzer(data_path)
    features_df = analyzer.run_complete_analysis()

    return analyzer, features_df


if __name__ == "__main__":
    analyzer, features_df = main()