# -*- coding: utf-8 -*-
"""
振动信号特征提取、样本均衡与特征选择完整流程（决策树模型，橙色热力图版本）
"""

import pandas as pd
import warnings
import numpy as np
from scipy.signal import hilbert, welch, stft
from scipy.stats import kurtosis, skew
import pywt
import matplotlib.pyplot as plt
import seaborn as sns
import random
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc, f1_score
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import label_binarize
import time

# Suppress library warnings (e.g. scipy nperseg notices) for cleaner console output.
warnings.filterwarnings('ignore')

# Configure matplotlib for Chinese text rendering.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei supports CJK glyphs
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly with CJK fonts

# Custom per-class colors for ROC curves (cycled when there are more classes than colors).
ROC_COLORS = [
    '#FFC0CB',  # pink
    '#000000',  # black
    '#45B7D1',  # sky blue
    '#FFA07A',  # light salmon
    '#98D8C8',  # pale teal
    '#F7DC6F',  # golden yellow
    '#D7BDE2',  # pale lavender
    '#58D68D',  # light green
]

# Colormap for confusion-matrix heatmaps (light-to-dark orange gradient).
HEATMAP_CMAP = "Oranges"


# ------------------ 工具函数 ------------------ #
def time_features(x):
    """Compute time-domain statistical features of a 1-D signal.

    Parameters
    ----------
    x : array-like
        Raw vibration signal; flattened to 1-D before processing.

    Returns
    -------
    dict
        Mapping of (Chinese) feature names to scalar values; an empty
        dict when the input has no samples.
    """
    signal = np.asarray(x).ravel()
    if signal.size == 0:
        return {}

    abs_signal = np.abs(signal)
    rms_value = np.sqrt(np.mean(signal ** 2))
    abs_mean = np.mean(abs_signal)
    root_amplitude = np.mean(np.sqrt(abs_signal))
    peak_value = np.max(abs_signal)
    eps = 1e-12  # guards the ratio features against division by zero

    return {
        "均值": np.mean(signal),
        "标准差": np.std(signal),
        "方差": np.var(signal),
        "均方根": rms_value,
        "峰值": peak_value,
        "峰峰值": np.ptp(signal),
        "平均绝对值": abs_mean,
        "波形指标": rms_value / (abs_mean + eps),
        "峭度指标": peak_value / (rms_value + eps),
        "脉冲指标": peak_value / (abs_mean + eps),
        "裕度指标": peak_value / (root_amplitude ** 2 + eps),
        "间隙指标": peak_value / (root_amplitude + eps),
        "峭度": kurtosis(signal),
        "偏度": skew(signal),
    }


def freq_features(x, fs, fr):
    """Compute frequency-domain features from the Welch PSD, with the
    frequency axis normalized by the shaft rotation frequency.

    Parameters
    ----------
    x : array-like
        Vibration signal.
    fs : float
        Sampling frequency in Hz.
    fr : float
        Rotation frequency in Hz; the frequency axis is divided by it,
        so 1.0 on the normalized axis corresponds to 1x shaft speed.

    Returns
    -------
    dict
        Spectral centroid / bandwidth / skewness / kurtosis / entropy plus
        band energies around selected rotation-order bands.
    """
    f, Pxx = welch(x, fs=fs, nperseg=2048)
    Pxx = Pxx / np.sum(Pxx)  # normalize PSD so it behaves like a probability mass

    # Normalize frequencies by the rotation frequency (rotation orders).
    f_norm = f / fr

    centroid = np.sum(f_norm * Pxx)
    feats = {
        "谱质心": centroid,
        # BUGFIX: spectral bandwidth is the power-weighted spread around the
        # spectral centroid. The old code measured spread around
        # np.mean(f_norm) — the unweighted mean of the frequency axis, a
        # constant independent of the signal content.
        "谱带宽": np.sqrt(np.sum(((f_norm - centroid) ** 2) * Pxx)),
        "谱偏度": skew(Pxx),
        "谱峭度": kurtosis(Pxx),
        "谱熵": -np.sum(Pxx * np.log(Pxx + 1e-12)),  # Shannon entropy of the PSD
    }

    # Band energies around the 1x, 2x, 3x and 5x rotation orders.
    bands = [(0.8, 1.2), (1.8, 2.2), (2.8, 3.2), (4.5, 5.5)]
    for i, (lo, hi) in enumerate(bands, 1):
        mask = (f_norm >= lo) & (f_norm <= hi)
        feats[f"带通能量_{i}"] = np.sum(Pxx[mask])
    return feats


def tf_features(x, fs, fr):
    """Compute time-frequency features: STFT-based entropy and mean
    frequency, plus relative wavelet-band energies (db4, 4 levels).

    Parameters
    ----------
    x : array-like
        Vibration signal.
    fs : float
        Sampling frequency in Hz.
    fr : float
        Rotation frequency in Hz used to normalize the STFT frequency axis.

    Returns
    -------
    dict
        Time-frequency entropy, mean normalized frequency, and one relative
        energy per wavelet decomposition band.
    """
    # Short-time Fourier transform -> normalized 2-D power distribution.
    freqs, _, stft_coeffs = stft(x, fs=fs, nperseg=1024)
    tf_power = np.abs(stft_coeffs) ** 2
    tf_power = tf_power / (np.sum(tf_power) + 1e-12)

    orders = freqs / fr  # frequency axis in rotation orders
    feats = {
        "时频熵": -np.sum(tf_power * np.log(tf_power + 1e-12)),
        "时频均值频率": np.sum(np.mean(tf_power, axis=1) * orders),
    }

    # Wavelet decomposition: relative energy per band.
    wavelet_coeffs = pywt.wavedec(x, 'db4', level=4)
    band_energy = np.array([np.sum(c ** 2) for c in wavelet_coeffs])
    for idx, ratio in enumerate(band_energy / (np.sum(band_energy) + 1e-12)):
        feats[f"小波能量_{idx}"] = ratio
    return feats


# ------------------ 主函数 ------------------ #
def extract_features_from_long(long_table, signal_col="DE_time", fs=32000):
    """Extract per-file features from a long-format signal table.

    Parameters
    ----------
    long_table : pd.DataFrame
        Long table with at least the columns "file", "RPM", "status" and
        ``signal_col``.
    signal_col : str
        Name of the column holding the raw signal samples.
    fs : int
        Sampling frequency in Hz.

    Returns
    -------
    (X, y) : (pd.DataFrame, pd.Series)
        Feature matrix indexed by file name and the matching status labels.
        Both are empty (but well-formed) when no file has usable samples.
    """
    feature_rows = []
    labels = []

    for fid, group in long_table.groupby("file"):
        x = group[signal_col].dropna().values
        if len(x) == 0:  # skip files with no usable samples
            continue

        # Rotation frequency fr (Hz) from RPM; fall back to 1.0 (no
        # normalization) when RPM is missing or non-positive.
        # FIX: use pd.notna() — consistent with
        # extract_features_balanced_from_long — instead of relying on NaN
        # truthiness/comparison semantics.
        rpm = group["RPM"].iloc[0]
        fr = (rpm / 60.0) if (pd.notna(rpm) and rpm > 0) else 1.0

        feats = {}
        feats.update(time_features(x))
        feats.update(freq_features(x, fs, fr))
        feats.update(tf_features(x, fs, fr))

        feats["文件名"] = fid
        feature_rows.append(feats)
        labels.append(group["status"].iloc[0])

    # FIX: with no usable files the original crashed on set_index("文件名");
    # return consistently-shaped empty outputs instead.
    if not feature_rows:
        X = pd.DataFrame(columns=["文件名"]).set_index("文件名")
        y = pd.Series([], index=X.index, name="状态", dtype=object)
        return X, y

    X = pd.DataFrame(feature_rows).set_index("文件名")
    y = pd.Series(labels, index=X.index, name="状态")
    return X, y


def extract_features_balanced_from_long(
        long_table: pd.DataFrame,
        signal_col: str = "DE_time",
        fs: int = 32000,
        target_per_class: int = 77,
        mix_ratio: float = 0.5,
        random_state: int = 42
):
    """Extract per-file features and balance classes via mix augmentation.

    Classes with fewer than ``target_per_class`` files are topped up with
    synthetic samples: two parent signals of the same class are chosen at
    random (with replacement, possibly the same file) and interleaved
    point-by-point — each point comes from parent A with probability
    ``mix_ratio`` — then features are re-extracted from the mixed signal.

    Parameters
    ----------
    long_table : pd.DataFrame
        Long table with columns "file", "RPM", "status" and ``signal_col``.
    signal_col : str
        Column holding the raw signal samples.
    fs : int
        Sampling frequency in Hz.
    target_per_class : int
        Target sample count per class after augmentation.
    mix_ratio : float
        Probability each sample point of a synthetic signal is taken from
        the first parent rather than the second.
    random_state : int
        Seeds both the NumPy generator (mixing masks) and the stdlib
        ``random`` module (parent selection). Results are reproducible only
        if the RNG call order is preserved.

    Returns
    -------
    (X_balanced, y_balanced) : (pd.DataFrame, pd.Series)
        Feature matrix (indexed by file / augmented-sample id) and labels.
    """
    rng = np.random.default_rng(random_state)
    random.seed(random_state)

    feature_rows = []
    labels = []
    raw_pool = {}  # status -> list of (file_id, raw_signal, rpm) kept for augmentation

    for fid, group in long_table.groupby("file"):
        x = group[signal_col].dropna().values
        if len(x) == 0:  # skip files with no usable samples
            continue
        # Rotation frequency (Hz); assumes RPM is constant within a file — taken from the first row.
        rpm = group["RPM"].iloc[0]
        fr = (rpm / 60.0) if (pd.notna(rpm) and rpm > 0) else 1.0
        status = group["status"].iloc[0]

        feats = {}
        feats.update(time_features(x))
        feats.update(freq_features(x, fs, fr))
        feats.update(tf_features(x, fs, fr))
        feats["文件名"] = fid

        feature_rows.append(feats)
        labels.append(status)
        raw_pool.setdefault(status, []).append((fid, x, rpm))

    X = pd.DataFrame(feature_rows).set_index("文件名")
    y = pd.Series(labels, index=X.index, name="状态")

    # Number of synthetic samples each class still needs.
    counts = y.value_counts()
    need_augment = {
        cls: max(0, target_per_class - cnt)
        for cls, cnt in counts.items()
    }

    aug_feature_rows = []
    aug_labels = []

    for cls, n_needed in need_augment.items():
        if n_needed <= 0:
            continue

        pool = raw_pool.get(cls, [])
        if len(pool) == 0:
            print(f"警告：类别 {cls} 在原始数据中不存在，无法增广。")
            continue
        if len(pool) == 1:
            print(f"提示：类别 {cls} 只有 1 个原始样本，增广将以同一条信号自混合方式进行。")

        for k in range(n_needed):
            # Two parents drawn with replacement (may be the same file).
            (fid_a, xa, rpm_a) = random.choice(pool)
            (fid_b, xb, rpm_b) = random.choice(pool)

            # Truncate both parents to the shorter length before mixing.
            L = min(len(xa), len(xb))
            xa_ = xa[:L]
            xb_ = xb[:L]

            # Bernoulli mask: True -> take the point from parent A.
            mask = rng.random(L) < mix_ratio
            x_new = np.where(mask, xa_, xb_)

            # The synthetic sample inherits parent A's RPM.
            rpm_new = rpm_a
            fr_new = (rpm_new / 60.0) if (pd.notna(rpm_new) and rpm_new > 0) else 1.0

            feats_new = {}
            feats_new.update(time_features(x_new))
            feats_new.update(freq_features(x_new, fs, fr_new))
            feats_new.update(tf_features(x_new, fs, fr_new))

            # Unique id encodes parent A's file, augmentation index and class.
            new_id = f"{fid_a}__aug_{k + 1:03d}_cls_{cls}"
            feats_new["文件名"] = new_id

            aug_feature_rows.append(feats_new)
            aug_labels.append(cls)

    if len(aug_feature_rows) > 0:
        X_aug = pd.DataFrame(aug_feature_rows).set_index("文件名")
        y_aug = pd.Series(aug_labels, index=X_aug.index, name="状态")
        X_balanced = pd.concat([X, X_aug], axis=0)
        y_balanced = pd.concat([y, y_aug], axis=0)
    else:
        # Nothing to augment: return the original feature set untouched.
        X_balanced, y_balanced = X, y

    print("均衡后各类数量：")
    print(y_balanced.value_counts())

    return X_balanced, y_balanced


# PSO优化决策树类
class SimplePSO_DT:
    """Minimal particle-swarm optimizer (maximization) used to tune
    decision-tree hyperparameters.

    ``fitness_func`` maps a position vector to a scalar score (higher is
    better). Given the same seed, runs are fully reproducible.
    """

    def __init__(self, dim, lb, ub, fitness_func, num_particles=20, iters=25,
                 w=0.72, c1=1.49, c2=1.49, seed=2025):
        # Problem definition and PSO coefficients.
        self.dim = dim
        self.lb = lb
        self.ub = ub
        self.fitness_func = fitness_func
        self.num_particles = num_particles
        self.iters = iters
        self.w = w      # inertia weight
        self.c1 = c1    # cognitive (personal-best) coefficient
        self.c2 = c2    # social (global-best) coefficient
        self.rng = np.random.default_rng(seed)

        # Positions start uniform in the box; velocities at 10% of the box span.
        self.positions = self.rng.uniform(lb, ub, size=(num_particles, dim))
        self.velocities = self.rng.uniform(-np.abs(ub - lb), np.abs(ub - lb),
                                           size=(num_particles, dim)) * 0.1

        # Personal bests begin at the initial positions.
        self.pbest_pos = self.positions.copy()
        self.pbest_val = np.array([fitness_func(p) for p in self.positions])

        # Global best across the whole swarm.
        self.gbest_idx = int(np.argmax(self.pbest_val))
        self.gbest_pos = self.pbest_pos[self.gbest_idx].copy()
        self.gbest_val = float(self.pbest_val[self.gbest_idx])

    def optimize(self):
        """Run the PSO loop; return (best_position, best_value)."""
        start = time.time()
        for iteration in range(1, self.iters + 1):
            # Fresh random factors for the cognitive and social pulls.
            rand_cog = self.rng.random((self.num_particles, self.dim))
            rand_soc = self.rng.random((self.num_particles, self.dim))
            inertia = self.w * self.velocities
            cognitive = self.c1 * rand_cog * (self.pbest_pos - self.positions)
            social = self.c2 * rand_soc * (self.gbest_pos - self.positions)
            self.velocities = inertia + cognitive + social

            # Move, then clamp back into the search box.
            self.positions = np.clip(self.positions + self.velocities,
                                     self.lb, self.ub)

            fitness = np.array([self.fitness_func(p) for p in self.positions])

            # Update personal bests wherever fitness improved.
            better = fitness > self.pbest_val
            self.pbest_pos[better] = self.positions[better]
            self.pbest_val[better] = fitness[better]

            # Refresh the global best if any personal best surpassed it.
            if self.pbest_val.max() > self.gbest_val:
                self.gbest_idx = int(np.argmax(self.pbest_val))
                self.gbest_pos = self.pbest_pos[self.gbest_idx].copy()
                self.gbest_val = float(self.pbest_val[self.gbest_idx])

            if iteration % 5 == 0 or iteration == self.iters:
                print(f"[PSO] 迭代 {iteration:02d}/{self.iters}，当前最优 F1_macro = {self.gbest_val:.4f}")

        elapsed = time.time() - start
        print(f"[PSO] 完成。耗时 {elapsed:.1f}s")
        return self.gbest_pos, self.gbest_val


def main():
    """End-to-end pipeline: load data, extract and balance features, run
    RFE feature selection, train/evaluate a baseline decision tree, then
    tune its hyperparameters with PSO and re-evaluate.

    NOTE(review): evaluation-hygiene caveats are flagged inline — RFE is
    fit on the full balanced set before the train/test split, and the PSO
    "validation" set is the test set itself; both can inflate the reported
    test metrics. Confirm whether that is acceptable for this study.
    """
    # Load the long-format signal table (hard-coded local path).
    long_table = pd.read_csv(r'E:\研究生\数学建模\2025研究生题\新\expanded_long_table.csv')
    print("数据读取完成")

    # Feature extraction from the raw (unbalanced) data.
    X, y = extract_features_from_long(long_table, signal_col="DE_time", fs=32000)
    print(f"特征提取完成，形状: {X.shape}, {y.shape}")
    print(X.head())

    # Class label distribution (counts and proportions).
    print("\n标签分布：")
    dist = y.value_counts()
    ratio = y.value_counts(normalize=True)
    print(pd.DataFrame({"数量": dist, "比例": ratio}))

    # Bar chart of the label distribution.
    plt.figure(figsize=(6, 4))
    ax = dist.plot(kind="bar")
    plt.xlabel("状态")
    plt.ylabel("数量")
    plt.title("状态分布统计")
    plt.xticks(rotation=45)

    # Annotate each bar with its count.
    for i, v in enumerate(dist):
        ax.text(i, v + 0.5, str(v), ha='center', va='bottom')
    plt.tight_layout()
    plt.show()

    # Class balancing via mix augmentation (re-extracts all features).
    X_bal, y_bal = extract_features_balanced_from_long(
        long_table,
        signal_col="DE_time",
        fs=32000,
        target_per_class=77,
        mix_ratio=0.5,
        random_state=42
    )
    print("样本均衡后数量：")
    print(y_bal.value_counts())

    # Feature selection: RFE wrapped around a decision tree.
    # NOTE(review): fitting RFE on the full balanced set before the split
    # leaks test-set information into the selection.
    estimator = DecisionTreeClassifier(random_state=42)
    selector = RFE(estimator, n_features_to_select=20, step=1)
    selector.fit(X_bal, y_bal)
    selected_features = X_bal.columns[selector.support_]
    print("\n选择的特征：")
    print(selected_features)
    X_selected = X_bal[selected_features]
    print(f"原始特征维度：{X_bal.shape[1]}")
    print(f"筛选后特征维度：{X_selected.shape[1]}")

    # Stratified 70/30 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(
        X_selected, y_bal, test_size=0.3, random_state=42, stratify=y_bal
    )

    # Baseline decision tree (default hyperparameters).
    print("\n----- 决策树（未优化）-----")
    clf_dt = DecisionTreeClassifier(random_state=42)
    clf_dt.fit(X_train, y_train)
    y_pred_dt = clf_dt.predict(X_test)
    y_pred_proba_dt = clf_dt.predict_proba(X_test)

    # One-vs-rest ROC curves (labels binarized per class).
    classes = np.unique(y_bal)
    y_test_bin = label_binarize(y_test, classes=classes)
    n_classes = y_test_bin.shape[1]

    plt.figure(figsize=(6, 5))
    for i in range(n_classes):
        color = ROC_COLORS[i % len(ROC_COLORS)]  # cycle the palette if needed
        fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_pred_proba_dt[:, i])
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=2, label=f"{classes[i]} (AUC = {roc_auc:.2f})", color=color)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.xlabel("假阳率 (FPR)")
    plt.ylabel("真正率 (TPR)")
    plt.title("决策树多分类 ROC 曲线")
    plt.legend(loc="lower right")
    plt.tight_layout()
    plt.show()

    # Confusion matrix rendered as an orange heatmap.
    cm = confusion_matrix(y_test, y_pred_dt, labels=classes)
    plt.figure(figsize=(6, 5))
    sns.heatmap(cm, annot=True, fmt="d", cmap=HEATMAP_CMAP,  # orange colormap
                xticklabels=classes, yticklabels=classes)
    plt.xlabel("预测标签")
    plt.ylabel("真实标签")
    plt.title("决策树混淆矩阵热力图")
    plt.tight_layout()
    plt.show()

    # Per-class precision/recall/F1 report.
    # NOTE(review): classification_report's target_names expects strings —
    # confirm the class labels are string-typed.
    print("决策树 F1 分类报告：")
    print(classification_report(y_test, y_pred_dt, target_names=classes))

    # PSO hyperparameter tuning of the decision tree.
    print("\n----- PSO优化决策树 -----")
    # NOTE(review): the PSO fitness is evaluated on the TEST set (X_val is
    # X_test), so the tuned model's reported test F1 is optimistically biased.
    X_tr, X_val, y_tr, y_val = X_train, X_test, y_train, y_test

    # Hyperparameter search space (criterion is encoded as 0/1).
    SPACE = {
        "max_depth": (3, 50),
        "min_samples_split": (2, 20),
        "min_samples_leaf": (1, 20),
        "criterion": [0, 1]
    }

    def decode_particle(vec):
        """Map a continuous PSO position vector to DecisionTree kwargs."""
        (md_lo, md_hi) = SPACE["max_depth"]
        (mss_lo, mss_hi) = SPACE["min_samples_split"]
        (msl_lo, msl_hi) = SPACE["min_samples_leaf"]

        params = {
            "max_depth": int(round(np.clip(vec[0], md_lo, md_hi))),
            "min_samples_split": int(round(np.clip(vec[1], mss_lo, mss_hi))),
            "min_samples_leaf": int(round(np.clip(vec[2], msl_lo, msl_hi))),
            "criterion": "gini" if vec[3] < 0.5 else "entropy",
            "random_state": 42
        }
        # Keep the parameters valid: min_samples_split must be >= min_samples_leaf.
        params["min_samples_split"] = max(params["min_samples_split"], params["min_samples_leaf"])
        return params

    def evaluate_particle(vec):
        """PSO fitness: macro-F1 on the validation split of a tree trained
        with the decoded hyperparameters."""
        params = decode_particle(vec)
        clf_pso = DecisionTreeClassifier(**params)
        clf_pso.fit(X_tr, y_tr)
        pred_val = clf_pso.predict(X_val)
        f1 = f1_score(y_val, pred_val, average="macro")
        return f1

    # PSO search-space bounds (one dimension per hyperparameter).
    NUM_DIM = 4
    LOWS = np.array([SPACE["max_depth"][0],
                     SPACE["min_samples_split"][0],
                     SPACE["min_samples_leaf"][0],
                     SPACE["criterion"][0]])
    HIGHS = np.array([SPACE["max_depth"][1],
                      SPACE["min_samples_split"][1],
                      SPACE["min_samples_leaf"][1],
                      SPACE["criterion"][1]])

    # Run the particle-swarm optimizer.
    pso_dt = SimplePSO_DT(
        dim=NUM_DIM,
        lb=LOWS,
        ub=HIGHS,
        fitness_func=evaluate_particle,
        num_particles=20,
        iters=25,
        w=0.72,
        c1=1.49,
        c2=1.49,
        seed=2025
    )



    print(f"[PSO] 初始最优 F1_macro = {pso_dt.gbest_val:.4f}，超参数 = {decode_particle(pso_dt.gbest_pos)}")
    best_pos_dt, best_val_dt = pso_dt.optimize()
    best_params_dt = decode_particle(best_pos_dt)
    print("[PSO] 最优超参数：", best_params_dt)
    print(best_params_dt['max_depth'])
    print(best_params_dt['min_samples_split'])
    print(best_params_dt['min_samples_leaf'])

    # Retrain on the training set with the best hyperparameters found.
    clf_best_dt = DecisionTreeClassifier(**best_params_dt)
    clf_best_dt.fit(X_train, y_train)
    y_pred_best_dt = clf_best_dt.predict(X_test)
    y_proba_best_dt = clf_best_dt.predict_proba(X_test)

    f1_macro_test = f1_score(y_test, y_pred_best_dt, average="macro")
    print(f"[PSO] 最优模型在测试集上的 F1_macro：{f1_macro_test:.4f}")

    # ROC curves for the tuned model.
    plt.figure(figsize=(6, 5))
    for i in range(n_classes):
        color = ROC_COLORS[i % len(ROC_COLORS)]
        fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_proba_best_dt[:, i])
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=2, label=f"{classes[i]} (AUC = {roc_auc:.2f})", color=color)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.xlabel("假阳率 (FPR)")
    plt.ylabel("真正率 (TPR)")
    plt.title("PSO优化决策树多分类 ROC 曲线")
    plt.legend(loc="lower right")
    plt.tight_layout()
    plt.show()

    # Confusion matrix of the tuned model (orange heatmap).
    cm_best = confusion_matrix(y_test, y_pred_best_dt, labels=classes)
    plt.figure(figsize=(6, 5))
    sns.heatmap(cm_best, annot=True, fmt="d", cmap=HEATMAP_CMAP,  # orange colormap
                xticklabels=classes, yticklabels=classes)
    plt.xlabel("预测标签")
    plt.ylabel("真实标签")
    plt.title("PSO优化决策树混淆矩阵热力图")
    plt.tight_layout()
    plt.show()

    # Per-class report for the tuned model.
    print("PSO优化决策树 F1 分类报告：")
    print(classification_report(y_test, y_pred_best_dt, target_names=classes))


# Script entry point.
if __name__ == "__main__":
    main()
