# =============== Initialization / dependency imports ===============
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets.sleep_physionet.age import fetch_data
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer

# =============== Data processing module ===============
# --- Data loading ---
ALICE, BOB = 0, 1  # subject indices (this dataset provides subjects 0-82)
[alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1])

# --- Preprocessing ---
# Map the non-EEG channels to channel types MNE understands, so that a
# later picks="eeg" selects only the EEG channels.
mapping = {
    'EOG horizontal': 'eog',
    'Resp oro-nasal': 'misc',
    'EMG submental': 'misc',
    'Temp rectal': 'misc',
    'Event marker': 'misc'
}  # channel-name -> channel-type mapping recognized by MNE

# Training-set processing
raw_train = mne.io.read_raw_edf(alice_files[0])  # raw EDF recording -> mne.io.Raw instance
annot_train = mne.read_annotations(alice_files[1])  # hypnogram annotation file
# Attach the annotations to the Raw object; emit_warning=False silences
# warnings about annotations that fall outside the recorded data.
raw_train.set_annotations(annot_train, emit_warning=False)
# Apply the channel-type mapping (modifies and returns the same instance).
raw_train.set_channel_types(mapping)

# Visualize the raw signal (40 s window, automatic scaling)
raw_train.plot(duration=40, scalings='auto')
plt.show(block = False)

# --- Event processing ---
# Map annotation descriptions to integer event ids.  Sleep stages 3 and 4
# are deliberately mapped to the same id (4) so they are merged into a
# single class, following the AASM convention.
annotation_desc_2_event_id: dict[str, int] = {
    'Sleep stage W': 1, 'Sleep stage 1': 2,
    'Sleep stage 2': 3, 'Sleep stage 3': 4,
    'Sleep stage 4': 4,
    'Sleep stage R': 5
}
'''
The recordings also contain long wake (W) periods before and after each
night of sleep. To limit the effect of class imbalance, we crop each
recording, keeping only 30 minutes of wake time before sleep onset and
30 minutes after the final awakening.
'''
# Slice the annotated recording into 30 s chunks; each chunk becomes one event.
events_train, _ = mne.events_from_annotations(
    raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)

# Re-declare the event ids with the merged label so downstream objects
# (Epochs, plots, reports) show a single 'Sleep stage 3/4' class.
event_id = {
    'Sleep stage W': 1, 'Sleep stage 1': 2,
    'Sleep stage 2': 3, 'Sleep stage 3/4': 4,  # merged stages 3 and 4
    'Sleep stage R': 5
}

# Visualize the event sequence (hypnogram-style overview)
mne.viz.plot_events(events_train, event_id=event_id,
                    sfreq=raw_train.info['sfreq'])

# --- Feature extraction setup ---
stage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']  # per-stage colors (reused in the final comparison plot)
# Epoch length is 30 s.  One sample period (1/sfreq) is subtracted because
# the epoch window includes both tmin=0 and tmax, so without the subtraction
# each epoch would contain one extra sample.
tmax = 30. - 1. / raw_train.info['sfreq']

# Create the Epochs object: one 30 s epoch per scored sleep-stage event.
# baseline=None disables baseline correction (not meaningful for PSD features).
epochs_train = mne.Epochs(
    raw=raw_train,
    events=events_train,
    event_id=event_id,
    tmin=0.,
    tmax=tmax,
    baseline=None
)

# Test-set processing: mirror the training-set pipeline for subject Bob.
raw_test = mne.io.read_raw_edf(bob_files[0])
annot_test = mne.read_annotations(bob_files[1])
raw_test.set_annotations(annot_test, emit_warning=False)
raw_test.set_channel_types(mapping)
events_test, _ = mne.events_from_annotations(
    raw_test,
    event_id=annotation_desc_2_event_id,
    chunk_duration=30.
)
# Reuses the same event_id and tmax as the training set so features align.
epochs_test = mne.Epochs(
    raw=raw_test,
    events=events_test,
    event_id=event_id,
    tmin=0.,
    tmax=tmax,
    baseline=None
)


# =============== Training module ===============
def eeg_power_band(epochs):
    """Extract relative EEG band-power features from an Epochs object.

    For every epoch and EEG channel, the power spectral density is computed
    between 0.5 and 30 Hz, normalized to relative power, and averaged within
    five classical frequency bands.

    Parameters
    ----------
    epochs : mne.Epochs
        Epoched data; only EEG channels are used (picks="eeg").

    Returns
    -------
    numpy.ndarray
        Feature matrix of shape (n_epochs, n_bands * n_eeg_channels).
    """
    # Band edges in Hz: lower bound inclusive, upper bound exclusive.
    bands = {
        "delta": [0.5, 4.5],    # deep sleep
        "theta": [4.5, 8.5],    # light sleep
        "alpha": [8.5, 11.5],   # relaxed wakefulness
        "sigma": [11.5, 15.5],  # sleep spindles
        "beta": [15.5, 30]      # active thinking
    }

    # PSD restricted to the EEG channels and the 0.5-30 Hz range.
    # psds: (n_epochs, n_channels, n_freqs); freqs: (n_freqs,)
    spectrum = epochs.compute_psd(picks="eeg", fmin=0.5, fmax=30.0)
    psds, freqs = spectrum.get_data(return_freqs=True)

    # Convert absolute power to relative power per epoch/channel
    # (each spectrum sums to 1 over the analyzed range).
    psds = psds / psds.sum(axis=-1, keepdims=True)

    # One (n_epochs, n_channels) slab of mean relative power per band,
    # flattened and stacked column-wise into the feature matrix.
    per_band = [
        psds[..., (freqs >= lo) & (freqs < hi)].mean(axis=-1).reshape(len(psds), -1)
        for lo, hi in bands.values()
    ]
    return np.concatenate(per_band, axis=1)


# Build the pipeline: band-power feature extraction followed by a
# random-forest classifier.  validate=False lets FunctionTransformer pass
# the mne.Epochs object through unchanged (it is not an array).
pipe = make_pipeline(
    FunctionTransformer(eeg_power_band, validate=False),
    # Random-forest classifier; fixed random_state for reproducibility.
    RandomForestClassifier(n_estimators=100, random_state=42)
)

# Model training
y_train = epochs_train.events[:, 2]  # third events column holds the stage label
pipe.fit(epochs_train, y_train)

# =============== Results comparison module ===============
# --- Prediction and numeric evaluation ---
y_pred = pipe.predict(epochs_test)
y_test = epochs_test.events[:, 2]  # ground-truth stage labels

# Numeric results
acc = accuracy_score(y_test, y_pred)
print("Accuracy score: {}".format(acc))
# list() makes target_names an explicit indexable sequence rather than a dict view.
print(classification_report(y_test, y_pred, target_names=list(event_id.keys())))

# --- Visual comparison ---
# Per-stage average PSD for each subject, side by side.
fig, (ax1, ax2) = plt.subplots(ncols=2)
stages = sorted(event_id.keys())

for ax, title, epochs in zip([ax1, ax2], ['Alice', 'Bob'], [epochs_train, epochs_test]):
    # Overlay one averaged PSD curve per sleep stage.
    for stage, color in zip(stages, stage_colors):
        epochs[stage].plot_psd(
            area_mode=None, color=color, ax=ax,
            fmin=0.1, fmax=20., show=False,
            average=True, spatial_colors=False
        )
    ax.set(title=title, xlabel='Frequency (Hz)')

ax2.set(ylabel='uV^2/hz (dB)')
# NOTE(review): [2::3] assumes each plot_psd call adds 3 lines per stage,
# so it picks one representative line per stage for the legend — confirm
# against the number of EEG channels actually plotted.
ax2.legend(ax2.lines[2::3], stages)
plt.tight_layout()
plt.show()
# NOTE(review): removed a duplicated evaluation block that followed the plots;
# it recomputed y_test/acc and reprinted the identical accuracy and
# classification report already emitted above.