import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout,BatchNormalization
import Data_preprocessing
import random
import h5py
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.callbacks import EarlyStopping,ReduceLROnPlateau
from sklearn.metrics import confusion_matrix, classification_report
import os


# Data generator
def data_generator(file_names, batch_size):
    """Yield (data, labels) mini-batches indefinitely from a set of HDF5 files.

    Files are visited in a freshly shuffled order on every pass, and the
    samples within each file are shuffled before being sliced into
    contiguous batches of size `batch_size` (the last slice may be short).
    """
    while True:  # loop forever so Keras can draw as many batches as it needs
        random.shuffle(file_names)  # new file order each epoch for randomness
        for path in file_names:
            # NOTE: the whole file is materialized into memory here; only the
            # batching below is lazy.
            with h5py.File(path, 'r') as h5:
                samples = h5['eeg']['eeg_data'][:]
                targets = h5['eeg']['stage_labels'][:]
            order = np.random.permutation(len(samples))  # shuffled row order
            for start in range(0, len(samples), batch_size):
                chosen = order[start:start + batch_size]
                yield samples[chosen], targets[chosen]

# Compute steps_per_epoch
def calculate_steps_per_epoch(file_names, batch_size):
    """Return the number of full batches available across all HDF5 files.

    Sums the sample count (first axis of 'eeg/eeg_data') over every file,
    then floor-divides by `batch_size`.
    """
    def _num_samples(path):
        # Reads only the dataset shape; the data itself is not loaded.
        with h5py.File(path, 'r') as h5:
            return h5['eeg']['eeg_data'].shape[0]

    return sum(_num_samples(p) for p in file_names) // batch_size

# Define the CNN model
def create_cnn_model(input_shape):
    """Build an (uncompiled) 1-D CNN for 5-class sleep-stage classification.

    Three conv stages with widening filter counts (16 -> 64 -> 128), each
    followed by batch norm, 3x max-pooling and dropout, then an
    L2-regularized dense layer and a 5-way softmax head.

    Args:
        input_shape: shape of a single sample, e.g. (3000, 1).

    Returns:
        A keras Sequential model (caller is responsible for compiling it).
    """
    model = Sequential()
    # Conv stage 1
    model.add(Conv1D(filters=16, kernel_size=7, activation='relu',
                     input_shape=input_shape, padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))
    # Conv stage 2
    model.add(Conv1D(filters=64, kernel_size=7, activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))
    # Conv stage 3
    model.add(Conv1D(filters=128, kernel_size=7, activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))
    # Classifier head
    model.add(Flatten())
    model.add(Dense(128, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Dense(5, activation='softmax'))
    return model
# Main function

def CNN():
    """Train (or load) the sleep-stage CNN and report test-set metrics.

    If 'sleep_stage_cnn_model.h5' does not exist, a new model is trained on
    a 64/16/20 train/val/test split of `Data_preprocessing.file_names` and
    saved. Otherwise the saved model is loaded and evaluated with a
    confusion matrix and a per-class classification report.
    """
    print("#######数据准备中#######")
    batch_size = 32
    # Split the file list 64/16/20 into train / validation / test sets.
    train_files, test_files = train_test_split(Data_preprocessing.file_names, test_size=0.2, random_state=42)
    train_files, val_files = train_test_split(train_files, test_size=0.2, random_state=42)
    # Infinite generators — Keras draws exactly *_steps batches per pass.
    train_gen = data_generator(train_files, batch_size)
    val_gen = data_generator(val_files, batch_size)
    test_gen = data_generator(test_files, batch_size)
    # Number of full batches per pass over each split.
    train_steps = calculate_steps_per_epoch(train_files, batch_size)
    val_steps = calculate_steps_per_epoch(val_files, batch_size)
    test_steps = calculate_steps_per_epoch(test_files, batch_size)
    if not os.path.exists('sleep_stage_cnn_model.h5'):
        # Build and compile a fresh model (3000-sample, single-channel epochs).
        model = create_cnn_model((3000, 1))
        opt = Adam(learning_rate=0.001, beta_1=0.9)
        model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        model.summary()
        # FIX: EarlyStopping/ReduceLROnPlateau were imported but never used;
        # wire them in so a 200-epoch run stops / decays LR on a val_loss
        # plateau instead of always running to completion.
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True),
            ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-6),
        ]
        model.fit(
            x=train_gen,
            steps_per_epoch=train_steps,
            epochs=200,
            validation_data=val_gen,
            validation_steps=val_steps,
            callbacks=callbacks,
        )
        test_loss, test_accuracy = model.evaluate(test_gen, steps=test_steps)
        print(f'\n测试集上的损失: {test_loss}, 准确率: {test_accuracy}')
        # FIX: the original re-checked os.path.exists() here, which is always
        # False inside this branch (nothing created the file meanwhile), so
        # the "already exists" else was dead code. Save unconditionally.
        model.save('sleep_stage_cnn_model.h5')
    else:
        print("模型已存在")
        model = tf.keras.models.load_model('sleep_stage_cnn_model.h5')
        print("评估模型并生成混淆矩阵及分类报告...")
        # Materialize exactly test_steps batches from the infinite generator.
        test_data = []
        test_labels = []
        for _ in range(test_steps):
            data_batch, labels_batch = next(test_gen)
            test_data.append(data_batch)
            test_labels.append(labels_batch)
        test_data = np.concatenate(test_data, axis=0)
        test_labels = np.concatenate(test_labels, axis=0)
        # Predicted class = argmax over the 5 softmax outputs.
        test_predictions = model.predict(test_data)
        test_pred_classes = np.argmax(test_predictions, axis=1)
        conf_matrix = confusion_matrix(test_labels, test_pred_classes)
        print("混淆矩阵:")
        print(conf_matrix)
        class_report = classification_report(test_labels, test_pred_classes, target_names=['Wake', 'N1', 'N2', 'N3', 'REM'])
        print("分类报告:")
        print(class_report)


# Invoke the main function
if __name__ == "__main__":
    # Train or evaluate the sleep-stage CNN when run as a script.
    CNN()














