# import numpy as np
# from sklearn.model_selection import train_test_split, KFold
# from sklearn.preprocessing import StandardScaler
# from sklearn.utils import compute_class_weight
# import os
# import tensorflow as tf
# from tensorflow.keras.models import Sequential, load_model
# from tensorflow.keras.layers import LSTM, Dense, Dropout, Conv1D, MaxPooling1D, Flatten, BatchNormalization, Bidirectional
# from tensorflow.keras.regularizers import l2
# from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
# import matplotlib.pyplot as plt
# import seaborn as sns
# from sklearn.metrics import confusion_matrix
#
# # 强制使用CPU
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#
# # 定义文件目录
# file_dir = r'E:\EEG\EEG-TransNet-main\data\dataset\bci_iv_2a\\'
#
# # 处理真实数据文件
# realfile = ['A0' + str(i) + 'T_data.npy' for i in range(1, 10)]
# fakefile = []
# for i in range(1, 10):
#     fakefile.append(['A0' + str(i) + 'Efake_eeg_' + str(j) + '.npy' for j in range(10)])
#
# # 初始化空列表用于存储数据和标签
# real_data = []
# real_labels = []
# fake_data = []
# fake_labels = []
#
# # 加载真实数据并进行转换
# for i, file_name in enumerate(realfile):
#     file_path = os.path.join(file_dir, file_name)
#     data = np.load(file_path)
#
#     # 将 (288, 22, 1125) 转换为 288 个 (1, 22, 1125)
#     split_data = [data[i:i + 1] for i in range(data.shape[0])]
#
#     # 添加到 real_data 和对应的标签到 real_labels
#     real_data.extend(split_data)
#     real_labels.extend([i] * len(split_data))  # 标签从0开始
#
# # 加载伪造数据
# for i in range(len(fakefile)):
#     for j in range(len(fakefile[i])):
#         file_name = fakefile[i][j]
#         file_path = os.path.join(file_dir, file_name)
#         data = np.load(file_path)
#
#         # 添加到 fake_data 和对应的标签到 fake_labels
#         fake_data.append(data)
#         fake_labels.append(i)  # 标签从0开始
#
# # 将列表转换为NumPy数组
# real_data = np.array(real_data)
# real_labels = np.array(real_labels)
# fake_data = np.array(fake_data)
# fake_labels = np.array(fake_labels)
#
# print(f"Total number of real samples: {len(real_data)}")
# print(f"Shape of each real sample: {real_data[0].shape}")
# print(f"Total number of fake samples: {len(fake_data)}")
# print(f"Shape of each fake sample: {fake_data[0].shape}")
#
# # 数据标准化
# scaler = StandardScaler()
#
# # 展平数据以便进行标准化处理
# real_data_flat = real_data.reshape((real_data.shape[0], -1))
# fake_data_flat = fake_data.reshape((fake_data.shape[0], -1))
#
# real_data_scaled = scaler.fit_transform(real_data_flat)
# fake_data_scaled = scaler.transform(fake_data_flat)
#
# real_data = real_data_scaled.reshape(real_data.shape[0], 22, 1125)
# fake_data = fake_data_scaled.reshape(fake_data.shape[0], 22, 1125)
#
# # 分割真实数据集为训练集、验证集和测试集
# X_train_real, X_test_real, y_train_real, y_test_real = train_test_split(
#     real_data, real_labels, test_size=0.2, random_state=42
# )
#
# # 合并真实数据的训练集和伪造数据作为最终的训练集
# X_train = np.concatenate([X_train_real, fake_data])
# y_train = np.concatenate([y_train_real, fake_labels])
#
# # 确保输入数据的形状为 (batch_size, timesteps, features)
# X_train = X_train.reshape(X_train.shape[0], 22, 1125)
# X_test = X_test_real.reshape(X_test_real.shape[0], 22, 1125)
#
# # 数据增强函数
# def add_noise(data, noise_factor=0.01):
#     noise = noise_factor * np.random.normal(size=data.shape)
#     return data + noise
#
# # 应用噪声注入
# X_train = add_noise(X_train)
#
# # 构建增强后的LSTM模型
# def create_model(input_shape):
#     model = Sequential([
#         Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=input_shape),
#         BatchNormalization(),
#         MaxPooling1D(pool_size=2),
#         Dropout(0.5),
#
#         Bidirectional(LSTM(units=128, return_sequences=True, kernel_regularizer=l2(0.001))),
#         Dropout(0.5),
#
#         Bidirectional(LSTM(units=128, kernel_regularizer=l2(0.001))),
#         Dropout(0.5),
#
#         Dense(units=128, activation='relu', kernel_regularizer=l2(0.001)),
#         Dropout(0.5),
#
#         Dense(units=9, activation='softmax', kernel_regularizer=l2(0.001))
#     ])
#
#     optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
#     model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#     return model
#
# # 使用早停法和学习率调整防止过拟合
# early_stopping = EarlyStopping(
#     monitor='val_loss',
#     patience=10,
#     restore_best_weights=True
# )
#
# reduce_lr = ReduceLROnPlateau(
#     monitor='val_loss',
#     factor=0.1,
#     patience=5,
#     min_lr=1e-6
# )
#
# # 定义余弦退火学习率调度器
# def cosine_annealing(epoch, lr):
#     initial_lr = 0.001
#     lr_min = 0.0001
#     T_max = 50
#     eta_min = lr_min
#     eta_max = initial_lr
#     lr = eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * (epoch % T_max) / T_max))
#     return lr
#
# lr_scheduler = LearningRateScheduler(cosine_annealing)
#
# # K折交叉验证
# kf = KFold(n_splits=5)
# best_val_acc = 0
# best_model = None
#
# for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
#     print(f"Fold {fold + 1}")
#     X_train_fold, X_val_fold = X_train[train_idx], X_train[val_idx]
#     y_train_fold, y_val_fold = y_train[train_idx], y_train[val_idx]
#
#     # 计算类别权重
#     class_weights = compute_class_weight('balanced', classes=np.unique(y_train_fold), y=y_train_fold)
#     class_weight_dict = dict(enumerate(class_weights))
#
#     model = create_model(input_shape=(22, 1125))
#     history = model.fit(
#         X_train_fold, y_train_fold,
#         epochs=150,
#         batch_size=32,
#         validation_data=(X_val_fold, y_val_fold),
#         callbacks=[early_stopping, reduce_lr, lr_scheduler],
#         class_weight=class_weight_dict,  # 使用类别权重
#         verbose=1
#     )
#
#     # 绘制训练过程中的损失和准确率变化
#     plt.figure(figsize=(12, 4))
#     plt.subplot(1, 2, 1)
#     plt.plot(history.history['loss'], label='train_loss')
#     plt.plot(history.history['val_loss'], label='val_loss')
#     plt.title('Model Loss')
#     plt.ylabel('Loss')
#     plt.xlabel('Epoch')
#     plt.legend()
#
#     plt.subplot(1, 2, 2)
#     plt.plot(history.history['accuracy'], label='train_accuracy')
#     plt.plot(history.history['val_accuracy'], label='val_accuracy')
#     plt.title('Model Accuracy')
#     plt.ylabel('Accuracy')
#     plt.xlabel('Epoch')
#     plt.legend()
#
#     plt.show()
#
#     # 模型评估
#     val_loss, val_acc = model.evaluate(X_val_fold, y_val_fold)
#     print(f"Validation accuracy: {val_acc:.4f}")
#
#     if val_acc > best_val_acc:
#         best_val_acc = val_acc
#         best_model = model
#
# # 保存最佳模型
# model_path = 'shibie.h5'
# best_model.save(model_path)
# print(f"Best model saved to {model_path}")
#
import os
import numpy as np
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.utils import compute_class_weight
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout, Conv1D, MaxPooling1D, Flatten, BatchNormalization, Bidirectional
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix

# Force CPU-only execution by hiding all CUDA devices from TensorFlow.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Root directory of the BCI IV 2a dataset files.
file_dir = r'E:\EEG\EEG-TransNet-main\data\dataset\bci_iv_2a\\'

# File names: one real-recording file per subject (A01..A09), and ten
# generated ("fake") EEG files per subject.
realfile = [f'A0{subject}T_data.npy' for subject in range(1, 10)]
fakefile = [
    [f'A0{subject}Efake_eeg_{idx}.npy' for idx in range(10)]
    for subject in range(1, 10)
]

# Accumulators for samples and their subject labels.
real_data = []
real_labels = []
fake_data = []
fake_labels = []

# Load the real recordings. Each file holds a (288, 22, 1125) array of
# trials, split here into 288 individual (1, 22, 1125) samples, all
# labelled with the subject's index (labels start at 0).
for label, file_name in enumerate(realfile):
    trials = np.load(os.path.join(file_dir, file_name))
    per_trial = [trials[k:k + 1] for k in range(trials.shape[0])]
    real_data.extend(per_trial)
    real_labels.extend([label] * len(per_trial))

# Load the generated (fake) samples, labelled with the index of the
# subject they imitate (labels start at 0).
for label, subject_files in enumerate(fakefile):
    for file_name in subject_files:
        fake_data.append(np.load(os.path.join(file_dir, file_name)))
        fake_labels.append(label)

# Convert the accumulated lists into NumPy arrays.
real_data = np.array(real_data)
real_labels = np.array(real_labels)
fake_data = np.array(fake_data)
fake_labels = np.array(fake_labels)

print(f"Total number of real samples: {len(real_data)}")
print(f"Shape of each real sample: {real_data[0].shape}")
print(f"Total number of fake samples: {len(fake_data)}")
print(f"Shape of each fake sample: {fake_data[0].shape}")

# Standardize: fit the scaler on the real data, then apply the same
# transform to the fake data so both share one feature scaling.
# NOTE(review): the scaler is fitted on ALL real samples before the
# train/test split below, so test-set statistics leak into the scaling —
# confirm this is acceptable for the experiment.
scaler = StandardScaler()

# Flatten each sample to a single row, as StandardScaler expects 2-D input.
flat_real = real_data.reshape((real_data.shape[0], -1))
flat_fake = fake_data.reshape((fake_data.shape[0], -1))

scaled_real = scaler.fit_transform(flat_real)
scaled_fake = scaler.transform(flat_fake)

# Restore the (n_samples, 22 channels, 1125 timesteps) layout.
real_data = scaled_real.reshape(real_data.shape[0], 22, 1125)
fake_data = scaled_fake.reshape(fake_data.shape[0], 22, 1125)

# Persist the fitted scaler parameters so inference can reuse them.
np.save('scaler_mean.npy', scaler.mean_)
np.save('scaler_var.npy', scaler.var_)
np.save('scaler_scale.npy', scaler.scale_)

# Hold out 20% of the real data as the test set (fixed seed for
# reproducibility); the remaining real samples form the real training pool.
X_train_real, X_test_real, y_train_real, y_test_real = train_test_split(
    real_data, real_labels, test_size=0.2, random_state=42
)

# The final training set is the real training pool plus all fake samples;
# the test set stays purely real.
X_train = np.concatenate((X_train_real, fake_data), axis=0)
y_train = np.concatenate((y_train_real, fake_labels), axis=0)

# Ensure the (batch_size, timesteps, features) layout expected by the model.
X_train = X_train.reshape(X_train.shape[0], 22, 1125)
X_test = X_test_real.reshape(X_test_real.shape[0], 22, 1125)

# Data-augmentation helper.
def add_noise(data, noise_factor=0.01):
    """Return `data` with additive Gaussian noise.

    `noise_factor` scales a standard-normal draw of the same shape as
    `data`, i.e. it is the standard deviation of the injected noise.
    The input array is not modified in place.
    """
    perturbation = np.random.normal(size=data.shape) * noise_factor
    return data + perturbation

# Apply noise injection to the training set as data augmentation.
X_train = add_noise(X_train)
# Model factory for the CNN + bidirectional-LSTM classifier.
def create_model(input_shape):
    """Build and compile the CNN + BiLSTM classifier.

    Architecture: a Conv1D feature extractor with batch norm and pooling,
    two stacked bidirectional LSTMs, and a dense head ending in a 9-way
    softmax (one class per subject). Every trainable layer carries L2
    regularization, with heavy dropout throughout.

    Args:
        input_shape: shape of a single sample, e.g. (22, 1125).

    Returns:
        A compiled tf.keras Sequential model (Adam optimizer,
        sparse categorical cross-entropy loss, accuracy metric).
    """
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.5))

    model.add(Bidirectional(LSTM(units=128, return_sequences=True, kernel_regularizer=l2(0.001))))
    model.add(Dropout(0.5))

    model.add(Bidirectional(LSTM(units=128, kernel_regularizer=l2(0.001))))
    model.add(Dropout(0.5))

    model.add(Dense(units=128, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(Dropout(0.5))

    model.add(Dense(units=9, activation='softmax', kernel_regularizer=l2(0.001)))

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    return model

# Early stopping and plateau-based LR reduction to curb overfitting.
early_stopping = EarlyStopping(
    monitor='val_loss',
    patience=10,  # tolerate 10 epochs without val_loss improvement
    restore_best_weights=True  # roll back to the best validation weights
)

# NOTE(review): model.fit below also receives a LearningRateScheduler whose
# schedule depends only on the epoch number, so it overwrites this
# callback's reductions at the start of every epoch — confirm that using
# both callbacks together is intended.
reduce_lr = ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.1,  # cut the LR to 10% on plateau
    patience=5,
    min_lr=1e-6
)

# Cosine-annealing learning-rate schedule.
def cosine_annealing(epoch, lr):
    """Return the learning rate for `epoch` on a 50-epoch cosine cycle.

    Decays from 0.001 down to 0.0001 along a half-cosine over each
    50-epoch period, then restarts at the peak. The incoming `lr`
    argument is ignored; the schedule depends only on the epoch number.
    """
    eta_max = 0.001   # peak (initial) learning rate
    eta_min = 0.0001  # floor of the schedule
    period = 50       # epochs per cosine cycle
    phase = np.pi * (epoch % period) / period
    return eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(phase))

# Per-epoch cosine-annealing learning-rate schedule.
lr_scheduler = LearningRateScheduler(cosine_annealing)

# K-fold cross-validation setup.
# Fix: X_train is the (shuffled) real training pool followed by ALL fake
# samples grouped by subject, so an unshuffled KFold makes the later
# validation folds consist almost entirely of fake data. Shuffle with a
# fixed seed so every fold mixes real and fake samples reproducibly.
kf = KFold(n_splits=5, shuffle=True, random_state=42)
best_val_acc = 0
best_model = None

# 5-fold cross-validation training loop: each fold trains a fresh model,
# and the fold with the best validation accuracy is kept as `best_model`.
for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    print(f"Fold {fold + 1}")
    X_train_fold, X_val_fold = X_train[train_idx], X_train[val_idx]
    y_train_fold, y_val_fold = y_train[train_idx], y_train[val_idx]

    # Compute balanced class weights so under-represented labels in this
    # fold contribute proportionally more to the loss.
    class_weights = compute_class_weight('balanced', classes=np.unique(y_train_fold), y=y_train_fold)
    class_weight_dict = dict(enumerate(class_weights))

    # Fresh model per fold; (22, 1125) matches the sample layout above.
    # NOTE(review): both reduce_lr and lr_scheduler are passed below; the
    # scheduler recomputes the LR from the epoch number alone, which
    # overwrites ReduceLROnPlateau's adjustments — confirm this is intended.
    model = create_model(input_shape=(22, 1125))
    history = model.fit(
        X_train_fold, y_train_fold,
        epochs=150,
        batch_size=32,
        validation_data=(X_val_fold, y_val_fold),
        callbacks=[early_stopping, reduce_lr, lr_scheduler],
        class_weight=class_weight_dict,  # apply the balanced class weights
        verbose=1
    )

    # Plot the training/validation loss and accuracy curves for this fold.
    # NOTE(review): plt.show() typically blocks until the window is closed
    # on interactive backends, pausing training between folds — confirm.
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(history.history['loss'], label='train_loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(history.history['accuracy'], label='train_accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend()

    plt.show()

    # Evaluate on this fold's validation split and keep the best model.
    val_loss, val_acc = model.evaluate(X_val_fold, y_val_fold)
    print(f"Validation accuracy: {val_acc:.4f}")

    if val_acc > best_val_acc:
        best_val_acc = val_acc
        best_model = model

# Persist the best-performing fold's model for later inference.
model_path = 'shibie.h5'
if best_model is None:
    # Only possible if every fold evaluated to exactly 0 accuracy; fail
    # loudly instead of crashing with an opaque AttributeError on None.
    raise RuntimeError("No model was selected during cross-validation")
best_model.save(model_path)
print(f"Best model saved to {model_path}")