import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv1D, MaxPooling1D, LSTM, Dense, Dropout, Flatten,Input,concatenate
import tensorflow.keras as keras
import warnings
warnings.filterwarnings('ignore')


def construct_dual_branch_model(signal_length):
    """Build a two-branch Conv1D + LSTM network for binary classification.

    Each branch consumes one single-channel signal of shape
    ``(signal_length, 1)`` (one branch per modality, e.g. EEG and ECG).
    The two branch feature vectors are fused by a fixed weighted sum and
    passed through a dense head with a sigmoid output.

    Parameters
    ----------
    signal_length : int
        Number of samples per input segment.

    Returns
    -------
    keras.Model
        Compiled model taking ``[branch1_input, branch2_input]`` and
        producing one sigmoid probability per sample.
    """

    def _branch(inp):
        # Branch topology: Conv -> pool -> dropout -> Conv -> pool ->
        # LSTM -> flatten.  Each call builds FRESH layers, so the two
        # branches do not share weights (same as the original code).
        x = Conv1D(filters=64, kernel_size=16, strides=1, activation='relu')(inp)
        x = MaxPooling1D(pool_size=5, strides=5)(x)
        x = Dropout(rate=0.2)(x)
        x = Conv1D(filters=128, kernel_size=13, strides=1, activation='relu')(x)
        x = MaxPooling1D(pool_size=5, strides=5)(x)
        x = LSTM(units=32, return_sequences=True)(x)
        return Flatten()(x)

    branch1_input = Input(shape=(signal_length, 1))
    branch2_input = Input(shape=(signal_length, 1))
    x1 = _branch(branch1_input)
    x2 = _branch(branch2_input)

    # Fuse the branch features with a fixed weighted sum.  The original
    # wrote `concatenate([6*x1+4*x2])` — a Concatenate over a SINGLE
    # tensor, which is a no-op at best and raises a ValueError in several
    # Keras versions; the weighted sum alone is the intended fusion.
    # NOTE(review): weights 6/4 look like un-normalized 0.6/0.4 —
    # confirm the intended branch weighting.
    merged = 6 * x1 + 4 * x2

    # Dense classification head.
    x = Dense(units=64, activation='relu')(merged)
    x = Dropout(rate=0.2)(x)
    output = Dense(units=1, activation='sigmoid')(x)

    model = Model(inputs=[branch1_input, branch2_input], outputs=output)
    model.compile(loss=keras.losses.binary_crossentropy, optimizer='Adam', metrics=['acc'])
    return model

# Function to load the data (use second lead)
def load_data(pathname, file_numbers):
    """Load raw EEG and ECG signals for subjects ``1..file_numbers``.

    Each subject lives in a zero-padded subdirectory (``01``, ``02``, ...)
    under ``pathname``, containing ``eegsignals.csv`` (comma-separated,
    multi-lead) and ``ecgsignals.csv`` (single column).

    Returns a pair of lists ``(eeg_signals, ecg_signals)`` with one 1-D
    array per subject.
    """
    eeg_signals = []
    ecg_signals = []
    for subject in range(1, file_numbers + 1):
        subject_dir = f"{pathname}{subject:02d}/"
        # NOTE(review): only column 0 (the FIRST lead) of the EEG file is
        # kept, despite the original comment claiming the second lead —
        # confirm which lead is intended.
        eeg_signals.append(np.loadtxt(subject_dir + 'eegsignals.csv', delimiter=',')[:, 0])
        ecg_signals.append(np.loadtxt(subject_dir + 'ecgsignals.csv'))
    return eeg_signals, ecg_signals

# Function to split the data
def split_data1(pathname, fs, split_second, labels, file_numbers):
    """Load one subject's EEG/ECG signals and cut them into fixed windows.

    Parameters
    ----------
    pathname : str
        Dataset root, ending with '/'.
    fs : int
        Sampling frequency in Hz.
    split_second : int
        Window length in seconds; each segment has ``fs * split_second``
        samples.
    labels : sequence
        Per-subject labels; ``labels[file_numbers - 1]`` is attached to
        every segment of this subject.
    file_numbers : int
        1-based subject index, mapped to zero-padded directory '01', '02', ...

    Returns
    -------
    tuple
        ``(eeg_segments, ecg_segments, segment_labels)`` where the segment
        arrays have shape ``(n_segments, fs * split_second, 1)``.
    """
    subject_index = file_numbers
    # Zero-padded subject directory.  The original concatenated
    # 'NN/' + '/eegsignals.csv', producing a doubled slash — harmless on
    # most filesystems but normalized here.
    subject_dir = f"{pathname}{subject_index:02d}/"
    # Only column 0 (the FIRST lead) of the EEG file is used; the original
    # comment claimed index 1.
    data = np.loadtxt(subject_dir + 'eegsignals.csv', delimiter=',')[:, 0]
    data1 = np.loadtxt(subject_dir + 'ecgsignals.csv')
    cur_label = labels[subject_index - 1]

    segment_len = fs * split_second
    data_segment = []
    data_segment1 = []
    label = []
    # Floor division guarantees every window fits inside the signal, so no
    # bounds check is needed in the loop (the original's `continue` branch
    # was unreachable dead code).
    loop_numbers = data.shape[0] // segment_len
    for i in range(loop_numbers):
        begin_index = i * segment_len
        end_index = begin_index + segment_len
        data_segment.append(data[begin_index:end_index].reshape(-1, 1))
        data_segment1.append(data1[begin_index:end_index].reshape(-1, 1))
        label.append(cur_label)
    return np.array(data_segment), np.array(data_segment1), np.array(label)

# Training and testing paths
# Dataset locations: training recordings and held-out test recordings.
pathname1 = 'D:/BaiduNetdiskDownload/dataset_BME_depression/traindata/'
pathname2 = 'D:/BaiduNetdiskDownload/dataset_BME_depression/testdata/'


# Per-subject labels: the first 22 training subjects are positive (1),
# the remaining 8 are negative (0).
train_label = np.array([1] * 22 + [0] * 8)
test_label = np.array([1] * 30)  # Test labels

# Segmentation parameters: 250 Hz sampling rate, 6-second windows.
fs = 250
split_second = 6

# Assemble the pooled training tensors.  A dummy all-ones first row makes
# np.concatenate valid on the first iteration; it is stripped afterwards.
X_train = np.ones((1, fs * split_second, 2))
y_train = np.ones((1, 1)).reshape(-1,)

for subject in range(1, 31):
    eeg_seg, ecg_seg, seg_labels = split_data1(pathname1, fs, split_second, train_label, subject)
    # Stack EEG and ECG as two channels on the last axis.
    combined = np.concatenate((eeg_seg, ecg_seg), axis=-1)
    X_train = np.concatenate((X_train, combined), axis=0)
    y_train = np.concatenate((y_train, seg_labels), axis=0)

# Drop the dummy first row/label.
X_train = X_train[1:]
y_train = y_train[1:]
print(X_train.shape, y_train.shape)
# Train the model and record the loss and accuracy of each iteration


# Split train and test data
# Hold out 20% of the pooled segments for evaluation (shuffled, no fixed
# random seed — results vary between runs).
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2, shuffle=True)

# Construct the model
model = construct_dual_branch_model(fs * split_second)

# Train the model; channel 0 (EEG) feeds branch 1, channel 1 (ECG) feeds branch 2.
model.fit([X_train[:,:,0],X_train[:,:,1]], y_train, batch_size=32, epochs=30)

# Evaluate the model
print("Evaluation: Loss | Accuracy")
model.evaluate([X_test[:,:,0],X_test[:,:,1]], y_test)

# Predict and calculate confusion matrix and accuracy
# (sigmoid outputs thresholded at 0.5 to obtain hard class labels)
y_pred = model.predict([X_test[:,:,0],X_test[:,:,1]]) > 0.5
print(confusion_matrix(y_test, y_pred))
print(f"Accuracy: {accuracy_score(y_test, y_pred)}")

# Split test data: segment the held-out test recordings (subjects 1..15).
# A dummy all-ones first row makes np.concatenate valid on the first
# iteration; it is stripped afterwards.
X_test = np.ones((1, fs * split_second, 2))
record_test = np.ones((1, 1)).reshape(-1,)

for i in range(1, 16):
    # Fixed: the original passed train_label here although this reads the
    # TEST set; the parallel prediction block later in the file uses
    # test_label.  Behavior is unchanged because both label arrays are
    # all-ones for subjects 1..15.
    data_set, data1_set, data_label = split_data1(pathname2, fs, split_second, test_label, i)
    X_test = np.concatenate((X_test, np.concatenate((data_set, data1_set), axis=-1)), axis=0)
    # record_test maps each segment back to its source subject: the label
    # is 1, so data_label * i == i for every segment of subject i.
    record_test = np.concatenate((record_test, data_label * i), axis=0)
record_test = record_test[1:]
X_test = X_test[1:, :, :]

# Predict on test data (sigmoid outputs thresholded at 0.5)
y_pred = model.predict([X_test[:, :, 0], X_test[:, :, 1]]) > 0.5
print("Test set predictions:")
for i in range(len(y_pred)):
    print(record_test[i], int(y_pred[i]))

# Leave-One-Out Cross-Validation (LOOCV)
# NOTE(review): at this point X_train/y_train hold only the 80% training
# split produced by train_test_split above, so LOOCV runs over that subset,
# not over the full segment set — confirm this is intended.
all_data = X_train.copy()
all_label = y_train.copy()
print(all_data.shape, all_label.shape)

y_true = []
y_pred = []
Loop_numbers = all_data.shape[0]


# One full model re-training per held-out segment — expensive:
# Loop_numbers * 30 epochs in total.
for i in range(Loop_numbers):
    model = construct_dual_branch_model(fs * split_second)
    # Hold out segment i; train on all remaining segments.
    X_test = all_data[i, :, :].reshape(1, fs * split_second, 2)
    y_test = all_label[i]
    X_train = np.delete(all_data, i, axis=0)
    y_train = np.delete(all_label, i)
    model.fit([X_train[:,:,0],X_train[:,:,1]], y_train, epochs=30)
    # Threshold the single sigmoid output at 0.5.
    pred = model.predict([X_test[:,:,0],X_test[:,:,1]]) > 0.5
    y_pred.append(int(pred))
    y_true.append(y_test)

print("LOOCV results:")
print(f"Accuracy: {accuracy_score(y_true, y_pred)}")
print(confusion_matrix(y_true, y_pred))

# Predict on the test set
"""
分割测试集
"""
# (The bare triple-quoted string above is a no-op expression left from the
# original; it reads "split the test set".)
X_test = np.ones((1, fs*split_second,2))  # dummy first row, stripped below
y_test = np.ones((1,1)).reshape(-1,)  # NOTE(review): assigned but never used below
record_test = np.ones((1,1)).reshape(-1,)  # maps each segment to its source subject
# Load and segment test subjects 1..15
for i in range(1, 16):
    data_set, data1_set, data_label = split_data1(pathname2, fs, split_second, test_label, i)
    X_test = np.concatenate((X_test,  np.concatenate((data_set,data1_set),axis=-1)), axis=0)
    # test_label is all ones, so data_label * i == subject index i
    record_test = np.concatenate((record_test, data_label * i), axis=0)
record_test = record_test[1:]
X_test = X_test[1:, :, :]
# Predict — note `model` here is the LAST model trained in the LOOCV loop
# above, not the model trained on the full training split.
y_pred = model.predict([X_test[:,:,0],X_test[:,:,1]])
y_pred = y_pred > 0.5
for i in range(len(y_pred)):
    print(record_test[i],int(y_pred[i]))
