import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, LSTM, Dense, Dropout, Flatten
import tensorflow.keras as keras

# Function to construct the model
def construct_model(signal_length):
    """Build and compile the 1-D CNN + LSTM binary classifier.

    Args:
        signal_length: number of samples in one input segment.

    Returns:
        A compiled Keras ``Sequential`` model that takes input of shape
        ``(signal_length, 1)`` and outputs a single sigmoid probability.
    """
    layer_stack = [
        Conv1D(filters=64, kernel_size=16, strides=1, activation='relu',
               input_shape=(signal_length, 1)),
        MaxPooling1D(pool_size=5, strides=5),
        Dropout(rate=0.2),
        Conv1D(filters=128, kernel_size=13, strides=1, activation='relu'),
        MaxPooling1D(pool_size=5, strides=5),
        # return_sequences=True keeps the time axis so Flatten sees all steps.
        LSTM(units=32, return_sequences=True),
        Flatten(),
        Dense(units=64, activation='relu'),
        Dropout(rate=0.2),
        Dense(units=1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss=keras.losses.binary_crossentropy,
                  optimizer='Adam', metrics=['acc'])
    return model

# Function to load the data (use second lead)
def load_data(pathname, file_numbers):
    """Read the second EEG lead for each numbered subject directory.

    Args:
        pathname: base directory (with trailing separator) containing
            zero-padded subject folders ``01/``, ``02/``, ...
        file_numbers: how many consecutive subjects to load, starting at 1.

    Returns:
        List of 1-D numpy arrays, one per subject, each holding column 1
        (the second lead) of that subject's ``eegsignals.csv``.
    """
    signals = []
    for idx in range(1, file_numbers + 1):
        csv_path = f'{pathname}{idx:02d}/eegsignals.csv'
        full_recording = np.loadtxt(csv_path, delimiter=',')
        signals.append(full_recording[:, 1])  # keep only the second lead
    return signals

# Function to split the data
def split_data1(pathname, fs, split_second, labels, file_numbers):
    """Load one subject's second-lead EEG and slice it into equal segments.

    Args:
        pathname: base directory (with trailing separator) containing
            zero-padded subject folders ``01/``, ``02/``, ...
        fs: samples per second (presumably the sampling rate in Hz — confirm).
        split_second: segment duration in seconds.
        labels: per-subject labels; ``labels[file_numbers - 1]`` is attached
            to every segment produced here.
        file_numbers: 1-based subject index (folder ``01``, ``02``, ...).

    Returns:
        Tuple ``(data_segment, label)`` where ``data_segment`` has shape
        ``(n_segments, fs * split_second, 1)`` and ``label`` has shape
        ``(n_segments,)``.
    """
    # BUG FIX: the original appended both '<nn>/' and '/eegsignals.csv',
    # producing a double slash in the path (and it re-typed the int
    # parameter to str along the way). Build the path in one step.
    path = f'{pathname}{file_numbers:02d}/eegsignals.csv'
    data = np.loadtxt(path, delimiter=',')[:, 1]  # second lead (column 1)
    cur_label = labels[file_numbers - 1]
    segment_len = fs * split_second
    # Integer division guarantees every segment fits inside the recording,
    # so the original's `end_index > data.shape[0]` guard was unreachable.
    n_segments = data.shape[0] // segment_len
    data_segment = [
        data[k * segment_len:(k + 1) * segment_len].reshape(-1, 1)
        for k in range(n_segments)
    ]
    label = [cur_label] * n_segments
    return np.array(data_segment), np.array(label)

# Training and testing paths (hard-coded Windows layout with numbered
# subject folders 01/, 02/, ... each holding eegsignals.csv).
pathname1 = 'D:/BaiduNetdiskDownload/dataset_BME_depression/traindata/'
pathname2 = 'D:/BaiduNetdiskDownload/dataset_BME_depression/testdata/'

# Load data
# NOTE(review): train_data/test_data are loaded here but never used below —
# the segmentation loop re-reads each file through split_data1 instead.
train_data = load_data(pathname1, 30)
test_data = load_data(pathname2, 15)


# Set labels
# First 22 training subjects labeled 1, remaining 8 labeled 0.
train_label = np.array([1] * 22 + [0] * 8)
# NOTE(review): 30 test labels are declared but only 15 test files exist;
# split_data1 indexes labels[i-1] for i in 1..15, so the extras are unused.
test_label = np.array([1] * 30)  # Test labels

# Split training data
fs = 250          # samples per second — presumably the sampling rate in Hz; confirm
split_second = 6  # seconds per segment, so each segment is fs*split_second samples
# Seed the accumulators with one dummy row (stripped below) so that
# np.concatenate has matching shapes on the first iteration.
X_train = np.ones((1, fs * split_second, 1))
y_train = np.ones((1, 1)).reshape(-1,)

# Segment every training subject and stack segments/labels.
for i in range(1, 31):
    data_set, data_label = split_data1(pathname1, fs, split_second, train_label, i)
    X_train = np.concatenate((X_train, data_set), axis=0)
    y_train = np.concatenate((y_train, data_label), axis=0)
X_train = X_train[1:]  # drop the dummy seed row
y_train = y_train[1:]
print(X_train.shape, y_train.shape)

# Split train and test data (80/20 at segment level).
# NOTE(review): segments from the same subject can land in both splits,
# so the validation accuracy below may be optimistic — confirm intent.
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2, shuffle=True)

# Construct the model (input length = one segment of fs*split_second samples)
model = construct_model(fs * split_second)

# Train the model
model.fit(X_train, y_train, batch_size=32, epochs=30)

# Evaluate the model on the held-out 20% split
print("Evaluation: Loss | Accuracy")
model.evaluate(X_test, y_test)

# Predict and calculate confusion matrix and accuracy
# Sigmoid outputs are thresholded at 0.5 to get binary class predictions.
y_pred = model.predict(X_test) > 0.5
print(confusion_matrix(y_test, y_pred))
print(f"Accuracy: {accuracy_score(y_test, y_pred)}")

# Split test data
X_test = np.ones((1, fs * split_second, 1))
record_test = np.ones((1, 1)).reshape(-1,)

for i in range(1, 16):
    data_set, data_label = split_data1(pathname2, fs, split_second, test_label, i)
    X_test = np.concatenate((X_test, data_set), axis=0)
    record_test = np.concatenate((record_test, data_label * i), axis=0)
record_test = record_test[1:]
X_test = X_test[1:, :, :]

# Predict on test data
y_pred = model.predict(X_test) > 0.5
print("Test set predictions:")
for i in range(len(y_pred)):
    print(record_test[i], int(y_pred[i]))

# Leave-One-Out Cross-Validation (LOOCV)
# NOTE(review): all_data is the post-train_test_split X_train (80% of the
# segments), not the full training set — confirm that is intended.
all_data = X_train.copy()
all_label = y_train.copy()
print(all_data.shape, all_label.shape)

y_true = []
y_pred = []
Loop_numbers = all_data.shape[0]

# Train a fresh model from scratch for every left-out segment.
# NOTE(review): this is 30 epochs per segment over thousands of segments —
# extremely expensive; confirm this is meant to run at segment granularity.
for i in range(Loop_numbers):
    model = construct_model(fs * split_second)
    X_test = all_data[i, :, :].reshape(1, fs * split_second, 1)
    y_test = all_label[i]
    # Remaining segments form the training fold (X_train/y_train are
    # deliberately rebound here, shadowing the earlier split).
    X_train = np.delete(all_data, i, axis=0)
    y_train = np.delete(all_label, i)
    model.fit(X_train, y_train, epochs=30)
    pred = model.predict(X_test) > 0.5  # threshold sigmoid output
    y_pred.append(int(pred))
    y_true.append(y_test)

print("LOOCV results:")
print(f"Accuracy: {accuracy_score(y_true, y_pred)}")
print(confusion_matrix(y_true, y_pred))

# Predict on the test set (second pass, using the last LOOCV model).
"""
分割测试集
"""
# BUG FIX: the accumulator must be 3-D — (1, fs*split_second, 1) — to match
# the (n, fs*split_second, 1) segments returned by split_data1. The original
# 2-D seed made np.concatenate (and X_test[1:, :, :]) raise a ValueError.
X_test = np.ones((1, fs * split_second, 1))
record_test = np.ones((1, 1)).reshape(-1,)  # records which subject each segment came from
# Read and segment every test recording.
for i in range(1, 16):
    data_set, data_label = split_data1(pathname2, fs, split_second, test_label, i)
    X_test = np.concatenate((X_test, data_set), axis=0)
    # Test labels are all ones, so data_label * i encodes the subject id.
    record_test = np.concatenate((record_test, data_label * i), axis=0)
record_test = record_test[1:]  # drop the dummy seed entry
X_test = X_test[1:, :, :]
# Predict: threshold sigmoid output at 0.5, print (subject id, class).
y_pred = model.predict(X_test)
y_pred = y_pred > 0.5
for i in range(len(y_pred)):
    print(record_test[i], int(y_pred[i]))
