

import numpy as np

# mne imports
import mne
from mne import io
from mne.datasets import sample

# EEGNet-specific imports
from EEGModels import EEGNet
from tensorflow.keras import utils as np_utils
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import backend as K

# PyRiemann imports
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from pyriemann.utils.viz import plot_confusion_matrix
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression

# tools for plotting confusion matrices
from matplotlib import pyplot as plt
import os
# While the default TensorFlow ordering is 'channels_last', set it here
# explicitly in case the user has changed the default image ordering —
# the NHWC reshapes below (trials, chans, samples, kernels) rely on it.
K.set_image_data_format('channels_last')

##################### Process, filter and epoch the data ######################
# Directories containing the pre-epoched .npy datasets (2 s windows, 250 Hz).
# NOTE(review): splits 1 and 3 both point at the 109-subject directory, so
# that dataset is loaded twice below — confirm the duplication is intended.
_DIR_109 = "/media/brainseek/dataset/109subjects/LB_2s_250Hz/"
_DIR_52 = "/media/brainseek/dataset/52subjects/LB_2s_250Hz/"

train_data_path_1 = _DIR_109
test_data_path_1 = _DIR_109
train_data_path_2 = _DIR_52
test_data_path_2 = _DIR_52
train_data_path_3 = _DIR_109
test_data_path_3 = _DIR_109

# Load the pre-epoched data.  Each split directory contributes a data array
# (presumably (trials, channels, samples) — the shapes printed below confirm)
# plus a matching label array; the three splits are stacked along the trial
# axis in one np.concatenate call each.  The original chained np.append
# calls copied the growing array on every append and repeated the same
# load/stack logic twelve times.
def _load_stacked(dirs, fname):
    """Load `fname` from every directory in `dirs`, stacked along axis 0."""
    return np.concatenate([np.load(os.path.join(d, fname)) for d in dirs],
                          axis=0)

_train_dirs = (train_data_path_1, train_data_path_2, train_data_path_3)
_test_dirs = (test_data_path_1, test_data_path_2, test_data_path_3)

train_data_raw = _load_stacked(_train_dirs, "train_data_total.npy")
test_data_raw = _load_stacked(_test_dirs, "test_data_total.npy")
train_label_raw = _load_stacked(_train_dirs, "train_label_total.npy")
test_label_raw = _load_stacked(_test_dirs, "test_label_total.npy")


print(train_data_raw.shape)
print(test_data_raw.shape)
print(train_label_raw.shape)
print(test_label_raw.shape)


kernels, chans, samples = 1, 2, 500


# Train on the full training set; split the held-out set in half, first half
# for validation and second half for final testing.  Floor division (//)
# replaces int(x / 2): same result, but stays exact for any integer size
# instead of round-tripping through a float.
X_train      = train_data_raw
Y_train      = train_label_raw
_mid_data    = test_data_raw.shape[0] // 2
_mid_label   = test_label_raw.shape[0] // 2   # == _mid_data when data/labels align — TODO confirm
X_validate   = test_data_raw[:_mid_data]
Y_validate   = test_label_raw[:_mid_label]
X_test       = test_data_raw[_mid_data:]
Y_test       = test_label_raw[_mid_label:]


############################# EEGNet portion ##################################

# The labels appear to arrive already one-hot encoded (the to_categorical
# step is disabled and argmax is applied to them later) — confirm against
# the label files.  Reshape the data to NHWC order
# (trials, channels, samples, kernels) with a single kernel axis, matching
# the 'channels_last' ordering configured at the top of the script.
X_train, X_validate, X_test = (
    arr.reshape(arr.shape[0], chans, samples, kernels)
    for arr in (X_train, X_validate, X_test)
)

print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Build EEGNet-8,2 (F1=8, D=2, F2=16) with a 32-sample temporal kernel.
# Other configurations may do better; this is a reasonable starting point.
model = EEGNet(nb_classes=2, Chans=chans, Samples=samples, dropoutRate=0.5,
               kernLength=32, F1=8, D=2, F2=16, dropoutType='Dropout')

# Categorical cross-entropy with Adam; track accuracy during training.
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# Number of parameters in the model (informational only).
numParams = model.count_params()

# Checkpoint whichever weights score best on the validation set.
# NOTE(review): /tmp/checkpoint.h5 is shared and volatile — consider a
# per-run checkpoint path on this system.
checkpointer = ModelCheckpoint(filepath='/tmp/checkpoint.h5', verbose=1,
                               save_best_only=True)

###############################################################################
# If the classification task were imbalanced (significantly more trials in
# one class than the others), per-class weights here would rebalance the
# optimization.  This data is approximately balanced, so both classes get a
# weight of 1 — kept purely for illustration/completeness.
###############################################################################
class_weights = {0: 1, 1: 1}

###############################################################################
# Fit the model.  With small sample sizes the outcome can be noisy from run
# to run, but most runs should be comparable to the xDAWN + Riemannian
# geometry baseline sketched further below.
###############################################################################
fittedModel = model.fit(X_train, Y_train, batch_size=50, epochs=50, verbose=2,
                        validation_data=(X_validate, Y_validate),
                        callbacks=[checkpointer], class_weight=class_weights)

# Reload the best-on-validation weights saved by the checkpoint callback.
model.load_weights('/tmp/checkpoint.h5')

###############################################################################
# Alternatively, the pre-trained weights shipped with the repo can be loaded
# (reported ~93% accuracy).  Point WEIGHTS_PATH at the file on your system:
#     WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
#     model.load_weights(WEIGHTS_PATH)
###############################################################################

###############################################################################
# Make predictions on the test set: arg-max over the class probabilities per
# trial, scored against the arg-max of the one-hot test labels.
###############################################################################
probs = model.predict(X_test)
preds = probs.argmax(axis=-1)
acc = np.mean(preds == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc))


# ############################# PyRiemann Portion ##############################
# Disabled baseline from PyRiemann's ERP example: xDAWN spatial filtering,
# tangent-space mapping, then logistic regression.  To re-enable, reshape the
# data back to (trials, channels, samples) and fit on single-column labels:
#
#     clf = make_pipeline(XdawnCovariances(2),
#                         TangentSpace(metric='riemann'),
#                         LogisticRegression())
#     clf.fit(X_train.reshape(X_train.shape[0], chans, samples),
#             Y_train.argmax(axis=-1))
#     preds_rg = clf.predict(X_test.reshape(X_test.shape[0], chans, samples))
#     print("Classification accuracy: %f "
#           % np.mean(preds_rg == Y_test.argmax(axis=-1)))

# Confusion matrix for the EEGNet predictions.
# NOTE(review): pyriemann's plot_confusion_matrix signature is
# (targets, predictions, target_names, ...); the predictions are passed first
# here, which would transpose the matrix — confirm the intended order.
names = ['left', 'base']
true_classes = Y_test.argmax(axis=-1)
plt.figure(0)
plot_confusion_matrix(preds, true_classes, names, title=__file__)
plt.savefig(__file__ + 'confusion_matrix.png')
plt.show()


# Plot the training/validation accuracy and loss curves recorded in the
# Keras History object.  Distinct names (train_acc, train_loss) avoid
# shadowing the test-set accuracy scalar `acc` computed above.
train_acc = fittedModel.history['accuracy']
val_acc = fittedModel.history['val_accuracy']
train_loss = fittedModel.history['loss']
val_loss = fittedModel.history['val_loss']

# Accuracy curves on top; loss curves below, with extra vertical spacing so
# the titles don't collide.
plt.subplot(2, 1, 1)
plt.subplots_adjust(wspace=0, hspace=0.5)
plt.plot(train_acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy:'
          + 'Train/Test samples   '
          + str(X_train.shape[0]) + '/' + str(X_test.shape[0]))
plt.legend()

plt.subplot(2, 1, 2)
plt.plot(train_loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('\nTraining and Validation Loss')
plt.legend()
plt.savefig(__file__ + '.png')
plt.show()



