# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 11:20:18 2024

@author: xiaok

ref:
    https://github.com/amrzhd/EEGNet?tab=readme-ov-file
    
    as far as I know, most of the codes in github originate from :
https://github.com/vlawhern/arl-eegmodels/blob/master/EEGModels.py
"""

import os
import math
import random
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from scipy.stats import f_oneway

# Torch
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.optim as optim
from torchsummary import summary
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler

# Scikit-Learn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import confusion_matrix
import pickle
import time
from EEGNet import EEGNet

#%%
# Location of the preprocessed training data.
# Earlier experiments pointed this at various BCIC IV-2a pickle exports
# (see repo history); the current run uses the locally collected mMIIData set.
cleaned_data_folder = '../training_data/'
filePathName_save = cleaned_data_folder + "mMIIData.p"

#%%
# Load the preprocessed dataset.  The pickle holds a dict with keys
# 'datas', 'labels' and 'fs' (sampling frequency).
# NOTE(review): un-pickling executes arbitrary code — only load files you
# produced yourself.
# Fix: the original `pickle.load(open(path, 'rb'))` never closed the file;
# a `with` block guarantees the handle is released even on error.
with open(filePathName_save, 'rb') as f:
    bcic_iv_2a_data_all_sub = pickle.load(f)

data = bcic_iv_2a_data_all_sub['datas']
labels = bcic_iv_2a_data_all_sub['labels']
fs = bcic_iv_2a_data_all_sub['fs']

# assumes data is laid out (trials, channels, samples) — TODO confirm
# against the export script that wrote the pickle.
ch_num = data.shape[1]
time_points = data.shape[2]

#%%


# Choosing Device: prefer GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Loss Function: cross-entropy over raw logits (expects integer class ids).
criterion = nn.CrossEntropyLoss()

# Normalizing Labels to [0, 1, 2, 3] by shifting so the smallest label is 0.
y = labels - np.min(labels)

# Normalizing Input features: z-score (mean=0, std=1).
# NOTE: this is a single global mean/std over the whole array, not
# per-channel or per-trial normalization.
X = (data - np.mean(data)) / np.std(data)

# Checking the existence of null & inf in the dataset
if np.any(np.isnan(X)) or np.any(np.isinf(X)):
    raise ValueError("Data contains NaNs or infinities after normalization.")
if np.any(np.isnan(y)) or np.any(np.isinf(y)):
    raise ValueError("Labels contain NaNs or infinities.")

# Making the X,y tensors for K-Fold Cross Validation.
# NOTE(review): X_tensor / y_tensor are never used later in this script —
# presumably left over from a K-fold variant; confirm before deleting.
X_tensor = torch.Tensor(X).unsqueeze(1)
y_tensor = torch.LongTensor(y)

# Splitting Data: 80% for Train and 20% for Test.
# stratify=y keeps the class proportions equal in both splits;
# random_state=42 makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)

# Converting to Tensor.  unsqueeze(1) inserts a singleton dimension after
# the trial axis — presumably the Conv2d input-channel dim expected by
# EEGNet, i.e. (N, 1, channels, samples); TODO confirm against EEGNet.py.
# NOTE: this moves the full train AND test sets to `device` up front,
# which assumes they fit in GPU memory at once.
X_train = torch.Tensor(X_train).unsqueeze(1).to(device)
X_test = torch.Tensor(X_test).unsqueeze(1).to(device)
y_train = torch.LongTensor(y_train).to(device)
y_test = torch.LongTensor(y_test).to(device)

# Creating Tensor Datasets (not consumed later in the visible script).
train_dataset = TensorDataset(X_train, y_train)
test_dataset = TensorDataset(X_test, y_test)

# Printing the tensor sizes as a sanity check.
print("Size of X_train:", X_train.size())
print("Size of X_test:", X_test.size())
print("Size of y_train:", y_train.size())
print("Size of y_test:", y_test.size())

#%%
# time_points = 501
# Build the EEGNet model for this dataset's channel count / trial length
# and print a layer-by-layer summary.  Input is one "image" per trial:
# (1, channels, samples).
input_size = (1, ch_num, time_points) #1716 Trainable Parameters (As mentioned in paper)
# input_size = (1, 22, 257) #1716 Trainable Parameters (As mentioned in paper)
eegnet_model = EEGNet(chans=ch_num, time_points=time_points).to(device)
summary(eegnet_model, input_size)
     
#%%
# ---- Training ----
learning_rate = 0.001
optimizer = optim.Adam(eegnet_model.parameters(), lr=learning_rate)

num_epochs = 100
batch_size = 128

t0 = time.time()

# Per-epoch training curves. Kept as plain lists so appends are O(1).
# Fix: the original did `history["loss"] = np.append(history["loss"], x)`,
# which copies the whole array every epoch and silently converts the list
# into an ndarray.
history = {"loss": [], "acc": [], "val_loss": [], "val_acc": []}

for epoch in range(num_epochs):
    eegnet_model.train()
    # Re-shuffle the training set each epoch so mini-batches differ.
    X_train, y_train = shuffle(X_train, y_train)
    running_loss = 0.0
    correct = 0
    total = 0

    # Manual mini-batching over the pre-loaded (already on-device) tensors.
    for i in range(0, len(X_train), batch_size):
        inputs = X_train[i:i+batch_size].to(device)
        # Fix: use a local name here — the original rebound the module-level
        # `labels` array (the raw dataset labels) inside this loop.
        batch_labels = y_train[i:i+batch_size].to(device)

        optimizer.zero_grad()
        outputs = eegnet_model(inputs)
        loss = criterion(outputs, batch_labels)
        loss.backward()
        optimizer.step()

        # Weight by batch size so epoch_loss is a true per-sample mean even
        # when the final batch is smaller than batch_size.
        running_loss += loss.item() * inputs.size(0)
        _, predicted = torch.max(outputs, 1)
        total += batch_labels.size(0)
        correct += (predicted == batch_labels).sum().item()

    epoch_loss = running_loss / len(X_train)
    epoch_accuracy = correct / total

    history["loss"].append(epoch_loss)
    history["acc"].append(epoch_accuracy)

    # Progress report every 10 epochs.
    if epoch % 10 == 0:
        print(f"Epoch {epoch}/{num_epochs}, Loss: {epoch_loss:.2f}, Accuracy: {(epoch_accuracy*100):.2f}%")


# Final-epoch mean training loss.
average_loss = running_loss / len(X_train)
print("Average Loss:", average_loss)

# Saving model.
# NOTE(review): this pickles the entire nn.Module; saving
# eegnet_model.state_dict() is the more portable PyTorch convention.
torch.save(eegnet_model, 'eegnet_model.pth')

# Total training wall-clock time in seconds.
print(time.time()-t0)


#%%
# Plot the training curves side by side: accuracy (left), loss (right).
fig, (ax_acc, ax_loss) = plt.subplots(1, 2)

ax_acc.plot(history["acc"], color="red", label='acc')
ax_loss.plot(history["loss"], color="blue", label='loss')
for ax in (ax_acc, ax_loss):
    ax.grid(True)

#%%
eegnet_model.eval()
correct = 0
total = 0
with torch.no_grad():
    for i in range(len(X_test)):
        inputs = X_test[i:i+1].to(device)
        labels = y_test[i:i+1].to(device)
        outputs = eegnet_model(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

accuracy = (correct / total)*100
print(f"Test Accuracy: {accuracy:.2f}%")

#%%
eegnet_model.eval()
y_pred = []
y_true = []
classes = ['Left', 'Right', 'Foot', 'Tongue']

with torch.no_grad():
    for inputs, labels in zip(X_test, y_test):
        outputs = eegnet_model(inputs.unsqueeze(0))  # Forward pass
        _, predicted = torch.max(outputs.data, 1)
        y_pred.append(predicted.item())
        y_true.append(labels.item())

cf_matrix = confusion_matrix(y_true, y_pred)
cf_matrix = cf_matrix.astype('float') / cf_matrix.sum(axis=1)[:, np.newaxis]

# Create DataFrame for visualization
df_cm = pd.DataFrame(cf_matrix, index=classes, columns=classes)

# Plot confusion matrix
plt.figure(figsize=(10, 7))
sn.heatmap(df_cm, annot=True, cmap='Blues', fmt='.2f')
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.title('Confusion Matrix')
plt.savefig('confusion_matrix_eegnet.png')
plt.show()


#%%
#%%
#%%
#%%







