# -*- coding: utf-8 -*-
"""
Created on Tue Nov  4 10:37:29 2025

@author: ZT
"""



import mne
import os
import json
    
    
    

import mne
import os
import math
import random
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from scipy.stats import f_oneway
from sklearn.preprocessing import StandardScaler

#%%
# Build the training set from multiple recordings in one folder; files are
# processed per-file below (the mne.concatenate_raws approach was abandoned,
# see the commented-out draft that follows).

# Root folder holding one sub-directory of raw recordings per session.
# NOTE(review): the "trianing" typo is kept on purpose — the name is
# referenced later in this file. Uses Windows-style path separators.
trianingDataFolder = '..\\training_data\\'

# file_list=[]
# raw_list = []

# for file_name in os.listdir(trianingDataFolder):
#     filePathName = trianingDataFolder+file_name
#     # file_list.append(filePathName)
#     if not (filePathName.endswith('_.edf')):
#         continue
    
#     raw = mne.io.read_raw_edf(filePathName,preload=True) # preload should be True, if you want to use filter like below 

#     # raw.filter(l_freq=1, h_freq=90, method='iir')
#     # raw.notch_filter(freqs=50)

#     raw_list.append(raw)

# final_raw = mne.concatenate_raws(raw_list)

#%%



def StandardScalar_data(data):
    """Z-score every (channel, timepoint) feature across trials.

    The 3-D trial array is flattened to 2-D (one row per trial), a
    ``StandardScaler`` is fitted over the trial axis, and the scaled data
    is reshaped back to the original layout.

    Parameters
    ----------
    data : ndarray, shape (n_trials, n_channels, n_timepoints)
        Epoched EEG data, e.g. (288, 22, 1001) for BCI IV 2a.

    Returns
    -------
    ndarray
        Standardized copy with the same shape as ``data``.
    """
    print(data.shape)  # e.g. (288, 22, 1001) / (n_trials, 32, 2001)

    n_trials, n_channels, n_times = data.shape
    flat = data.reshape(n_trials, -1)
    print(flat.shape)  # e.g. (288, 22022)

    # fit + transform in one call; fitting is per-feature over the trial axis
    scaled = StandardScaler().fit_transform(flat)

    scaled = scaled.reshape(n_trials, n_channels, n_times)
    print(scaled.shape)  # back to (n_trials, n_channels, n_timepoints)

    return scaled

#%%
labels = np.empty([0], dtype = int) 
tmin = 0
tmax = 4

with open('..\\task\\task_markers.json', 'r') as file:
    markers = json.load(file)
    
label_cue_left_int = markers['Cue_onset_left'][0]
label_cue_right_int = markers['Cue_onset_right'][0]
label_cue_up_int = markers['Cue_onset_up'][0]
label_cue_down_int = markers['Cue_onset_down'][0]    

label_cue_left_str = str(label_cue_left_int)
label_cue_right_str = str(label_cue_right_int)
label_cue_up_str = str(label_cue_up_int)
label_cue_down_str = str(label_cue_down_int)

#%%
# Collect every BDF recording found in the per-session sub-directories of the
# training-data folder.
files_list = []

for sub_file_path in os.listdir(trianingDataFolder):
    file_path = os.path.join(trianingDataFolder, sub_file_path)
    if not os.path.isdir(file_path):
        continue  # skip loose files at the top level (e.g. .gitignore, pickles)
    print(file_path)

    for file_name in os.listdir(file_path):
        # Only .bdf files can be parsed by mne.io.read_raw_bdf below; anything
        # else (hidden files, exports, previously saved results) would crash
        # the reading loop.
        if not file_name.lower().endswith('.bdf'):
            continue
        file_path_name = os.path.join(file_path, file_name)
        print(file_path_name)
        files_list.append(file_path_name)

# Epoch every recording, standardize it, and pool trials/labels across files.
# Parts are accumulated in lists and concatenated once at the end — repeatedly
# calling np.concatenate inside the loop is quadratic in total size.
data_parts = []
label_parts = [labels]  # keep whatever was accumulated before this loop

for file in files_list:
    print(file)

    # preload=True is required for in-memory operations such as filtering.
    raw = mne.io.read_raw_bdf(file, preload=True)
    events_from_annot, event_dict = mne.events_from_annotations(raw)

    # The four cue events, as re-coded per-file by events_from_annotations.
    event_id = [event_dict[label_cue_left_str], event_dict[label_cue_right_str],
                event_dict[label_cue_up_str], event_dict[label_cue_down_str]]

    epochs = mne.Epochs(raw, events_from_annot, event_id=event_id, tmin=tmin,
                        tmax=tmax, reject=None, proj=False, baseline=None,
                        preload=True)
    data_parts.append(StandardScalar_data(epochs.get_data()))

    # Map the per-file event ids (e.g. 6, 7, ...) back to the original marker
    # codes (e.g. 22, 23, ...) so labels are comparable across files. The map
    # is restricted to the four cue ids, so int() only ever sees the numeric
    # cue descriptions; each id maps to exactly one key (the original
    # ''.join(keys) silently garbled the label on ambiguous matches).
    code_of_id = {val: int(key) for key, val in event_dict.items()
                  if val in event_id}
    label_parts.append(np.array([code_of_id[ev] for ev in epochs.events[:, -1]],
                                dtype=int))

data = np.concatenate(data_parts, axis=0)
labels = np.concatenate(label_parts, axis=0)



#%%
# Persist the pooled dataset for later training runs.
save_pickle = 1  # set to 0 to skip writing the pickle
import pickle

mMIIData = {}
mMIIData['datas'] = data            # (n_trials, n_channels, n_timepoints)
mMIIData['labels'] = labels         # original marker codes, one per trial
mMIIData['fs'] = raw.info['sfreq']  # sampling rate of the last file read

filePathName_save = '..\\training_data\\mMIIData.p'

if save_pickle == 1:
    # Context manager guarantees the handle is closed even if dump raises
    # (the original pickle.dump(..., open(...)) leaked the file object).
    with open(filePathName_save, 'wb') as f:
        pickle.dump(mMIIData, f)



# for i in range(1,9):
#     fileName = 'A0'+str(i)+'T.gdf'
#     print(fileName)
#     file_path = os.path.join(raw_data_folder, fileName)
#     raw = mne.io.read_raw_gdf(file_path, eog=['EOG-left', 'EOG-central', 'EOG-right'], preload=True)
#     raw.drop_channels(['EOG-left', 'EOG-central', 'EOG-right'])
    
#     events_from_annot,event_dict = mne.events_from_annotations(raw)
#     event_id = [event_dict['769'],event_dict['770'],event_dict['771'],event_dict['772']]
#     # fig = mne.viz.plot_events(events_from_annot,event_id=event_dict,sfreq=raw.info['sfreq'],first_samp=raw.first_samp)
#     # raw.resample(128, npad='auto')
    
#     #     # High Pass Filtering 4-40 Hz
#     #     raw.filter(l_freq=1, h_freq=100, method='iir')

#     #     # Notch filter for Removal of Line Voltage
#     #     raw.notch_filter(freqs=50)
    
    
#     if i==1:
#         epochs = mne.Epochs(raw, events_from_annot, event_id=event_id, tmin=tmin, tmax=tmax, reject=None,proj=False, baseline=None, preload=True)
#         tmp_data =  epochs.get_data()
#         tmp_data_2 = StandardScalar_data(tmp_data)
#         data = tmp_data_2        
#     else:
#         epochs = mne.Epochs(raw, events_from_annot, event_id=event_id, tmin=tmin, tmax=tmax, reject=None,proj=False, baseline=None, preload=True)
#         tmp_data =  epochs.get_data()      
#         tmp_data_2 = StandardScalar_data(tmp_data)
#         data = np.concatenate((data, tmp_data_2), axis=0)
        
        
    
#     # convert the event_ids(6,7 ..) to original labels like 769,770
#     labels_tmp = epochs.events[:,-1]
#     labels_int = []
#     for l in labels_tmp:
#         keys = [key for key, val in event_dict.items() if val == l]
#         labels_int.append(int(''.join(keys)))
#     labels = np.concatenate((labels,labels_int), axis=0)

# #%%

# import pickle

# bcic_iv_2a_data_all_sub = {}
# bcic_iv_2a_data_all_sub['datas']=data
# bcic_iv_2a_data_all_sub['labels']=labels
# bcic_iv_2a_data_all_sub['fs']=raw.info['sfreq']

# #%%


# filePathName_save = 'D:/materials/dataset/bci/BCICIV_2a_pickle/bcic_iv_2a_data_T.p'
# pickle.dump(bcic_iv_2a_data_all_sub,open(filePathName_save,'wb'))













    # +postfix_str
    # print(filePathName)
    
    # if not (filePathName.endswith('_.edf')):
    #     continue
    
    # 

    # raw.filter(l_freq=1, h_freq=90, method='iir')
    # raw.notch_filter(freqs=50)   
    
    
    # 
    # event_id = [event_dict['769'],event_dict['770'],event_dict['771'],event_dict['772']]
    
    
    
    
    
    
    # fig = mne.viz.plot_events(events_from_annot,event_id=event_dict,sfreq=raw.info['sfreq'],first_samp=raw.first_samp)
    # raw.resample(128, npad='auto')
    
    #     # High Pass Filtering 4-40 Hz
    #     raw.filter(l_freq=1, h_freq=100, method='iir')

    #     # Notch filter for Removal of Line Voltage
    #     raw.notch_filter(freqs=50)
    
    
    # if i==1:
    #     epochs = mne.Epochs(raw, events_from_annot, event_id=event_id, tmin=tmin, tmax=tmax, reject=None,proj=False, baseline=None, preload=True)
    #     tmp_data =  epochs.get_data()
    #     tmp_data_2 = StandardScalar_data(tmp_data)
    #     data = tmp_data_2        
    # else:
    #     epochs = mne.Epochs(raw, events_from_annot, event_id=event_id, tmin=tmin, tmax=tmax, reject=None,proj=False, baseline=None, preload=True)
    #     tmp_data =  epochs.get_data()      
    #     tmp_data_2 = StandardScalar_data(tmp_data)
    #     data = np.concatenate((data, tmp_data_2), axis=0)
        
        
    
    # # convert the event_ids(6,7 ..) to original labels like 769,770
    # labels_tmp = epochs.events[:,-1]
    # labels_int = []
    # for l in labels_tmp:
    #     keys = [key for key, val in event_dict.items() if val == l]
    #     labels_int.append(int(''.join(keys)))
    # labels = np.concatenate((labels,labels_int), axis=0)
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
# List of the events
# "trial_end":[13],             1
# "Cue_onset_left":[22],        2
# "Cue_onset_right":[23],       3
# "Cue_onset_up":[24],          4
# "Cue_onset_down":[25],        5
# "begin":[99],                 6

# fig = mne.viz.plot_events(events_from_annot,event_id=event_dict,sfreq=raw.info['sfreq'],first_samp=raw.first_samp)

# event_dict_new = {value:int(float(key)) for key,value in event_dict.items()}
# events_from_annot_new = events_from_annot.copy()
# for i, c in enumerate(events_from_annot[:,2]):
#     events_from_annot_new[i,2]=event_dict_new[c]

# evnet_dict_marker = {
#     'left':label_cue_left_int,
#     'right':label_cue_right_int,
#     'up':label_cue_up_int,
#     'down':label_cue_down_int
# }

# #%%

# signal_win_start = 0.5 #second
# signal_win_end = 2.5 #second

# epochs = mne.Epochs(
#     final_raw,
#     events_from_annot_new,
#     event_id=evnet_dict_marker,
#     tmin=signal_win_start,
#     tmax=signal_win_end,
#     baseline = None
# )

# data = epochs.get_data()
# labels = epochs.events[:,-1]


# #%%

# #%%
# save_pickle = 1
# import pickle

# mMIIData = {}
# mMIIData['datas']=data
# mMIIData['labels']=labels
# mMIIData['fs']=raw.info['sfreq']

# filePathName_save = 'D:\\m_proj_24\\mii_app\\training_data\\mMIIData.p'

# if save_pickle==1:
#     pickle.dump(mMIIData,open(filePathName_save,'wb'))
    



# # raw_data_folder = 'D:/materials/dataset/bci/BCICIV_2a_gdf/'

# raw_data_folder = 'D:/mii_app/training_data/'

# training_data

# files = os.listdir(raw_data_folder)

# plt.close('all')


# #%% 
# # List of the events



# for i in range(1,9):
#     fileName = 'A0'+str(i)+'T.gdf'
#     print(fileName)
#     file_path = os.path.join(raw_data_folder, fileName)