#!/usr/bin/python
# -*- coding: utf-8 -*-

import mne
import matplotlib.pyplot as plt
from sklearn import preprocessing
import numpy as np
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split

"""
    EEG datasets for motor imagery brain computer interface  http://gigadb.org/dataset/100295
    64 channels EEG，512hz freq, 2 seconds rest,3 seconds MI-task,2 seconds rest,Total 7 seconds
    100 runs for each of the 52 subjects
    labels 0:base  1:left  2:right
    

"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft,fftfreq
from scipy.io import loadmat
from scipy import interpolate
from scipy import signal
# Input directory with the raw GigaDB .mat files (s01.mat .. s52.mat).
data_path = "/dataset/52subjects/rawdata/"
# Output directory for the 2 s left/right/base epochs resampled to 250 Hz.
save_path = "/dataset/52subjects/LRB2s/250Hz/"

# Map the dataset's padded channel labels (trailing dots) to clean names.
# BUG FIX: 'Fc1..' previously mapped to 'Fc3' (copy-paste duplicate of the
# 'Fc3..' entry); it now maps to 'Fc1'.
# NOTE(review): the key 'Ff8..' looks like a typo of 'Ft8..' (its value is
# 'Ft8') — confirm against the actual channel labels before changing the key.
rename_mapping = {'Fp1.': 'Fp1', 'AF7.': 'AF7', 'AF3.': 'AF3', 'F1.': 'F1', 'F3.': 'F3', 'F5.': 'F5',
                  'F7.': 'F7', 'FT7..': 'FT7', 'Fc5..': 'Fc5', 'Fc3..': 'Fc3', 'Fc1..': 'Fc1', 'C1..': 'C1', 'C3..': 'C3',
                  'C5..': 'C5', 'T7.': 'T7', 'Tp7.': 'Tp7', 'Cp5.': 'Cp5', 'Cp3.': 'Cp3', 'Cp1.': 'Cp1',
                  'P1.': 'P1', 'P3.': 'P3', 'P5.': 'P5', 'P7.': 'P7', 'P9.': 'P9', 'Po7.': 'Po7',
                  'Po3.': 'Po3', 'O1.': 'O1', 'Lz.': 'Lz', 'Oz.': 'Oz', 'Poz..': 'Poz', 'Pz..': 'Pz', 'Cpz..': 'Cpz',
                  'Fpz..': 'Fpz', 'Fp2..': 'Fp2', 'Af8..': 'Af8', 'Af4..': 'Af4', 'Afz..': 'Afz', 'Fz..': 'Fz', 'F2.': 'F2',
                  'F4.': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ff8..': 'Ft8', 'Fc6.': 'Fc6', 'Fc4.': 'Fc4', 'Fc2.': 'Fc2',
                  'Fcz..': 'Fcz', 'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'T8..': 'T8', 'Tp8..': 'Tp8',
                  'Cp6..': 'Cp6', 'Cp4..': 'Cp4', 'Cp2.': 'Cp2', 'P2.': 'P2', 'P4.': 'P4', 'P6.': 'P6', 'P8.': 'P8',
                  'P10..': 'P10', 'Po8..': 'Po8', 'Po4..': 'Po4', 'O2..': 'O2'}


def get_gigadb(subject: int):
    """
    Load, segment, standardize, split, and resample one subject's MI EEG.

    :param subject: subject serial number in [1, 52]; the file loaded is
                    data_path/s%02d.mat
    :return: (train_data, test_data, train_label, test_label) where the data
             arrays have shape (n_samples, 500, 2) — 2-channel, 2 s windows
             resampled from 1024 to 500 points — and the labels are one-hot
             over the 3 classes 0:base, 1:left, 2:right
    """
    # The 12 channels kept from the 64-channel montage (motor-area pairs,
    # interleaved left/right). Same selection for all three conditions.
    channel_idx = [11, 46, 10, 45, 12, 49, 13, 50, 19, 56, 18, 55]

    raw_new = loadmat(os.path.join(data_path, 's%02d' % subject))
    eeg = raw_new['eeg'][0, 0]
    # Cast to float64 so the in-place standardization below cannot be
    # silently truncated if loadmat yields an integer dtype.
    imagery_left = eeg['imagery_left'][channel_idx, :].astype(np.float64)
    imagery_right = eeg['imagery_right'][channel_idx, :].astype(np.float64)
    rest_raw = eeg['rest'][channel_idx, :].astype(np.float64)

    def _to_trials(arr):
        # One 7 s trial is 3584 samples at 512 Hz; subjects have 100 or 120
        # trials (358400 or 430080 samples), so infer the count from length.
        # Result is channel-last: (trials, 3584, 12).
        n_trials = arr.shape[1] // 3584
        return arr.reshape(12, n_trials, 3584).transpose(1, 2, 0)

    imagery_left_raw = _to_trials(imagery_left)
    imagery_right_raw = _to_trials(imagery_right)

    # Keep a 2 s window of the MI task (samples 1024..2048, seconds 2-4).
    imagery_left_2s = imagery_left_raw[:, 1024:2048, :]
    imagery_right_2s = imagery_right_raw[:, 1024:2048, :]

    # Cut the continuous rest (baseline) recording into 32 non-overlapping
    # 2 s epochs, channel-last: (32, 1024, 12).
    rest_2s = rest_raw[:, 0:32768].transpose(1, 0).reshape(32, 1024, 12)

    def _standardize_in_place(trials):
        # Per-trial, per-channel zero-mean / unit-variance scaling.
        scaler = preprocessing.StandardScaler()
        for i in range(len(trials)):
            trials[i] = scaler.fit_transform(trials[i])

    _standardize_in_place(rest_2s)
    _standardize_in_place(imagery_left_2s)
    # BUG FIX: the original loop fit and transformed imagery_left_2s here and
    # wrote the result into imagery_right_2s, corrupting every right-hand
    # trial with left-hand data. Standardize the right-hand trials themselves.
    _standardize_in_place(imagery_right_2s)

    # Labels: 0 = base/rest, 1 = left MI, 2 = right MI.
    labels_rest = np.zeros([1, len(rest_2s)])
    labels_left = np.ones([1, len(imagery_left_2s)])
    labels_right = np.full([1, len(imagery_right_2s)], 2.0)

    # Stack the trials (left, rest, right — matching the label order) and
    # one-hot encode the labels.
    data = np.vstack((imagery_left_2s, rest_2s, imagery_right_2s))
    labels_left_rest_right = np.hstack((labels_left, labels_rest, labels_right))
    labels = to_categorical(labels_left_rest_right[0])  # one-hot

    # Split BEFORE the channel-pairing below so that the six 2-channel
    # samples derived from one trial all land in the same split.
    train_data_ori, test_data_ori, train_label_ori, test_label_ori = train_test_split(
        data, labels, test_size=0.2, random_state=42)

    # Re-cut the 12 channels into six 2-channel samples per trial, repeating
    # the trial's label for each pair.
    train_parts, test_parts = [], []
    for i in range(0, 12, 2):
        train_parts.append(train_data_ori[:, :, i:i + 2])
        test_parts.append(test_data_ori[:, :, i:i + 2])
    train_data = np.concatenate(train_parts)
    test_data = np.concatenate(test_parts)
    train_label = np.concatenate([train_label_ori] * 6)
    test_label = np.concatenate([test_label_ori] * 6)
    print('data loaded' + str(subject))

    # Resample each 1024-sample (2 s) window to 500 points to fit the model
    # input (effectively 250 Hz).
    train_data = signal.resample_poly(train_data, 500, 1024, axis=1)
    test_data = signal.resample_poly(test_data, 500, 1024, axis=1)

    return train_data, test_data, train_label, test_label


if __name__ == '__main__':
    # Accumulators over all subjects; each sample is a (500, 2) window and
    # each label is one-hot over the 3 classes.
    train_data_total = np.empty((0, 500, 2))
    test_data_total = np.empty((0, 500, 2))
    train_label_total = np.empty((0, 3))
    test_label_total = np.empty((0, 3))
    subjects_good = np.arange(1, 53)  # subjects 1..52
    # BUG FIX: os.mkdir raised FileExistsError on re-runs; makedirs with
    # exist_ok=True also creates any missing parent directories.
    os.makedirs(save_path, exist_ok=True)
    for subs in subjects_good:
        res = get_gigadb(subs)
        train_data, test_data, train_label, test_label = res
        # Per-subject arrays.
        np.save(os.path.join(save_path, "train_data" + str(subs)), train_data, allow_pickle=True)
        np.save(os.path.join(save_path, "test_data" + str(subs)), test_data, allow_pickle=True)
        np.save(os.path.join(save_path, "train_label" + str(subs)), train_label, allow_pickle=True)
        np.save(os.path.join(save_path, "test_label" + str(subs)), test_label, allow_pickle=True)
        # Pooled arrays across subjects.
        train_data_total = np.concatenate((train_data_total, train_data), axis=0)
        test_data_total = np.concatenate((test_data_total, test_data), axis=0)
        train_label_total = np.concatenate((train_label_total, train_label), axis=0)
        test_label_total = np.concatenate((test_label_total, test_label), axis=0)
    np.save(os.path.join(save_path, "train_data_total"), train_data_total, allow_pickle=True)
    np.save(os.path.join(save_path, "test_data_total"), test_data_total, allow_pickle=True)
    # NOTE(review): "lable" is misspelled but kept byte-for-byte — downstream
    # loaders may already depend on these exact file names.
    np.save(os.path.join(save_path, "train_lable_total"), train_label_total, allow_pickle=True)
    np.save(os.path.join(save_path, "test_lable_total"), test_label_total, allow_pickle=True)

    # Print the shapes from the last subject as a sanity check.
    for r in res:
        print(r.shape)