""" 
Copyright (C) 2022 King Saud University, Saudi Arabia 
SPDX-License-Identifier: Apache-2.0 

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the 
License at

http://www.apache.org/licenses/LICENSE-2.0  

Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. 

Author:  Hamdi Altaheri 
"""

#%%
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import confusion_matrix, accuracy_score, ConfusionMatrixDisplay
from sklearn.metrics import cohen_kappa_score
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.models import Model
from matplotlib.colors import ListedColormap


import models 
from preprocess import get_data

#%% ========== Global accumulators (filled by test()) ==========
# Penultimate-layer feature arrays and their matching labels, pooled across
# all subjects and runs; test() extends these for cross-call analysis.
all_features = []
all_labels = []

def draw_tsne(features, labels, sub, results_path):
    """Compute a 2-D t-SNE embedding of one subject's features and save a scatter plot.

    Parameters
    ----------
    features : array-like, shape (n_samples, n_features) — assumed 2-D feature
        matrix from the model's penultimate layer (TODO confirm with caller)
    labels : array-like of class labels; converted to str for the legend
    sub : str, subject identifier used in the title and output file name
    results_path : str, directory where the PNG is written
    """
    features = np.array(features)
    labels = np.array(labels).astype(str)

    n_samples = features.shape[0]
    print(f"[Debug] Subject {sub} 样本数: {n_samples}")

    # Too few points for a meaningful embedding — skip instead of crashing.
    if n_samples < 5:
        print(f"[Warning] Subject {sub} 样本数不足（{n_samples}），跳过t-SNE")
        return

    # BUG FIX: scikit-learn requires perplexity < n_samples. The original
    # formula yielded perplexity == n_samples for exactly 5 samples
    # (min(30, max(5, 2)) == 5), raising a ValueError. Clamp to n_samples-1.
    valid_perplexity = min(30, max(5, n_samples // 2), n_samples - 1)

    tsne = TSNE(n_components=2,
                random_state=42,
                perplexity=valid_perplexity,
                n_iter=500)  # NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn >= 1.5
    reduced_features = tsne.fit_transform(features)

    plt.figure(figsize=(10, 8))
    sns.scatterplot(x=reduced_features[:, 0], y=reduced_features[:, 1],
                    hue=labels, palette="tab10",
                    alpha=0.8)

    plt.title(f't-SNE Visualization of Subject: {sub}')
    save_path = os.path.join(results_path, f'subject_{sub}_tsne.png')
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"生成t-SNE图: {save_path}")

#%% ========== Global t-SNE helper (all runs pooled) ==========
def draw_global_tsne(features, labels, results_path):
    """t-SNE scatter plot of features pooled across all runs and subjects.

    Parameters
    ----------
    features : list of (n_i, n_features) arrays; concatenated along axis 0
    labels : list of label arrays, one per entry in `features`
    results_path : str, directory where the PNG is written
    """
    features = np.concatenate(features, axis=0)
    labels = np.concatenate(labels, axis=0).astype(str)

    n_samples = features.shape[0]
    print(f"[Global] 总样本数: {n_samples}")

    # Robustness: too few pooled points for a meaningful embedding.
    if n_samples < 5:
        print(f"[Warning] Global t-SNE skipped: only {n_samples} samples")
        return

    # BUG FIX: scikit-learn requires perplexity < n_samples. The original
    # formula could produce perplexity >= n_samples for small pooled sets
    # (e.g. 10 samples -> max(10, 0) == 10). Clamp to n_samples - 1.
    valid_perplexity = min(50, max(10, n_samples//100), n_samples - 1)

    tsne = TSNE(n_components=2,
                random_state=42,
                perplexity=valid_perplexity,
                n_iter=1500,  # NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn >= 1.5
                init='pca')
    reduced = tsne.fit_transform(features)

    plt.figure(figsize=(12, 10))
    sns.scatterplot(x=reduced[:, 0], y=reduced[:, 1],
                    hue=labels, palette="husl",
                    alpha=0.4,
                    s=30,
                    edgecolor='none')

    plt.title('Global t-SNE (All Runs)')
    save_path = os.path.join(results_path, 'global_tsne_all_runs.png')
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"生成全局t-SNE图: {save_path}")

#%% Original plotting helpers
def draw_learning_curves(history, save_path=None):
    """Plot train/validation accuracy and loss side by side.

    Parameters
    ----------
    history : Keras History object whose `.history` dict holds the
        'accuracy', 'val_accuracy', 'loss' and 'val_loss' series
    save_path : str or None — when given, the figure is also saved as PNG
    """
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))

    # (history key, legend label) pairs per panel, plus panel title/y-label.
    panels = (
        (('accuracy', 'Train Accuracy'), ('val_accuracy', 'Validation Accuracy'),
         'Model Accuracy', 'Accuracy'),
        (('loss', 'Train Loss'), ('val_loss', 'Validation Loss'),
         'Model Loss', 'Loss'),
    )
    for axis, (train_series, val_series, title, y_label) in zip(axes, panels):
        for key, legend_label in (train_series, val_series):
            axis.plot(history.history[key], label=legend_label)
        axis.set_title(title, fontsize=14)
        axis.set_xlabel('Epoch', fontsize=12)
        axis.set_ylabel(y_label, fontsize=12)
        axis.legend(loc='upper left', fontsize=10)
        axis.grid(True)

    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"Learning curves saved to: {save_path}")

def draw_confusion_matrix(cf_matrix, sub, results_path, classes_labels):
    """Plot and save one subject's (row-normalized) confusion matrix.

    Parameters
    ----------
    cf_matrix : (n_classes, n_classes) array, e.g. from sklearn confusion_matrix
    sub : str, subject id (or 'All') used in the title and the file name
    results_path : str, directory where the PNG is written
    classes_labels : sequence of str, class tick labels
    """
    display_labels = classes_labels
    disp = ConfusionMatrixDisplay(confusion_matrix=cf_matrix,
                                display_labels=display_labels)
    disp.plot(cmap='Blues')
    disp.ax_.set_xticklabels(display_labels, rotation=12)
    plt.title('Confusion Matrix of Subject: ' + sub )
    plt.savefig(results_path + '/subject_' + sub + '.png')
    # BUG FIX: close the figure — this is called once per subject plus once
    # for 'All', so leaving figures open accumulates them in memory.
    plt.close(disp.figure_)

def draw_performance_barChart(num_sub, metric, label, save_path=None):
    """Bar chart of a per-subject metric (accuracy or kappa), saved to PNG.

    Parameters
    ----------
    num_sub : int, number of subjects (x-axis runs 1..num_sub)
    metric : sequence of length num_sub with values in [0, 1]
    label : str, metric name used for the y-axis, title and default file name
    save_path : str or None — output PNG path. Defaults to a file named after
        `label`, because the original fixed name 'performance_chart.png' made
        the 'K-score' call silently overwrite the 'Accuracy' chart.
    """
    fig, ax = plt.subplots()
    x = list(range(1, num_sub+1))
    ax.bar(x, metric, 0.5, label=label)
    ax.set_ylabel(label)
    ax.set_xlabel("Subject")
    ax.set_xticks(x)
    ax.set_title('Model '+ label + ' per subject')
    ax.set_ylim([0,1])
    if save_path is None:
        # BUG FIX: distinct file per metric instead of one clobbered file.
        save_path = 'performance_chart_{}.png'.format(label)
    plt.savefig(save_path)
    plt.close(fig)  # free the figure; this helper is called multiple times

#%% Training
def train(dataset_conf, train_conf, results_path):
    """Train the configured model `n_train` times for every subject.

    For each subject the best run (highest test accuracy) is recorded in
    'best models.txt'; per-run accuracy/kappa arrays are saved to
    'perf_allRuns.npz'; a human-readable log is written to 'log-F.txt'.

    Parameters
    ----------
    dataset_conf : dict with keys 'name', 'n_sub', 'data_path',
        'isStandard', 'LOSO' (and model keys consumed by getModel)
    train_conf : dict with keys 'batch_size', 'epochs', 'patience', 'lr',
        'LearnCurves', 'n_train', 'model'
    results_path : str, directory where logs, weights and metrics are written
    """
    in_exp = time.time()

    # Dataset parameters
    dataset = dataset_conf.get('name')
    n_sub = dataset_conf.get('n_sub')
    data_path = dataset_conf.get('data_path')
    isStandard = dataset_conf.get('isStandard')
    LOSO = dataset_conf.get('LOSO')

    # Training hyper-parameters
    batch_size = train_conf.get('batch_size')
    epochs = train_conf.get('epochs')
    patience = train_conf.get('patience')
    lr = train_conf.get('lr')
    LearnCurves = train_conf.get('LearnCurves')
    n_train = train_conf.get('n_train')
    model_name = train_conf.get('model')

    acc = np.zeros((n_sub, n_train))
    kappa = np.zeros((n_sub, n_train))

    # BUG FIX: context managers guarantee the log files are closed (and
    # flushed) even if a training run raises; the original left three file
    # handles dangling on any exception.
    with open(results_path + "/best models.txt", "w") as best_models, \
         open(results_path + "/log-F.txt", "w") as log_write:

        for sub in range(n_sub):
            in_sub = time.time()
            print('\nTraining on subject ', sub+1)
            log_write.write( '\nTraining on subject '+ str(sub+1) +'\n')
            BestSubjAcc = 0
            bestTrainingHistory = []

            X_train, _, y_train_onehot, X_test, _, y_test_onehot = get_data(
                data_path, sub, dataset, LOSO = LOSO, isStandard = isStandard)

            # BUG FIX: loop variable renamed from `train` — it shadowed this
            # very function, making the name unusable inside the loop.
            for run_idx in range(n_train):
                # Seed per repetition so each run is individually reproducible.
                tf.random.set_seed(run_idx+1)
                np.random.seed(run_idx+1)

                in_run = time.time()
                filepath = results_path + '/saved models/run-{}'.format(run_idx+1)
                os.makedirs(filepath, exist_ok=True)
                filepath = filepath + '/subject-{}.h5'.format(sub+1)

                model = getModel(model_name, dataset_conf)
                model.compile(loss=categorical_crossentropy, optimizer=Adam(learning_rate=lr), metrics=['accuracy'])

                callbacks = [
                    # Persist only the best epoch (by validation accuracy).
                    ModelCheckpoint(filepath, monitor='val_accuracy', verbose=0,
                                    save_best_only=True, save_weights_only=True, mode='max'),
                    ReduceLROnPlateau(monitor="val_loss", factor=0.90, patience=20, verbose=1, min_lr=0.0001),
                    EarlyStopping(monitor='val_accuracy', verbose=1, mode='max', patience=patience)
                ]
                history = model.fit(X_train, y_train_onehot, validation_data=(X_test, y_test_onehot),
                                    epochs=epochs, batch_size=batch_size, callbacks=callbacks, verbose=0)

                # Evaluate the checkpointed best-epoch weights, not the final epoch.
                model.load_weights(filepath)
                y_pred = model.predict(X_test).argmax(axis=-1)
                labels = y_test_onehot.argmax(axis=-1)
                acc[sub, run_idx] = accuracy_score(labels, y_pred)
                kappa[sub, run_idx] = cohen_kappa_score(labels, y_pred)

                out_run = time.time()
                info = 'Subject: {}   Train no. {}   Time: {:.1f} m   '.format(sub+1, run_idx+1, ((out_run-in_run)/60))
                info += 'Test_acc: {:.4f}   Test_kappa: {:.4f}'.format(acc[sub, run_idx], kappa[sub, run_idx])
                print(info)
                log_write.write(info +'\n')

                # Track the best run's history for the learning-curve plot.
                if BestSubjAcc < acc[sub, run_idx]:
                    BestSubjAcc = acc[sub, run_idx]
                    bestTrainingHistory = history

            best_run = np.argmax(acc[sub,:])
            filepath = '/saved models/run-{}/subject-{}.h5'.format(best_run+1, sub+1)+'\n'
            best_models.write(filepath)

            out_sub = time.time()
            info = '----------\n'
            info += 'Subject: {}   best_run: {}   Time: {:.1f} m   '.format(sub+1, best_run+1, ((out_sub-in_sub)/60))
            info += 'acc: {:.4f}   avg_acc: {:.4f} +- {:.4f}   '.format(acc[sub, best_run], np.average(acc[sub, :]), acc[sub,:].std() )
            info += 'kappa: {:.4f}   avg_kappa: {:.4f} +- {:.4f}'.format(kappa[sub, best_run], np.average(kappa[sub, :]), kappa[sub,:].std())
            info += '\n----------'
            print(info)
            log_write.write(info+'\n')

            if LearnCurves:
                print('Plot Learning Curves ....... ')
                draw_learning_curves(bestTrainingHistory)

        out_exp = time.time()
        info = '\nTime: {:.1f} h   '.format( (out_exp-in_exp)/(60*60) )
        print(info)
        log_write.write(info+'\n')

    # Save per-run metrics directly to the .npz path instead of holding a
    # 'wb' handle open for the whole experiment as the original did.
    np.savez(results_path + "/perf_allRuns.npz", acc = acc, kappa = kappa)

#%% Evaluation
def test(model, dataset_conf, results_path, train_conf, allRuns=True):
    """Evaluate the best saved model for every subject on its test set.

    Loads each subject's best run (listed in 'best models.txt'), reports
    accuracy / kappa / confusion matrices, extracts penultimate-layer
    features for per-subject and global t-SNE plots, and appends a summary
    to 'log-F.txt'.

    Parameters
    ----------
    model : Keras model instance whose weights are loaded from disk
    dataset_conf : dict, see train()
    results_path : str, directory holding the weights, logs and figures
    train_conf : dict; 'n_train' gives the number of runs whose features
        are pooled for the global t-SNE
    allRuns : bool, additionally report averages over all runs read from
        'perf_allRuns.npz'
    """
    global all_features, all_labels  # module-level accumulators across calls

    dataset = dataset_conf.get('name')
    n_classes = dataset_conf.get('n_classes')
    n_sub = dataset_conf.get('n_sub')
    data_path = dataset_conf.get('data_path')
    isStandard = dataset_conf.get('isStandard')
    LOSO = dataset_conf.get('LOSO')
    classes_labels = dataset_conf.get('cl_labels')

    acc_bestRun = np.zeros(n_sub)
    kappa_bestRun = np.zeros(n_sub)
    cf_matrix = np.zeros([n_sub, n_classes, n_classes])

    if allRuns:
        # np.load accepts the path directly — no dangling file handle.
        perf_arrays = np.load(results_path + "/perf_allRuns.npz")
        acc_allRuns = perf_arrays['acc']
        kappa_allRuns = perf_arrays['kappa']

    run_features = []  # penultimate-layer features from every run/subject
    run_labels = []    # matching true labels
    n_train = train_conf.get('n_train', 10)  # number of runs to collect features from

    # BUG FIX: context managers close the log and best-models files; the
    # original never closed `best_models`.
    with open(results_path + "/log-F.txt", "a") as log_write, \
         open(results_path + "/best models.txt", "r") as best_models:

        for sub in range(n_sub):
            _, _, _, X_test, _, y_test_onehot = get_data(data_path, sub, dataset, 'all', LOSO, isStandard)

            # Load this subject's best-run weights ('\n' stripped from the line).
            filepath = best_models.readline()
            model.load_weights(results_path + filepath[:-1])

            # BUG FIX: evaluate with the best run's weights *now*. The
            # original predicted only after the per-run collection loop
            # below, which had by then loaded the LAST run's weights — so
            # acc/kappa/confusion reflected the last run, not the best one.
            labels = y_test_onehot.argmax(axis=-1)
            y_pred = model.predict(X_test).argmax(axis=-1)
            acc_bestRun[sub] = accuracy_score(labels, y_pred)
            kappa_bestRun[sub] = cohen_kappa_score(labels, y_pred)
            cf_matrix[sub, :, :] = confusion_matrix(labels, y_pred, normalize='true')
            draw_confusion_matrix(cf_matrix[sub, :, :], str(sub+1), results_path, classes_labels)

            # Per-subject t-SNE on the best model's penultimate-layer output.
            feature_layer = Model(inputs=model.input, outputs=model.layers[-2].output)
            test_features = feature_layer.predict(X_test)
            draw_tsne(test_features, labels, str(sub + 1), results_path)

            # Collect features from every run for the global t-SNE
            # (feature_layer shares layers with `model`, so each load_weights
            # call updates it too — same behavior as the original).
            for run in range(n_train):
                model_path = os.path.join(results_path, 'saved models',
                                          f'run-{run + 1}', f'subject-{sub + 1}.h5')
                model.load_weights(model_path)
                run_features.append(feature_layer.predict(X_test))
                run_labels.append(labels)

            info = 'Subject: {}   best_run: {:2}  '.format(sub+1, (filepath[filepath.find('run-')+4:filepath.find('/sub')]) )
            info += 'acc: {:.4f}   kappa: {:.4f}   '.format(acc_bestRun[sub], kappa_bestRun[sub] )
            if allRuns:
                info += 'avg_acc: {:.4f} +- {:.4f}   avg_kappa: {:.4f} +- {:.4f}'.format(
                    np.average(acc_allRuns[sub, :]), acc_allRuns[sub,:].std(),
                    np.average(kappa_allRuns[sub, :]), kappa_allRuns[sub,:].std() )
            print(info)
            log_write.write('\n'+info)

        # Global t-SNE across all runs/subjects + update module-level caches.
        if run_features:
            draw_global_tsne(run_features, run_labels, results_path)
            all_features.extend(run_features)
            all_labels.extend(run_labels)

        info = '\nAverage of {} subjects - best runs:\nAccuracy = {:.4f}   Kappa = {:.4f}\n'.format(
            n_sub, np.average(acc_bestRun), np.average(kappa_bestRun))
        if allRuns:
            info += '\nAverage of {} subjects x {} runs (average of {} experiments):\nAccuracy = {:.4f}   Kappa = {:.4f}'.format(
                n_sub, acc_allRuns.shape[1], (n_sub * acc_allRuns.shape[1]),
                np.average(acc_allRuns), np.average(kappa_allRuns))
        print(info)
        log_write.write(info)

        draw_performance_barChart(n_sub, acc_bestRun, 'Accuracy')
        draw_performance_barChart(n_sub, kappa_bestRun, 'K-score')
        draw_confusion_matrix(cf_matrix.mean(0), 'All', results_path, classes_labels)

#%% Model factory
def getModel(model_name, dataset_conf):
    """Build an uncompiled Keras model selected by name.

    Parameters
    ----------
    model_name : str, one of the supported identifiers below
    dataset_conf : dict with keys 'n_classes', 'n_channels', 'in_samples'

    Raises
    ------
    Exception
        If `model_name` is not a supported model identifier.
    """
    n_classes = dataset_conf.get('n_classes')
    n_channels = dataset_conf.get('n_channels')
    in_samples = dataset_conf.get('in_samples')

    # All ATCNet variants take the exact same hyper-parameter set; only the
    # constructor name in `models` differs.
    atcnet_builders = {
        'ATCNet': 'ATCNet_',
        'ATCNetF': 'ATCNet_F',
        'ATCNet1': 'ATCNet_1',
        'ATCNetF1': 'ATCNet_F1',
        'ATCNetF2': 'ATCNet_F2',
    }
    if model_name in atcnet_builders:
        build = getattr(models, atcnet_builders[model_name])
        return build(
            n_classes=n_classes,
            in_chans=n_channels,
            in_samples=in_samples,
            n_windows=5,
            attention='mha',
            eegn_F1=16,
            eegn_D=2,
            eegn_kernelSize=64,
            eegn_poolSize=7,
            eegn_dropout=0.3,
            tcn_depth=2,
            tcn_kernelSize=4,
            tcn_filters=32,
            tcn_dropout=0.3,
            tcn_activation='elu',
        )

    if model_name == 'TCNet_Fusion':
        return models.TCNet_Fusion(n_classes=n_classes, Chans=n_channels, Samples=in_samples)
    if model_name == 'EEGTCNet':
        return models.EEGTCNet(n_classes=n_classes, Chans=n_channels, Samples=in_samples)
    if model_name == 'EEGNet':
        return models.EEGNet_classifier(n_classes=n_classes, Chans=n_channels, Samples=in_samples)
    if model_name == 'EEGNeX':
        return models.EEGNeX_8_32(n_timesteps=in_samples, n_features=n_channels, n_outputs=n_classes)
    if model_name == 'DeepConvNet':
        return models.DeepConvNet(nb_classes=n_classes, Chans=n_channels, Samples=in_samples)
    if model_name == 'ShallowConvNet':
        return models.ShallowConvNet(nb_classes=n_classes, Chans=n_channels, Samples=in_samples)
    if model_name == 'MBEEG_SENet':
        return models.MBEEG_SENet(nb_classes=n_classes, Chans=n_channels, Samples=in_samples)

    raise Exception("'{}' model is not supported yet!".format(model_name))

def run():
    """Configure dataset/training parameters, then train and evaluate."""
    dataset = 'mydata'

    home = os.path.expanduser('~')
    # Per-dataset presets:
    # (in_samples, n_channels, n_sub, n_classes, class labels, data path)
    dataset_presets = {
        'BCI2a': (1125, 22, 9, 4,
                  ['Left hand', 'Right hand','Foot','Tongue'],
                  home + '/BCI Competition IV/BCI Competition IV-2a/BCI Competition IV 2a mat/'),
        'HGD': (1125, 44, 14, 4,
                ['Right Hand', 'Left Hand','Rest','Feet'],
                home + '/mne_data/MNE-schirrmeister2017-data/robintibor/high-gamma-dataset/raw/master/data/'),
        'CS2R': (1125, 32, 18, 3,
                 ['Fingers', 'Wrist','Elbow'],
                 home + '/CS2R MI EEG dataset/all/EDF - Cleaned - phase one (remove extra runs)/two sessions/'),
    }
    # Any other name falls back to the local 'data/' configuration.
    fallback = (1000, 20, 1, 3, ['grasp', 'reaching', 'twist'], 'data/')
    (in_samples, n_channels, n_sub, n_classes,
     classes_labels, data_path) = dataset_presets.get(dataset, fallback)

    results_path = os.getcwd() + "/results"
    os.makedirs(results_path, exist_ok=True)

    dataset_conf = { 'name': dataset, 'n_classes': n_classes, 'cl_labels': classes_labels,
                    'n_sub': n_sub, 'n_channels': n_channels, 'in_samples': in_samples,
                    'data_path': data_path, 'isStandard': True, 'LOSO': False}
    train_conf = { 'batch_size': 64, 'epochs': 1000, 'patience': 400, 'lr': 0.001,
                  'LearnCurves': True, 'n_train': 10, 'model':'ATCNetF2'}

    train(dataset_conf, train_conf, results_path)
    model = getModel(train_conf.get('model'), dataset_conf)
    test(model, dataset_conf, results_path, train_conf)

# Script entry point: run the full train/evaluate pipeline, then print a
# completion marker.
if __name__ == "__main__":
    run()
    print('Over...')