# -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 22:50:03 2023

@author: xtp

"""

from hyperimage import HyperImage
from smalltools import Files, Table, Tools
from dataprocess import DataLoad, PreProcessing, FeatureSelection, QualitativeAnalysis
from multiprocessing import cpu_count
import concurrent.futures
from tqdm import tqdm
import pandas as pd
import warnings
import os

warnings.filterwarnings("ignore")

def Outlier_Dataload(dataset, path):
    """Run outlier checking and train/test splitting for one dataset.

    Parameters
    ----------
    dataset : [name, DataFrame]
        ``name`` is a tuple of (filename, preprocess-label, selector-label).
    path : str
        Result root directory.

    Returns
    -------
    list
        ``[name, (df_train, df_test)]`` ready for the modeling stage.
    """
    name, df = dataset
    folder = path + name[0] + '/Pre-processing/'
    Files.mkdirPath(folder)

    train_df, test_df = DataLoad.outlier_dataload(
        df, check_threshold=3.5, check_band_num=5, split_method='ks',
        path=folder + name[1] + '_' + name[2])

    return [name, (train_df, test_df)]

def Preprocess_Feature(dataset, methods, path):
    """Pre-process one dataset and run two feature-selection pipelines.

    Parameters
    ----------
    dataset : [name, DataFrame]
    methods : [label, method_list]
        ``label`` names the recipe (e.g. 'D1'); ``method_list`` is the
        ordered list of pre-processing codes (e.g. ['SG', 'MSC', 'D1']).
    path : str
        Result root directory.

    Returns
    -------
    list
        Two ``[(name, label, selector), DataFrame]`` entries — one for
        MRMR, one for PCA + ReliefF — columns sorted descending.
    """
    name, data = dataset
    label, method = methods
    folder = path + name + '/Pre-processing/'
    Files.mkdirPath(folder)

    # Pre-process a deep copy so the shared input frame stays untouched.
    processed = PreProcessing(data.copy(deep=True)).PreProcess(
        methods=method, path=folder)

    # Feature selection on the processed data (selector appears stateful).
    selector = FeatureSelection(processed)
    mrmr_df = selector.Select(method='MRMR',
                              path=folder + label + '_', return2data=False)

    # The PCA result itself is discarded; presumably the call mutates the
    # selector's state before ReliefF runs — NOTE(review): confirm.
    selector.Select(method='PCA', return2data=True)[0]
    relief_df = selector.Select(method='ReliefF',
                                path=folder + label + '_')

    mrmr_df = mrmr_df.sort_index(axis=1, level=0, ascending=False)
    relief_df = relief_df.sort_index(axis=1, level=0, ascending=False)

    return [[(name, label, 'MRMR'), mrmr_df],
            [(name, label, 'PCA_RelieF'), relief_df]]

def Modeling_Evaluation(data, path):
    """Train, tune and evaluate classifiers for one feature-selected dataset.

    SVM and RF are tuned with Bayesian optimisation, PLS-DA with a grid
    search, each over an increasing range of feature counts; the best run
    per classifier is saved, visualised and summarised.

    Parameters
    ----------
    data : [(filename, preprocess, select), (df_train, df_test)]
    path : str
        Result root directory.

    Returns
    -------
    list of pandas.DataFrame
        One single-row, MultiIndex-ed summary frame per classifier/optimizer
        combination, suitable for concatenation into the final workbook.
    """
    filename, preprocess, select = data[0]
    df_train, df_test = data[1]
    diff_list = Tools.diff_list(len(df_train.columns), step=5)
    folder = path + filename + '/Modeling/' + preprocess + '/' + select + '/'
    Files.mkdirPath(folder)

    df_frame = []
    y_labels_dict = {}

    # # BayesSearch # #
    for method in ['SVM','RF']:
        result_features_BO = []
        for n_features in diff_list:
            # BUGFIX: slice from the FULL frames each iteration. The original
            # rebound df_train/df_test to their own slices, so the frames
            # shrank cumulatively and larger n_features values were no-ops
            # (and the GridSearch loop below started from mutated frames).
            train_sub = df_train.iloc[:, :n_features]
            test_sub = df_test.iloc[:, :n_features]
            qa = QualitativeAnalysis(train_sub, test_sub, method=method)
            # Getter calls retained without bindings: they presumably prime
            # the estimator/search-space state before Bayesopt — TODO confirm.
            qa.getEstimator()
            qa.getSpaces()
            qa.getParams()
            result_BO, best_model_BO, y_pred_test_BO, best_acc_train_BO, \
                best_acc_test_BO = qa.Bayesopt(init_points=10, n_iter=50)
            result_features_BO.append([n_features, result_BO, best_model_BO,
                y_pred_test_BO, best_acc_train_BO, best_acc_test_BO])
        # Rank runs by training accuracy; row 0 is the best configuration.
        result_features_BO_df = pd.DataFrame(result_features_BO,
            columns=['n_features', 'result', 'best_model', 'y_pred_test',
                      'best_acc_train', 'best_acc_test']).sort_values(
            by='best_acc_train', ascending=False).reset_index(drop=True)
        best_y_pred_BO = result_features_BO_df.loc[0, 'y_pred_test'].tolist()
        best_result_BO = result_features_BO_df.loc[0, 'result']
        best_result_BO.to_csv(folder+method+'_BayesSearch_best_result.csv')
        qa.Bayesopt_visualize(best_result_BO, path=folder+method+'_')
        y_labels_dict[method + '_BayesSearch'] = {
            'y_test': qa.y_test.tolist(), 'best_y_pred': best_y_pred_BO}
        # Drop the bulky columns before building the one-row summary.
        best = result_features_BO_df.loc[0, :].drop(['result', 'y_pred_test'])
        my_index = pd.MultiIndex.from_tuples(
                    [(filename, preprocess, select, method, 'BayesSearch')],
                    names=['Times', 'Preprocess', 'FeatureSelection',
                    'ClassificationMethod', 'HyperparameterOptimization'])
        df_frame.append(pd.DataFrame([best], index=my_index))

    # # GridSearch # #
    for method in ['PLS-DA']:
        result_features_G = []
        for n_features in diff_list:
            # Same BUGFIX as above: always slice from the full frames.
            train_sub = df_train.iloc[:, :n_features]
            test_sub = df_test.iloc[:, :n_features]
            qa = QualitativeAnalysis(train_sub, test_sub, method=method)
            qa.getEstimator()
            qa.getSpaces()
            qa.getParams()
            result_G, best_model_G, y_pred_test_G, best_acc_train_G, \
                best_acc_test_G = qa.GridsearchCV(cv=10)
            result_features_G.append([n_features, result_G, best_model_G,
                y_pred_test_G, best_acc_train_G, best_acc_test_G])
        result_features_G_df = pd.DataFrame(result_features_G,
            columns=['n_features', 'result', 'best_model', 'y_pred_test',
                      'best_acc_train', 'best_acc_test']).sort_values(
            by='best_acc_train', ascending=False).reset_index(drop=True)
        best_y_pred_G = result_features_G_df.loc[0, 'y_pred_test'].tolist()
        best_result_G = qa.GridsearchCV_test(result_features_G_df.loc[0,'result'])
        best_result_G.to_csv(folder+method+'_GridSearch_best_result.csv')
        qa.GridsearchCV_visualize(best_result_G, path=folder+method+'_')
        y_labels_dict[method + '_GridSearch'] = {
            'y_test': qa.y_test.tolist(), 'best_y_pred': best_y_pred_G}
        best = result_features_G_df.loc[0, :].drop(['result', 'y_pred_test'])
        my_index = pd.MultiIndex.from_tuples(
                    [(filename, preprocess, select, method, 'GridSearch')],
                    names=['Times', 'Preprocess', 'FeatureSelection',
                    'ClassificationMethod', 'HyperparameterOptimization'])
        df_frame.append(pd.DataFrame([best], index=my_index))

    # # Evaluation # #
    HyperImage.saveData2Json(list(y_labels_dict.items()),
                             path=folder + 'y_labels.json')
    for method, labels in y_labels_dict.items():
        y_test = labels['y_test']
        best_y_pred = labels['best_y_pred']
        QualitativeAnalysis.report(y_test, best_y_pred,
                                        path=folder + method + '_')
        # NOTE(review): intentionally reuses the last qa instance from the
        # loops above, as the original did — confirm this is acceptable.
        qa.plot_confusion_matrix(y_test, best_y_pred,
                                          path=folder + method + '_')
        QualitativeAnalysis.multiClassification_PR(y_test, best_y_pred,
                                            path=folder + method+'_')
        QualitativeAnalysis.multiClassification_ROC(y_test, best_y_pred,
                                            path=folder + method+'_')

    return df_frame


def Multiprocessing():
    """Drive the whole pipeline over every CSV table in the processing folder.

    Three fan-out stages run across all CPU cores via ProcessPoolExecutor:
    (1) pre-processing + feature selection, (2) outlier checking + data
    loading, (3) modeling + evaluation.  The per-run summaries are finally
    concatenated into a single Excel workbook.
    """
    cores = cpu_count()

    # First character of this file's absolute directory is taken as the
    # Windows drive letter (e.g. 'D' -> 'D:/...') — Windows-only; TODO
    # confirm before running elsewhere.
    disk = os.path.dirname(__file__)[0]
    path_processing = disk + ':/Python/Processing/'
    path_result = disk + ':/Python/Result2/'
    ext_result = ['*.csv']
    Files.mkdirPath(path_result)

    ''' Preprocessing and FeatureEngineering '''
    # The processing folder is scanned for CSV tables (one dataset each).
    dataset = []
    process = Files(path_processing, ext_result, flag='all')
    for file_path, name in zip(process.filesWithPath, process.filesNoExt):
        table_processing = Table.read(file_path, header=0, index_col=[0], sep=',')
        dataset.append([name, table_processing.DataFrame.sort_index()])
    preprocess = [['D1', ['SG','MSC','D1']], ['D1-SS', ['SG','MSC','D1','SS']],
        ['D2', ['SG','MSC','D1','D2']], ['D2-SS', ['SG','MSC','D1','D2','SS']]]
    # Cross every dataset with every pre-processing recipe; the argument
    # lists below are aligned so executor.map pairs them element-wise.
    pre = list(preprocess) * len(dataset)
    res = [entry for entry in dataset for _ in range(len(preprocess))]
    path = [path_result] * len(res)
    with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
        results1 = list(tqdm(executor.map(Preprocess_Feature, res, pre, path),
                        position=0, ncols=80, leave=True, total=len(res),
                        desc='Preprocess & Features', colour='red'))
    # Each worker returns two entries (MRMR and PCA+ReliefF); flatten them.
    results_list1 = []
    for result in results1:
        results_list1.extend(result)

    """ OutlierCheck and DataLoad """
    path = [path_result] * len(results_list1)
    with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
        results2 = list(tqdm(executor.map(Outlier_Dataload, results_list1, path),
                position=0, ncols=80, leave=True, total=len(results_list1),
                desc='OutlierCheck & DataLoad', colour='blue'))
    results_list2 = list(results2)

    ''' Modeling and Evaluation '''
    path = [path_result] * len(results_list2)
    with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
        results3 = list(tqdm(executor.map(Modeling_Evaluation, results_list2, path),
                    position=0, ncols=80, leave=True, total=len(results_list2),
                    desc='Modeling & Evaluation', colour='green'))
    # Each worker returns a list of one-row summary frames; flatten and save.
    results_list3 = []
    for result in results3:
        results_list3.extend(result)
    Table.concat(results_list3, axis=0, reset_index=True, fitWidth=True,
                  path=path_result + 'Result_Summary.xlsx')

    
# The __main__ guard is mandatory here: ProcessPoolExecutor workers re-import
# this module, and without the guard each worker would re-launch the pipeline.
if __name__ == '__main__':
    Multiprocessing()
    
    
    
    
    