# -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 16:56:50 2022

@author: xtp

"""
# %% Preparation
# =============================================================================
#                      Description, Modules, Path Statement
# =============================================================================

'''
    "Path Statement": Prepare a folder named "Processing" and place the
hyperspectral image files or pending data in the folder. 

    "HyperImage Processing": Before "HyperImage Processing", place the pending 
folders in the "Processing" folder, which containing hyperspectral data, 
whiteboard data, blackboard data, standard board data, and Standard Board files
for the batch.

    "Spectral Analysis": Before processing the "Spectral Analysis", place 
the merged spectral data in the "Processing" folder and modify the filename 
as required.

    "Spectral Modeling": Before processing the "Spectral Modeling", place 
the original spectral data in the "Processing" folder and modify the filename 
as required, where merged data is named "All".

'''

from hyperimage import HyperImage
from smalltools import Files, Table, Tools
from dataprocess import DataLoad, PreProcessing, FeatureSelection, QualitativeAnalysis
from datanalysis import Analysis
from tqdm import tqdm
import pandas as pd
import warnings

warnings.filterwarnings("ignore")

# Working directories: 'Processing' holds the pending input data,
# 'Result' receives every output written by the sections below.
path_processing = 'D:/Data/2023-Herbicide/Processing/'
ext_processing = ['*.hdr']                                                      # ENVI header extension used to locate hyperspectral images.
path_result = 'D:/Data/2023-Herbicide/Result/'
ext_result = ['*.csv']                                                          # Extension used to locate spectral/result tables.
Files.mkdirPath(path_result)                                                    # Create the result folder if it does not exist.

# %% HyperImage Processing
# =============================================================================
#                Spectral Calibration & Extraction, Data Merging
# =============================================================================

""" Spectral Calibration and Extraction """

leavesnumber_list = []
process_1 = Files(path_processing, ext_processing, flag ='all')
folder0_list = list(set(process_1.folderName0))
folder0_list.sort()
process1 = tqdm(enumerate(folder0_list), position=0, ncols=80, leave=True, 
                total=len(folder0_list), desc='Total progress', colour='green')

for i, folder0 in process1:
    
    leavesnumber_list = []
    path_result_folders = path_result + folder0 + '/'
    Files.mkdirPath(path_result_folders)
    
    # # Calibration1 # #
    grey = HyperImage.readENVIdata(path_processing + folder0 + '/grey.hdr')
    grey_crop = grey.crop2(th=0.3, denoisSize=0, denoiseIter=1,
        contourThresh=[0,0], path = path_result_folders + 'grey_crop.png')      # Set extraction parameters of the board.
    refcestedboard = path_processing + folder0 + '/refceStedBoard.txt'
    
    # # # Calibration2 # #
    # white = HyperImage.readENVIdata(path_processing + folder0 + '/white.hdr')
    # white_crop = white.crop2(th=0.3, denoisSize=0, denoiseIter=1,
    #     contourThresh=[0,0], path = path_result_folders + '/white_crop.png')    # Set extraction parameters of the board.
    # black = HyperImage.readENVIdata(path_processing + folder0 + '/black.hdr')
    
    process_2 = Files(path_processing + folder0, ext_processing, flag ='all')
    process1_1 = tqdm(zip(process_2.filesWithPath, process_2.filesNoExt), 
        position=0, ncols=80, leave=False, total=len(process_2.folderName0)-1, 
        desc='HyperImage Processing', colour='red')
    for path, name in process1_1:
        if name != 'black' and name !='grey' and name !='white':
            img = HyperImage.readENVIdata(path)
            path_result_files = path_result_folders + name + '/'
            Files.mkdirPath(path_result_files)

            # # Calibration1 # #
            calibrated = HyperImage.caliColor(img.hyperImg, grey_crop[0],
                                      img.wvs, refcestedboard, imgingtype=0)
            # # Calibration2 # #
            # calibrated = HyperImage.caliColor(img.hyperImg, white_crop[0],
            #                         img.wvs, black.hyperImg, imgingtype=1)
    
            rededge_seg = calibrated.segRedEdge(th=0.4, denoisSize=10,
                                    denoiseIter=1, contourThresh=[4000,0],
                        path = path_result_files+ '/segmentation_rededge.png')  # Set leaf segmentation parameters (Convolution kernel size in "segRedEdge").
    
            HyperImage.saveData2Json(calibrated.wvs,
                            path = path_result_folders +'wavelength_list.json')
            
            # # Whole image spectrum # #
            # average_spectrum = calibrated.getMean(calibrated.makedImg,
            #                                       axis=(0,1), meantype=0,
            #                    path = path_result_files + '/spectrum.csv')
            
            # # Segmentation by contour # #
            average_spectrum_seg = calibrated.leavesSegmentation(
                                    calibrated.hyperImg, calibrated.mask,
                                    threshold=500, path=path_result_files)      # Set leaf contours selection parameters.
            leavesnumber = tuple([name, average_spectrum_seg[1]])
            leavesnumber_list.append(leavesnumber)
    
            # leafparameters_seg = calibrated.getLeafParaSeg(
            #                average_spectrum_seg, path = path_result_files)
            # leafparameters = calibrated.getLeafPara(average_spectrum,
            #                  path = path_result_files + '/parameters.csv')
    
            display_RGB = img.getRGB(path=path_result_files+'/image_RGB.png')
    
    Table.data2excel(leavesnumber_list,
                     path = path_result_folders + 'leavesnumber.xlsx')


    """ Spectral Data Merging """
    
    spectrum = Table.readFile2Table(path_result_folders, ext_result,
                    flag ='all', header=None, index_col=None, sep=',',
                    skiprows=[], feature_name='WavList', dropindex=True)

    spectrum_tab = Table.multiIndexExtract(spectrum.DataFrame,
                        slices = [[0,7], [1,2], [3,5], [-1]],
            names = ['Filename','Concentration','Varieties','Repetition'])      # Reset table index: Set the slices and name of different factors.
    spectrum = Table.resetTableColumns(spectrum_tab, new_columns = calibrated.wvs)
    spectrum_tab = spectrum.DataFrame
    
    PreProcessing.Draw(calibrated.wvs, spectrum_tab, path=path_result_folders)
    Table.write(spectrum_tab, path=path_result_folders + folder0 + '.csv')
    
    print(' —— loop ' + str(i+1) + '\n')


# %% Spectra-Indicator Merge

import numpy as np

# Read every CSV in 'Processing' and sort it into one of two pools:
# indicator tables (filename contains 'indicator', single index column)
# and spectra tables (4-level multi-index). Each pool entry is a
# one-key dict {filename_without_ext: DataFrame}.
indicator_frame = []
spectra_frame = []
process = Files(path_processing, ext_result, flag = 'all')
process1 = list(zip(process.filesWithPath, process.filesNoExt))

for path, name in process1:
    if 'indicator' in name:
        df_indicator = pd.read_table(path, header=0, index_col=0, sep=',')
        indicator_frame.append({name: df_indicator})
        continue
    df_spectra = pd.read_table(path, header=0, index_col=[0, 1, 2, 3], sep=',')
    spectra_frame.append({name: df_spectra})
        
# Merge each spectra table with its matching indicator table: every spectrum
# row receives the indicator values measured for its source file, and the
# indicators (GSH/MDA/SPAD/WC) are then appended as extra index levels.
for name_spectra in spectra_frame:
    name1 = list(name_spectra.keys())[0]
    spectra = list(name_spectra.values())[0]
    # Find the indicator table whose filename contains this spectra name
    # (last match wins, matching the original scan order).
    indicator = None
    for name_indicator in indicator_frame:
        name2 = list(name_indicator.keys())[0]
        if name1 in name2:
            indicator = list(name_indicator.values())[0]
    if indicator is None:
        # Previously this fell through with a stale/unbound 'indicator'.
        raise ValueError("No indicator table found for spectra file '%s'" % name1)
    filename_index = spectra.index.get_level_values(0)
    indicator_new = pd.DataFrame(index=filename_index, columns=indicator.columns)
    for index in filename_index:
        # BUG FIX: look up the matched 'indicator' table, not 'df_indicator'
        # (the leftover loop variable holding the last file read above).
        indicator_origin = pd.DataFrame(indicator.loc[index]).T
        if isinstance(indicator_new.loc[index], pd.Series):
            # Single spectrum row for this file: one copy of the indicators.
            indicator_multi = pd.DataFrame(
                np.repeat(indicator_origin.values, 1, axis=0),
                index=list(indicator_origin.index),
                columns=indicator_origin.columns)
        else:
            # Several spectrum rows share this file: replicate the indicator
            # row once per spectrum row.
            indicator_multi = pd.DataFrame(np.repeat(
                indicator_origin.values, len(indicator_new.loc[index]), axis=0),
                index=list(indicator_origin.index)*len(indicator_new.loc[index]),
                columns=indicator_origin.columns)
        indicator_new.loc[index] = indicator_multi.loc[index]
    indicator_new.index = spectra.index
    indicator_new.set_index(['GSH', 'MDA', 'SPAD', 'WC'], append=True, inplace=True)
    spectra.index = indicator_new.index
    spectra.to_csv(path_result + name1 + '.csv')


# %% Spectral Analysis
# =============================================================================
#              Spectral Classification, Preprocess, ANOVA Analysis
# =============================================================================

""" Spectral Classification, Preprocessing and ANOVA by factor level """

process = Files(path_processing, ext_result, flag = 'all')
process1 = tqdm(zip(process.filesWithPath, process.filesNoExt), 
        position=0, ncols=80, leave=True, total=len(process.filesWithPath), 
        desc='Total progress', colour='green')
for path, name in process1:
    path_result_folders = path_result + name + '/'
    Files.mkdirPath(path_result_folders)

    # Method1(Resistant)  Iterate, DataFilter, Preprocessing, ANOVA # #
    table_processing = Table.read(path,header=0,index_col=[0,1,2,3,4,5,6,7],sep=',')
    df_frame_cctn = table_processing.factorIterate(factor = 'Concentration')
    for fn, df in zip(df_frame_cctn[0], df_frame_cctn[1]):
        path_result_files = path_result_folders + str(fn) + '/'
        Files.mkdirPath(path_result_files)
        df = Table.resetIndex(df, level=['Filename','Concentration',
                                              'Repetition']).DataFrame
        df_S = df.loc[['M1','S2'], :]                                           # Filter varieties.
        df_R = df.loc[['R1','R2'], :]                                           # Filter varieties.
        df_S = Table.addIndex(df_S, index='0', class_name='Class')
        df_R = Table.addIndex(df_R, index='1', class_name='Class')
        df = Table.concat([df_S, df_R], axis=0).DataFrame
        df_copy = df.reset_index(level=[0,1,2,3,4], drop=True)   # Without Varieties and Indicator
        df = df.reorder_levels([5,0,1,2,3,4], axis=0)
        df.to_csv(path_result_files + 'All_Original.csv')
        
        # all data preprocess #
        preprocess = PreProcessing(df_copy)
        processed_all = preprocess.PreProcess(methods=['SG','MSC','D1'],
                        return2data=True, path=path_result_files + 'All_')      # Set pre-process methods.
        # mean data preprocess #
        df_mean = Table.groupBy(df_copy, mode='mean')
        preprocess = PreProcessing(df_mean)
        processed_mean = preprocess.PreProcess(methods=['SG','MSC','D1'],
                      return2data=True, path=path_result_files + 'Mean_')       # Set pre-process methods.

        # Significance test of Maximum Difference Spectral #
        diff_sort = Tools.max_diff(processed_mean, scatternumber=10)            # Set scatter number ploted in curve.

        wavelength_select = processed_all.loc[:,
                                    [diff_sort[i] for i in [0,1,2]]]            # Set the selected wavelength number.

        df = Table.addname(wavelength_select, ['Class'], ['Wavelength']).DataFrame
        df_frame1 = Table.columnIterate(df, columns_label='Wavelength')[1]
        anova_oneway_result_list = []
        for df in df_frame1:
            df_frame2 = Table.columnIterate(df.T, columns_label='Class')[1]
            df_frame2 = [Table.resetIndex(i).DataFrame for i in df_frame2]
            df_table = Table.concat(df_frame2, axis=1).DataFrame
            Analysis.plotNumericBox(df_table, figsize=(7,8),
                            testmethod='Mann-Whitney', testitem='All')          # Set Boxplot test method.
            
            df_anova = df.reset_index()
            anova_oneway = Analysis.ANOVA_ONEWAY(df_anova)
            result = {key: anova_oneway[key] for key
                      in anova_oneway.keys() if key in ['F','p_value']}         # Set the anova result to be saved.
            label = df_anova.columns[1]
            dict_tuple = tuple([label, result])
            anova_oneway_result_list.append(dict_tuple)
        anova_oneway_result_dict = dict(anova_oneway_result_list)
        HyperImage.saveData2Json(anova_oneway_result_dict,
                      path = path_result_files +'anova_oneway_result.json')
        
# Merge the per-time 'All_Original.csv' tables into one overall table:
# each gets a 'Time' index level from its folder name, is re-written per
# folder, drawn, and finally concatenated into Result/Original_merge.csv.
df_frame = []
result = Files(path_result, ['All_Original.csv'], flag ='all')
result.sortFilesName(reg=(0,0))
for time_label in list(set(result.folderName1)):
    folder_path = path_result + time_label + '/'
    table = Table.readFile2Table3(folder_path, ['All_Original.csv'],
                flag ='all', header=0, index_col=[0,1], sep=',', skiprows=[],
                feature_name='Class', dropindex=False).DataFrame
    table = Table.addIndex(table, index=time_label, class_name='Time', drop=False)
    table = table.set_index(['Class', 'Varieties', 'GSH', 'MDA', 'SPAD', 'WC'],
                            append=True)
    table = table.reorder_levels([1,0,2,3,4,5,6,7], axis=0)
    Table.write(table, path = folder_path + 'Original_merge.csv')
    PreProcessing.Draw(table.columns, table, path = folder_path + 'Original_')
    df_frame.append(table)
merged = Table.concat(df_frame, axis=0).DataFrame.sort_index()
Table.write(merged, path = path_result + 'Original_merge.csv')
    

    # # # Method2(Toxicity)  Index Importing, Preprocessing, ANOVA # #
    # path_result_files = path_result_folders
    # class_index_int = Table.read(path_processing + 'cluster_3.txt'
    #                           ).DataFrame.values.reshape(1,-1)[0].tolist()
    # class_index_str = [str(x) for x in class_index_int]
    # table_processing = Table.read(path,header=0,index_col=[0,1,2,3],sep=',')
    # df_frame_org = table_processing.factorIterate(factor='Filename')
    # df_frame_new = []
    # for i, df in enumerate(df_frame_org[1]):                                    # Import new index.
    #     df = df.reset_index(level=[1,2,3], drop=True)
    #     df = Table.addIndex(df, index=class_index_str[i], class_name='Class')
    #     df_frame_new.append(df)
    # df = Table.concat(df_frame_new, axis=0).DataFrame
    # df = Table.resetIndex(df, level=0).DataFrame
    # table_processing = Table.resetTableIndex(df, flags_list=[[0,1]],
    #                     filter_list=[['0','1','2','3']],
    #                     new_index_list=[['CK','Grade II','Grade III','Grade I']],
    #                     class_name_list=['Class'], dropindex=True)              # Reset new index.
    # table_processing.factorIterate(factor='Class')
    # df = table_processing.DataFrame
    # df.to_csv(path_result_files + 'All_Original.csv')

    # # all data preprocess #
    # preprocess = PreProcessing(df)
    # processed_all = preprocess.PreProcess(methods=['SG','MSC','D2'],
    #                     return2data=True, path=path_result_files + 'All_')      # Set pre-process methods.
    # # mean data preprocess #
    # df_mean = Table.groupBy(df, mode='mean')
    # preprocess = PreProcessing(df_mean)
    # processed_mean = preprocess.PreProcess(methods=['SG'],
    #                   return2data=True, path=path_result_files + 'Mean_')       # Set pre-process methods.
    # '''
    # # Significance test of Maximum Difference Spectral #
    # diff_sort = Tools.max_diff(processed_mean, scatternumber=10, 
    #                             path=path_result_files)                          # Set scatter number ploted in curve.

    # wavelength_select = processed_all.loc[:,
    #                                 [diff_sort[i] for i in [0,1,2]]]            # Set the selected wavelength number.

    # df = Table.addname(wavelength_select, ['Class'], ['Wavelength']).DataFrame
    # df_frame1 = Table.columnIterate(df, columns_label='Wavelength')[1]
    # anova_oneway_result_list = []
    # for df in df_frame1:
    #     df_frame2 = Table.columnIterate(df.T, columns_label='Class')[1]
    #     df_frame2 = [Table.resetIndex(i).DataFrame for i in df_frame2]
    #     df_table = Table.concat(df_frame2, axis=1).DataFrame
    #     Analysis.plotNumericBox(df_table, testmethod='Mann-Whitney',
    #         testitem='All', title=name + ': ' + str(df.columns.tolist()[0]))    # Set Boxplot test method.

    #     df_anova = df.reset_index()
    #     anova_oneway = Analysis.ANOVA_ONEWAY(df_anova, verbose=False)
    #     result = {key: anova_oneway[key] for key
    #               in anova_oneway.keys() if key in ['F','p_value']}             # Set the data to be saved.
    #     label = df_anova.columns[1]
    #     dict_tuple = tuple([label, result])
    #     anova_oneway_result_list.append(dict_tuple)
    # anova_oneway_result_dict = dict(anova_oneway_result_list)
    # HyperImage.saveData2Json(anova_oneway_result_dict,
    #                 path = path_result_files +'anova_oneway_result.json')
    # '''
    
    # df = Table.readFile2Table3(path_result, ['All_Original.csv'],
    #             flag ='all', header=0, index_col=0, sep=',', skiprows=[],
    #             feature_name='Class', dropindex=False).DataFrame
    # df.index.name = 'Time'
    # df = df.set_index('Class', append=True).sort_index()
    # Table.write(df, path = path_result + 'All_Original_merge.csv')
    # PreProcessing.Draw(df.columns, df, path=path_result + 'All_Original_merge_')



# %% bands_cut
# Presumably trims the CSV spectra in 'Processing' to a band subset —
# see Tools.bands_cut for the actual range; TODO confirm.
Tools.bands_cut(path_processing, ext_result)
# %% Spectral Modeling (discard)
# =============================================================================
# OutlierCheck & DataLoad, Preprocess & FeatureSelection, Modeling & Evaluation
# =============================================================================

# For every spectral table in 'Processing': outlier check + train/test split,
# a grid of pre-processing + feature-selection dataset variants, model fitting
# with Bayesian/Grid hyperparameter search, evaluation of the best model of
# each combination, and a final summary workbook.
result_summary_0 = []
process = Files(path_processing, ext_result, flag='all')
process1 = tqdm(zip(range(len(process.filesWithPath)), process.filesWithPath, 
                process.filesNoExt), position=0, ncols=80, leave=True, 
    total=len(process.filesWithPath), desc='Total progress', colour='green')
for i, path, name in process1:
    path_result_files = path_result + name
    folder1 = path_result_files + '/Pre-processing/'
    folder2 = path_result_files + '/Modeling/'
    for folder in [folder1, folder2]:
        Files.mkdirPath(folder)

    """ Spectral Modeling Pre-processing """

    # The merged table ('All') carries a 2-level index; single tables one.
    if 'All' in name:
        table_processing = Table.read(path, header=0, index_col=[0, 1], sep=',')
        table_processing = Table.resetIndex(table_processing.DataFrame)
    else:
        table_processing = Table.read(path, header=0, index_col=[0], sep=',')
    df_all = table_processing.DataFrame.sort_index()

    """ Outlier Check and DataLoad """
    df_train, df_test = DataLoad.outlier_dataload(df_all, check_threshold=3.5,
                check_band_num=5, split_method='ks', path=folder1+'Outlier')

    ''' Preprocess and Feature Selection '''
    dataset = {}
    # Pre-processing recipes; each yields two datasets (MRMR / PCA+ReliefF).
    methods = {'D1': ['SG','MSC','D1'], 'D1-SS': ['SG','MSC','D1','SS'],
        'D2': ['SG','MSC','D1','D2'], 'D2-SS': ['SG','MSC','D1','D2','SS']}
    process1_1 = tqdm(methods.items(), position=0, ncols=80, leave=False, 
        total=len(methods), desc='Preprocess & Feature_Selection', colour='red')
    # BUG FIX: iterate the tqdm wrapper instead of methods.items(); the
    # original iterated the dict directly, so this progress bar never moved.
    for label, method in process1_1:
        
        # PreProcess (deep copies so each recipe starts from the raw split).
        df_train_copy = df_train.copy(deep=True)
        df_test_copy = df_test.copy(deep=True)
        preprocess_train = PreProcessing(df_train_copy)
        preprocess_test = PreProcessing(df_test_copy)
        processed_train = preprocess_train.PreProcess(methods=method, 
                                                      path=folder1+'Train_')
        processed_test = preprocess_test.PreProcess(methods=method, 
                                                    path=folder1+'Test_')

        # FeatureSelection: Train
        featureselect = FeatureSelection(processed_train)
        train_mr = featureselect.Select(method='MRMR', 
                        path=folder1+'Train_'+label+'_', return2data=False)
        
        pca = featureselect.Select(method='PCA', return2data=True)[0]
        train_pca_rf = featureselect.Select(method='ReliefF', 
                                            path=folder1+'Train_'+label+'_')

        # FeatureSelection: Test (re-uses the columns/transform of the train
        # selection so both sets share the same feature space).
        test_mr = processed_test.copy()
        test_mr.index.name = 'Class'
        test_mr.columns = train_mr.columns
        test_mr = Table.write(test_mr, folder1+'Test_'+label+'_Selected_MRMR.csv')

        Test_pca = FeatureSelection(processed_test)
        test_pca = pca.fit_transform(Test_pca.Data)
        test_pca_rf = Table.dataframe(test_pca, Test_pca.Label)
        test_pca_rf.index.name = 'Class'
        test_pca_rf.columns = train_pca_rf.columns
        test_pca_rf = Table.write(test_pca_rf, 
                                  folder1+'Test_'+label+'_Selected_ReliefF.csv')

        # Order columns so iloc[:, :n] below keeps the top-ranked features.
        train_mr = train_mr.sort_index(axis=1, level=0, ascending=False)
        test_mr = test_mr.sort_index(axis=1, level=0, ascending=False)
        train_pca_rf = train_pca_rf.sort_index(axis=1, level=0, ascending=False)
        test_pca_rf = test_pca_rf.sort_index(axis=1, level=0, ascending=False)

        data = {label+'-MRMR': [train_mr, test_mr],
                label+'-PCA_RelieF': [train_pca_rf, test_pca_rf]}
        dataset.update(data)


    ''' Modeling and Evaluation (BayesSearch, GridSearch) '''

    result_summary_1 = []
    process1_2 = tqdm(dataset.items(), position=0, ncols=80, leave=False, 
        total=len(dataset.items()), desc='Modeling & Evaluation', colour='red')
    for subfolder, [df_train, df_test] in process1_2:
        diff_list = Tools.diff_list(len(df_train.columns), step=5)
        folder2_sub = folder2 + subfolder + '/'
        Files.mkdirPath(folder2_sub)

        result_summary_2 = []
        
        y_labels_BO_dict = {}
        # # BayesSearch # #
        method_list_BO = ['SVM', 'RF']
        process1_2_1 = tqdm(method_list_BO, position=0, ncols=80, leave=False, 
            total=len(method_list_BO), desc='BayesSearch: '+subfolder, colour='blue')
        for method in process1_2_1:
            result_features_BO = []
            process1_2_1_1 = tqdm(diff_list, position=0, ncols=80, leave=False, 
                total=len(diff_list), desc='BayesSearch: '+method, colour='cyan')
            for n_features in process1_2_1_1:
                # BUG FIX: slice from the full frames instead of repeatedly
                # truncating df_train/df_test in place — the original shrank
                # the frames cumulatively across feature counts, methods and
                # the GridSearch loop below, so later models could silently
                # train on fewer columns than requested.
                X_train = df_train.iloc[:, :n_features]
                X_test = df_test.iloc[:, :n_features]
                qa = QualitativeAnalysis(X_train, X_test, method=method)
                estimator = qa.getEstimator()
                spaces = qa.getSpaces()
                params = qa.getParams()
                result_BO, best_model_BO, y_pred_test_BO, best_acc_train_BO, \
                    best_acc_test_BO = qa.Bayesopt(init_points=10, n_iter=50)
                result_feature_BO = [n_features, result_BO, best_model_BO, 
                    y_pred_test_BO, best_acc_train_BO, best_acc_test_BO]
                result_features_BO.append(result_feature_BO)
            # Rank the runs by training accuracy and keep the best one.
            result_features_BO_df = pd.DataFrame(result_features_BO, 
                columns=['n_features', 'result', 'best_model', 'y_pred_test', 
                         'best_acc_train', 'best_acc_test']).sort_values(
                by='best_acc_train', ascending=False).reset_index(drop=True)
            best_y_pred_BO = result_features_BO_df.loc[0, 'y_pred_test'].tolist()
            best_result_BO = result_features_BO_df.loc[0, 'result']
            best_result_BO.to_csv(folder2_sub+method+'_BayesSearch_best_result.csv')
            qa.Bayesopt_visualize(best_result_BO, path=folder2_sub+method+'_')
            y_test_BO = qa.y_test.tolist()
            y_labels_BO = {'y_test': y_test_BO, 'best_y_pred': best_y_pred_BO}
            y_labels_BO_dict.update({method + '_BayesSearch': y_labels_BO})
            best = result_features_BO_df.loc[0, :]
            best.drop(['result','y_pred_test'], inplace=True)
            my_index = pd.MultiIndex.from_tuples([(name, subfolder, method, 'BayesSearch')], 
                names = ['times', 'FeatureSelection', 'ClassificationMethod', 'HyperparameterOptimization'])
            df = pd.DataFrame([best], index=my_index)
            result_summary_2.append(df)

        y_labels_G_dict = {}
        # # GridSearch # #
        method_list_G = ['PLS-DA']
        process1_2_2 = tqdm(method_list_G, position=0, ncols=80, leave=False,
            total=len(method_list_G), desc='GridSearch', colour='blue')
        for method in process1_2_2:
            result_features_G = []
            process1_2_2_1 = tqdm(diff_list, position=0, ncols=80, leave=False, 
                total=len(diff_list), desc='GridSearch: '+method, colour='cyan')
            for n_features in process1_2_2_1:
                # BUG FIX: slice from the full frames (see the note above).
                X_train = df_train.iloc[:, :n_features]
                X_test = df_test.iloc[:, :n_features]
                qa = QualitativeAnalysis(X_train, X_test, method=method)
                estimator = qa.getEstimator()
                spaces = qa.getSpaces()
                params = qa.getParams()
                result_G, best_model_G, y_pred_test_G, best_acc_train_G, \
                    best_acc_test_G = qa.GridsearchCV(cv=10)
                result_feature_G = [n_features, result_G, best_model_G, 
                    y_pred_test_G, best_acc_train_G, best_acc_test_G]
                result_features_G.append(result_feature_G)
            result_features_G_df = pd.DataFrame(result_features_G, 
                columns=['n_features', 'result', 'best_model', 'y_pred_test', 
                         'best_acc_train', 'best_acc_test']).sort_values(
                by='best_acc_train', ascending=False).reset_index(drop=True)
            best_y_pred_G = result_features_G_df.loc[0, 'y_pred_test'].tolist()
            best_result_G = qa.GridsearchCV_test(result_features_G_df.loc[0, 'result'])
            best_result_G.to_csv(folder2_sub + method + '_GridSearch_best_result.csv')
            qa.GridsearchCV_visualize(best_result_G, path=folder2_sub + method + '_')
            y_test_G = qa.y_test.tolist()
            y_labels_G = {'y_test': y_test_G, 'best_y_pred': best_y_pred_G}
            y_labels_G_dict.update({method + '_GridSearch': y_labels_G})
            best = result_features_G_df.loc[0, :]
            best.drop(['result','y_pred_test'], inplace=True)
            my_index = pd.MultiIndex.from_tuples([(name, subfolder, method, 'GridSearch')], 
                names = ['times', 'FeatureSelection', 'ClassificationMethod', 'HyperparameterOptimization'])
            df = pd.DataFrame([best], index=my_index)
            result_summary_2.append(df)
            
        result_summary_1.extend(result_summary_2)
        
        # Evaluate the best model of every (method, search) combination.
        # NOTE(review): the loop target below rebinds the name 'y_labels';
        # iteration is unaffected because tqdm already wrapped the list.
        y_labels = list(y_labels_BO_dict.items()) + list(y_labels_G_dict.items())
        HyperImage.saveData2Json(y_labels, path=folder2_sub + 'y_labels.json')
        process1_2_3 = tqdm(y_labels, position=0, ncols=80, leave=False, 
            total=len(y_labels), desc='Model Evaluation', colour='magenta')
        for (method, y_labels) in process1_2_3:
            y_test = y_labels['y_test']
            best_y_pred = y_labels['best_y_pred']
            report = QualitativeAnalysis.report(y_test, best_y_pred, 
                                            path=folder2_sub + method + '_')
            matrix = qa.plot_confusion_matrix(y_test, best_y_pred, 
                                              path=folder2_sub + method + '_')
            QualitativeAnalysis.multiClassification_PR(y_test, best_y_pred, 
                                                path=folder2_sub + method+'_')
            QualitativeAnalysis.multiClassification_ROC(y_test, best_y_pred, 
                                                path=folder2_sub + method+'_')
    
    result_summary_0.extend(result_summary_1)
    
    print(' —— loop ' + str(i+1) + '\n')
    
Table.concat(result_summary_0, axis=0, reset_index=True, fitWidth=True, 
             path=path_result + 'Result_Summary.xlsx')
