# -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 20:49:47 2022

@author: xtp

"""
import numpy as np
import pandas as pd
from smalltools import Table, Tools
from sklearn.model_selection import train_test_split
from tqdm import tqdm

class DataLoad():
    '''
    Dataset loading, outlier screening and train/test splitting for
    spectral data.

    Split strategies:
    - random : random train/test split
    - ks     : Kennard-Stone — training samples spread evenly in feature space
    - spxy   : SPXY — samples spread evenly in joint feature/label space

    References
    ----------
    [1] ks, spxy: https://www.freesion.com/article/55471029290/
    [2] SPXY-based NIR detection of soluble solids content in watermelon
    [3] SPXY partitioning with Monte-Carlo cross-validation and NIR spectra
        for hesperidin content determination in orange leaves
    '''
    def __init__(self, df, label=None):
        '''
        Parameters
        ----------
        df : pandas.DataFrame
            (n_samples, n_features); rows containing NaN are dropped.
        label : array-like, optional
            (n_samples,). If None, labels are derived from the row index
            (identical index strings map to identical numbers, from 1 in
            order of first appearance).
        '''
        df = df.dropna(axis=0)
        self.DataFrame = df
        # `is None` (not `== None`): an ndarray label would otherwise be
        # compared elementwise and break the truth test.
        if label is None:
            label = np.array(DataLoad.__strList2numberList(list(df.index)))
        self.Label = label                                                      # Array(n_samples,)
        self.Data = np.array(df.values).astype(np.float64)                      # Array(n_samples, n_features)


    @staticmethod
    def __strList2numberList(str_list):
        '''
        Map equal strings to equal numbers, numbered from 1.0 in order of
        first appearance.

        Arguments:
            str_list: list[str, ..., str]
        Returns:
            number_list: list[float, ..., float]

        '''
        # Single pass with a dict instead of the previous O(n*m) nested
        # scan; output values/order are unchanged.
        codes = {}
        for s in str_list:
            if s not in codes:
                codes[s] = float(len(codes) + 1)
        return [codes[s] for s in str_list]


    @staticmethod
    def random(data, label, test_ratio=0.2, random_state=123):
        '''
        Random split via sklearn's train_test_split.

        Parameters
        ----------
        data : ndarray
            shape (n_samples, n_features).
        label : array-like
            shape (n_samples,).
        test_ratio : float, optional
            Fraction assigned to the test set. The default is 0.2.
        random_state : int, optional
            Random seed. The default is 123.

        Returns
        -------
        X_train, X_test, y_train, y_test
        '''
        X_train, X_test, y_train, y_test = train_test_split(
            data, label, test_size=test_ratio, random_state=random_state)

        return X_train, X_test, y_train, y_test


    @staticmethod
    def ks(data, label, test_size=0.2):
        '''
        Kennard-Stone split: seed with the two mutually farthest samples,
        then repeatedly add the candidate whose minimum distance to the
        already-selected set is largest, so the training set covers the
        feature space evenly.

        Parameters
        ----------
        data : ndarray
            shape (n_samples, n_features).
        label : array-like
            shape (n_samples,).
        test_size : float, optional
            Fraction assigned to the test set. The default is 0.2.

        Returns
        -------
        X_train, X_test, y_train, y_test

        Raises
        ------
        ValueError
            If fewer than 2 training samples would be selected (previously
            this crashed with an opaque IndexError).
        '''
        M = data.shape[0]
        N = round((1 - test_size) * M)
        if N < 2:
            raise ValueError("Kennard-Stone needs at least 2 training samples")
        samples = np.arange(M)

        # Upper-triangular pairwise Euclidean distances (lower triangle
        # stays zero, as in the original formulation); vectorized instead
        # of the former O(M^2) Python loop.
        diff = data[:, None, :] - data[None, :, :]
        D = np.triu(np.sqrt((diff * diff).sum(axis=2)), k=1)

        # Seed: the pair with the overall maximum distance.
        maxD = np.max(D, axis=0)
        index_row = np.argmax(D, axis=0)
        index_column = np.argmax(maxD)

        m = np.zeros(N, dtype=int)
        m[0] = index_row[index_column]
        m[1] = index_column
        dminmax = np.zeros(N)
        dminmax[1] = D[m[0], m[1]]

        for i in range(2, N):
            pool = np.delete(samples, m[:i])
            dmin = np.zeros(M - i)
            for j in range(M - i):
                indexa = pool[j]
                d = np.zeros(i)
                for k in range(i):
                    indexb = m[k]
                    # only the upper triangle of D is populated
                    if indexa < indexb:
                        d[k] = D[indexa, indexb]
                    else:
                        d[k] = D[indexb, indexa]
                dmin[j] = np.min(d)
            dminmax[i] = np.max(dmin)
            m[i] = pool[np.argmax(dmin)]

        m_complement = np.delete(np.arange(data.shape[0]), m)

        X_train = data[m, :]
        y_train = np.array(label)[m]
        X_test = data[m_complement, :]
        y_test = np.array(label)[m_complement]

        return X_train, X_test, y_train, y_test


    @staticmethod
    def spxy(data, label, test_size=0.2):
        '''
        SPXY split (sample set partitioning based on joint x-y distances):
        Kennard-Stone on a distance that combines normalized feature-space
        and label-space distances, so both are spread evenly.

        Parameters
        ----------
        data : ndarray
            shape (n_samples, n_features).
        label : array-like (numeric spectral labels)
            shape (n_samples,).
        test_size : float, optional
            Fraction assigned to the test set. The default is 0.2.

        Returns
        -------
        X_train, X_test, y_train, y_test

        Raises
        ------
        ValueError
            If fewer than 2 training samples would be selected.
        '''
        y_backup = np.asarray(label)
        M = data.shape[0]
        N = round((1 - test_size) * M)
        if N < 2:
            raise ValueError("SPXY needs at least 2 training samples")
        samples = np.arange(M)

        # Standardize labels so x- and y-distances are comparable.
        label = np.asarray(label, dtype=np.float64)
        label = (label - np.mean(label)) / np.std(label)

        # Upper-triangular pairwise distances in x and y (vectorized).
        diff = data[:, None, :] - data[None, :, :]
        D = np.triu(np.sqrt((diff * diff).sum(axis=2)), k=1)
        Dy = np.triu(np.abs(label[:, None] - label[None, :]), k=1)

        # Combined, normalized distance.
        D = D / np.max(D) + Dy / np.max(Dy)

        maxD = D.max(axis=0)
        index_row = D.argmax(axis=0)
        index_column = maxD.argmax()

        m = np.zeros(N, dtype=int)
        m[0] = index_row[index_column]
        m[1] = index_column

        dminmax = np.zeros(N)
        dminmax[1] = D[m[0], m[1]]

        for i in range(2, N):
            pool = np.delete(samples, m[:i])
            dmin = np.zeros(M - i)
            for j in range(M - i):
                indexa = pool[j]
                d = np.zeros(i)
                for k in range(i):
                    indexb = m[k]
                    # only the upper triangle of D is populated
                    if indexa < indexb:
                        d[k] = D[indexa, indexb]
                    else:
                        d[k] = D[indexb, indexa]
                dmin[j] = np.min(d)
            dminmax[i] = np.max(dmin)
            m[i] = pool[np.argmax(dmin)]

        m_complement = np.delete(np.arange(data.shape[0]), m)

        X_train = data[m, :]
        y_train = y_backup[m]
        X_test = data[m_complement, :]
        y_test = y_backup[m_complement]

        return X_train, X_test, y_train, y_test


    def SetSplit(self, method='', label=None,
                 test_size=0.3, randomseed=123, path=''):
        '''
        Split the loaded dataset with the chosen method.

        Parameters
        ----------
        method : {'random', 'ks', 'spxy'}
            Split strategy.
        label : unused
            Kept for backward compatibility of the signature.
        test_size : float, optional
            Fraction assigned to the test set. The default is 0.3.
        randomseed : int, optional
            Seed, used only by 'random'. The default is 123.
        path : str, optional
            When non-empty, writes `<path>df_train.csv` / `<path>df_test.csv`.

        Returns
        -------
        df_train : pandas.DataFrame
            (n_train, n_features + label column).
        df_test : pandas.DataFrame
            (n_test, n_features + label column).

        Raises
        ------
        ValueError
            For an unknown `method` (previously printed a message and then
            crashed with NameError).
        '''
        if method == "random":
            X_train, X_test, y_train, y_test = DataLoad.random(
                                self.Data, self.Label, test_size, randomseed)
        elif method == "ks":
            X_train, X_test, y_train, y_test = DataLoad.ks(
                                            self.Data, self.Label, test_size)
        elif method == "spxy":
            # BUGFIX: was `self.label`, which does not exist -> AttributeError.
            X_train, X_test, y_train, y_test = DataLoad.spxy(
                                            self.Data, self.Label, test_size)
        else:
            raise ValueError("no such method of split dataset: %r" % method)

        df_train = Table.data2dataframe(X_train, y_train, self.DataFrame.columns)
        df_test = Table.data2dataframe(X_test, y_test, self.DataFrame.columns)

        if path != '':
            df_train.to_csv(path_or_buf=path + 'df_train.csv', header=False)
            df_test.to_csv(path_or_buf=path + 'df_test.csv', header=False)

        return df_train, df_test

    @staticmethod
    def zscore_mad_check(df, colname='', threshold=3.5):
        '''
        Robust (MAD-based) z-score outlier screen.

        With colname == '' the whole frame is masked elementwise: cells
        whose |z| >= threshold become NaN. With a column name, rows whose
        |z| in that column >= threshold are dropped.

        Parameters
        ----------
        df : pandas.DataFrame or pandas.Series
        colname : str, optional
            Column to test; '' tests everything. The default is ''.
        threshold : float, optional
            |z| cut-off. The default is 3.5.

        Returns
        -------
        Masked/filtered frame (see above).
        '''
        if colname == '':
            se = df
        else:
            se = df[colname]
        # NOTE(review): MAD == 0 yields inf/NaN z-scores here — confirm
        # inputs always have spread.
        MAD = (se - se.median()).abs().median()
        # BUGFIX: 0.6745 is the standard-normal consistency constant for
        # the MAD (was mistyped as 0.6475).
        zscore = ((se - se.median()) * 0.6745 / MAD).abs()
        return df[zscore < threshold]

    @staticmethod
    def outlierCheck(df, colname='', color=[], threshold=3.5, band_num=5):
        '''
        Band-wise spectral outlier detection, plus a parallel-coordinates
        plot of kept (colored) and dropped (black) samples.

        Parameters
        ----------
        df : pandas.DataFrame
            One spectrum per row; after reset_index the index must provide
            the 'Class' column used for plotting.
        colname : str, optional
            Band name. The default is ''.
        color : list, optional
            Per-class plot colors. The default is [].
        threshold : float, optional
            Per-band MAD z-score threshold, typically 2.5 / 3.0 / 3.5.
            The default is 3.5.
        band_num : int, optional
            A sample is dropped when more than this many of its bands are
            flagged. The default is 5.

        Returns
        -------
        df_keep : pandas.DataFrame
            Samples kept.
        df_drop : pandas.DataFrame
            Samples flagged as outliers.
        '''
        if type(df.columns) == pd.core.indexes.multi.MultiIndex:
            df = df.T.reset_index(level=0, drop=True).T
        # BUGFIX: `threshold` was hard-coded to 3.5, silently ignoring the
        # caller's argument.
        # NOTE(review): `colname` is intentionally not forwarded — the
        # band-wise NaN count below needs the full-frame mask; confirm
        # whether single-band checks are ever wanted here.
        df_check = DataLoad.zscore_mad_check(df, colname='',
                                             threshold=threshold)
        # Flagged cells come back as NaN; count them per sample.
        nan_num = df_check.isnull().sum(axis=1)
        keep = nan_num <= band_num          # boolean mask; df is not mutated
        df_keep = df[keep]
        df_drop = df[~keep]
        from pandas.plotting import parallel_coordinates
        import matplotlib.ticker as ticker
        import matplotlib.pyplot as plt
        ax_keep = df_keep.sort_index().reset_index()
        ax = parallel_coordinates(ax_keep, class_column='Class', color=color,
                            linewidth=0.5, axvlines=False, sort_labels=True)
        ax.grid(False)
        ax.xaxis.set_major_locator(ticker.MultipleLocator(20))
        try:
            df_drop.T.plot(ax=ax, color='black', linewidth=3.0, legend=False)
        except TypeError:
            # an empty df_drop cannot be line-plotted; nothing to overlay
            pass
        # BUGFIX: title typo "Chick" -> "Check".
        plt.title(" Outlier Check ", fontweight="semibold", fontsize='large')

        return df_keep, df_drop

    @staticmethod
    def outlier_dataload(df, check_threshold=3.5, check_band_num=5,
                split_method='ks', path='', path_p='', task='classification'):
        '''
        Outlier removal followed by dataset splitting.

        Parameters
        ----------
        df : pandas.DataFrame
        check_threshold : float, optional
            Per-band MAD z-score threshold. The default is 3.5.
        check_band_num : int, optional
            Max flagged bands per sample. The default is 5.
        split_method : str, optional
            'random' / 'ks' / 'spxy'. The default is 'ks'.
        path : str, optional
            When non-empty, used both as the figure prefix and the csv
            output prefix. The default is ''.
        path_p : str, optional
            Figure prefix (overridden by `path`). The default is ''.
        task : {'classification', 'regression'}, optional

        Returns
        -------
        df_train : pandas.DataFrame
        df_test : pandas.DataFrame

        Raises
        ------
        ValueError
            For an unknown `task` (previously fell through to NameError).
        '''
        if path != '':
            path_p = path
        if task == 'classification':
            df_train_frame = []
            df_test_frame = []
            table_processing = Table(df)
            df_frame = table_processing.factorIterate(factor='Class')
            colors = [['red'], ['green'], ['blue']]
            # `df_level` no longer shadows the `df` parameter.
            for level, df_level in enumerate(df_frame[1]):
                # cycle colors so >3 classes no longer raise IndexError
                df_check = DataLoad.outlierCheck(
                    df_level, color=colors[level % len(colors)],
                    threshold=check_threshold, band_num=check_band_num)[0]      # Set Outlier check parameter.
                dataload = DataLoad(df_check,
                                    label=[level + 1] * df_check.shape[0])
                loaded = dataload.SetSplit(test_size=0.3, method=split_method)  # Set Dataset Split method.
                df_train_frame.append(loaded[0])
                df_test_frame.append(loaded[1])
            df_train = Table.concat(df_train_frame, axis=0).DataFrame
            df_test = Table.concat(df_test_frame, axis=0).DataFrame
        elif task == 'regression':
            dataload = DataLoad(df, label=list(df.index))
            df_train, df_test = dataload.SetSplit(test_size=0.3,
                                                  method=split_method)
        else:
            raise ValueError("unknown task: %r" % task)
        # `plt` is the module-level matplotlib import further down the file.
        if path_p == '':
            plt.show()
        else:
            plt.savefig(path_p + '_check.png', dpi=300)
        plt.close()
        if path != '':
            df_train.to_csv(path_or_buf=path + '_train.csv')
            df_test.to_csv(path_or_buf=path + '_test.csv')

        return df_train, df_test

import sys
import pywt
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, StandardScaler

class PreProcessing():
    '''
    Spectral data preprocessing.

    1. Smoothing: suppresses high-frequency noise and raises the SNR. The
       more points per window, the more the spectral resolution degrades
       and the spectrum distorts; too few points smooth poorly.
    2. Scatter correction: removes scatter effects caused by uneven
       particle distribution and size in the sample.
    3. Baseline correction: removes baseline shift and drift (scatter),
       suppressing background interference, resolving overlapped peaks and
       improving resolution and sensitivity.
    4. Data enhancement: removes redundant information and enlarges
       between-sample differences, improving model robustness and
       predictive power.

    References
    ----------
    [1] Roles and development of spectral pretreatment methods in NIR analysis
    [2] Progress and application of pretreatment and wavelength selection in NIR
    [3] https://gitee.com/aBugsLife/spectral-pretreatment-method
    '''
    def __init__(self, df):
        '''
        Parameters
        ----------
        df : pandas.DataFrame
            One spectrum per row; columns are wavelengths. Rows containing
            NaN are dropped.
        '''
        df = df.dropna(axis=0)
        self.Index = list(df.index)                                             # sample names, used as the plot legend
        self.X_col = np.array(list(df.columns)).astype(np.float64)              # Array(n_features,) wavelength axis
        self.Data = np.array(df.values).astype(np.float64)                      # Array(n_samples, n_features)


    @staticmethod
    def Draw(X_col, Data, name='raw', legend=[], path=''):
        '''
        Plot spectra (one line per sample) and show or save the figure.

        Parameters
        ----------
        X_col : ndarray
            (n_features,) wavelength axis.
        Data : ndarray
            (n_samples, n_features).
        name : str, optional
            Used in the title and the output file name. The default is 'raw'.
        legend : list, optional
            Per-line legend labels; [] disables the legend.
        path : str, optional
            '' shows the plot; otherwise saves `<path>spectrum_<name>.png`.
        '''
        plt.figure(figsize=(6.5, 5))
        ax = plt.plot(X_col, np.transpose(Data))
        plt.xlabel("Wavelength(nm)", fontsize=15)
        plt.ylabel("Reflectance", fontsize=15)
        plt.xticks(size=12)
        plt.yticks(size=12)
        plt.title("The spectrum of the " + name + " for dataset",
                  fontweight="semibold", fontsize=15)
        if legend != []:
            plt.legend(ax, legend, fontsize=12)

        plt.rc('font', family='Times New Roman')

        if path == '':
            plt.show()
        else:
            plt.savefig(path + 'spectrum_' + name + '.png', dpi=300)
        plt.close()


    def MA(self, wsz=9, path=''):
        '''
        Moving-average smoothing.

        Each point becomes the mean of a centered window; near the edges
        the window shrinks symmetrically so the output keeps the input
        length.

        Parameters
        ----------
        wsz : int, optional
            Smoothing window size; must be odd. The default is 9.
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_ma : pandas.DataFrame
        data_ma : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        data_ma = data.copy()
        for i in range(data_ma.shape[0]):
            out0 = np.convolve(data_ma[i], np.ones(wsz, dtype=int),
                               'valid') / wsz                                   # full (odd-sized) windows
            # shrinking half-windows for the leading/trailing edges
            r = np.arange(1, wsz - 1, 2)
            start = np.cumsum(data_ma[i, :wsz - 1])[::2] / r
            stop = (np.cumsum(data_ma[i, :-wsz:-1])[::2] / r)[::-1]
            data_ma[i] = np.concatenate((start, out0, stop))

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_ma, legend=self.Index,
                           name=name, path=path)
        df_ma = Table.data2dataframe(data_ma, self.Index, self.X_col)

        return df_ma, data_ma


    def SG(self, wsz=9, k=3, path=''):
        '''
        Savitzky-Golay smoothing: fit a k-th order polynomial within each
        window and take the fitted value.

        Parameters
        ----------
        wsz : int, optional
            Smoothing window size; must be odd. The default is 9.
        k : int, optional
            Polynomial order. The default is 3.
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_sg : pandas.DataFrame
        data_sg : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        data_sg = signal.savgol_filter(data, wsz, k)                            # Window size: odd.

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_sg, legend=self.Index,
                           name=name, path=path)
        df_sg = Table.data2dataframe(data_sg, self.Index, self.X_col)

        return df_sg, data_sg


    def MSC(self, path=''):
        '''
        Multiplicative Scatter Correction: regress every spectrum against
        the mean spectrum (y = k*mean + b), then correct as (y - b) / k.

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_msc : pandas.DataFrame
        data_msc : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        n, p = data.shape
        data_msc = np.ones((n, p))

        # Mean spectrum computed once (was needlessly recomputed n times
        # in a loop).
        mean = np.mean(data, axis=0)

        for i in range(n):                                                      # linear fit of each spectrum vs the mean
            y = data[i, :]
            reg = LinearRegression()
            reg.fit(mean.reshape(-1, 1), y.reshape(-1, 1))
            k = reg.coef_
            b = reg.intercept_
            data_msc[i, :] = (y - b) / k

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_msc, legend=self.Index,
                           name=name, path=path)
        df_msc = Table.data2dataframe(data_msc, self.Index, self.X_col)

        return df_msc, data_msc


    def SNV(self, path=''):
        '''
        Standard Normal Variate: subtract each spectrum's mean and divide
        by its standard deviation (row-wise standardization).

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_snv : pandas.DataFrame
        data_snv : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        # Vectorized row-wise standardization (replaces the former nested
        # list comprehension; identical values).
        data_average = np.mean(data, axis=1, keepdims=True)
        data_std = np.std(data, axis=1, keepdims=True)
        data_snv = ((data - data_average) / data_std).astype(np.float64)

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_snv, legend=self.Index,
                           name=name, path=path)
        df_snv = Table.data2dataframe(data_snv, self.Index, self.X_col)

        return df_snv, data_snv


    def D1(self, path=''):
        '''
        First-derivative spectrum (baseline correction): plot/return the
        first difference of each spectrum; the wavelength axis loses its
        last point.

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_d1 : pandas.DataFrame
        Di : ndarray, (n_samples, n_features - 1)
        '''
        data = self.Data
        Di = np.diff(data, axis=1)                                              # row-wise first difference

        name = sys._getframe().f_code.co_name
        X_col_new = np.delete(self.X_col, -1)
        PreProcessing.Draw(X_col_new, Di, legend=self.Index,
                           name=name, path=path)
        df_d1 = Table.data2dataframe(Di, self.Index, X_col_new)

        return df_d1, Di


    def D2(self, path=''):
        '''
        Second-derivative spectrum: second difference of each spectrum;
        the wavelength axis loses its last two points.

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_d2 : pandas.DataFrame
        Di : ndarray, (n_samples, n_features - 2)
        '''
        data = self.Data
        Di = np.diff(data, n=2, axis=1)                                         # row-wise second difference

        name = sys._getframe().f_code.co_name
        X_col_new = np.delete(np.delete(self.X_col, -1), -1)
        PreProcessing.Draw(X_col_new, Di, legend=self.Index,
                           name=name, path=path)
        df_d2 = Table.data2dataframe(Di, self.Index, X_col_new)

        return df_d2, Di


    def DT(self, path=''):
        '''
        Detrend (baseline correction): fit a straight line of reflectance
        vs wavelength for each spectrum and subtract it, removing baseline
        drift.

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_dt : pandas.DataFrame
        data_dt : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        data_dt = data.copy()
        x = self.X_col
        reg = LinearRegression()
        for i in range(data_dt.shape[0]):
            reg.fit(x.reshape(-1, 1), data_dt[i].reshape(-1, 1))
            k = reg.coef_.item()
            b = reg.intercept_.item()
            # BUGFIX: the trend line was previously evaluated at the column
            # index j instead of the wavelength x[j] it was fitted against.
            data_dt[i] = data_dt[i] - (k * x + b)

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_dt, legend=self.Index,
                           name=name, path=path)
        df_dt = Table.data2dataframe(data_dt, self.Index, self.X_col)

        return df_dt, data_dt


    def WT(self, path=''):
        '''
        Wavelet transform denoising (baseline correction & noise removal):
        db8 decomposition, soft-threshold the detail coefficients, then
        reconstruct.

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_wt : pandas.DataFrame
        data_wt : ndarray, (n_samples, n_features)
        '''
        def wave_(row):
            w = pywt.Wavelet('db8')                                             # Daubechies 8
            maxlev = pywt.dwt_max_level(len(row), w.dec_len)
            coeffs = pywt.wavedec(row, 'db8', level=maxlev)
            threshold = 0.04
            for i in range(1, len(coeffs)):
                coeffs[i] = pywt.threshold(coeffs[i],
                                           threshold * max(coeffs[i]))
            # waverec can return one extra sample for odd-length input;
            # trim so the result matches the wavelength axis.
            return pywt.waverec(coeffs, 'db8')[:len(row)]

        data = self.Data
        # build once instead of repeated vstack in a loop
        data_wt = np.vstack([wave_(row) for row in data])

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_wt, legend=self.Index,
                           name=name, path=path)
        df_wt = Table.data2dataframe(data_wt, self.Index, self.X_col)

        return df_wt, data_wt


    def MMN(self, path=''):
        '''
        Min-Max Normalization (data enhancement): linearly map each band
        to [0, 1] without changing the distribution.
        x* = (x - x_min) / (x_max - x_min)

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_mmn : pandas.DataFrame
        data_mmn : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        data_mmn = MinMaxScaler().fit_transform(data)

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_mmn, legend=self.Index,
                           name=name, path=path)
        df_mmn = Table.data2dataframe(data_mmn, self.Index, self.X_col)

        return df_mmn, data_mmn


    def VN(self, path=''):
        '''
        Vector Normalization: subtract each spectrum's mean, then divide
        by its Euclidean norm.

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_vn : pandas.DataFrame
        data_vn : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        # keepdims broadcasting replaces the former tile/reshape juggling;
        # identical values.
        x_means = np.mean(data, axis=1, keepdims=True)
        x_norm = np.sqrt(np.sum(np.power(data, 2), axis=1, keepdims=True))
        data_vn = (data - x_means) / x_norm

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_vn, legend=self.Index,
                           name=name, path=path)
        df_vn = Table.data2dataframe(data_vn, self.Index, self.X_col)

        return df_vn, data_vn


    def SS(self, path=''):
        '''
        Standard Scaling (data enhancement): transform each band to zero
        mean and unit variance.        x* = (x - mu) / sigma

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_ss : pandas.DataFrame
        data_ss : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        data_ss = StandardScaler().fit_transform(data)

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_ss, legend=self.Index,
                           name=name, path=path)
        df_ss = Table.data2dataframe(data_ss, self.Index, self.X_col)

        return df_ss, data_ss


    def MC(self, path=''):
        '''
        Mean Centering (data enhancement): subtract each spectrum's mean,
        shifting all spectra to zero center.        x* = x - mu

        Parameters
        ----------
        path : str, optional
            Forwarded to Draw. The default is ''.

        Returns
        -------
        df_mc : pandas.DataFrame
        data_mc : ndarray, (n_samples, n_features)
        '''
        data = self.Data
        # vectorized row-wise centering (identical to the former loop)
        data_mc = data - np.mean(data, axis=1, keepdims=True)

        name = sys._getframe().f_code.co_name
        PreProcessing.Draw(self.X_col, data_mc, legend=self.Index,
                           name=name, path=path)
        df_mc = Table.data2dataframe(data_mc, self.Index, self.X_col)

        return df_mc, data_mc


    def PreProcess(self, methods=[], return2data=True, path=''):
        '''
        Apply a chain of preprocessing methods in order.

        Parameters
        ----------
        methods : list of str
            Method names applied in sequence: 'MA', 'SG', 'MSC', 'SNV',
            'D1', 'D2', 'DT', 'WT', 'MMN', 'VN', 'SS', 'MC', or 'None'
            (pass-through). Must not be empty.
        return2data : bool, optional
            When True each step's output replaces self.Data so the methods
            chain. The default is True.
        path : str, optional
            When non-empty, plots and the final csv are written under it.

        Returns
        -------
        df : pandas.DataFrame
            Result of the last step.

        Raises
        ------
        ValueError
            For an empty `methods` list or an unknown method name
            (previously both fell through to NameError).
        '''
        if not methods:
            raise ValueError("methods must contain at least one method name")
        PreProcessing.Draw(self.X_col, self.Data, path=path)
        suf = methods[0] if len(methods) == 1 else "_".join(methods)
        dispatch = {
            'MA': lambda: self.MA(wsz=9, path=path),
            'SG': lambda: self.SG(wsz=9, k=3, path=path),
            'MSC': lambda: self.MSC(path=path),
            'SNV': lambda: self.SNV(path=path),
            'D1': lambda: self.D1(path=path),
            'D2': lambda: self.D2(path=path),
            'DT': lambda: self.DT(path=path),
            'WT': lambda: self.WT(path=path),
            'MMN': lambda: self.MMN(path=path),
            'VN': lambda: self.VN(path=path),
            'SS': lambda: self.SS(path=path),
            'MC': lambda: self.MC(path=path),
        }
        for method in methods:
            if method == "None":
                # BUGFIX: this branch referenced self.DataFrame, which this
                # class never sets; rebuild the frame from the current state.
                data = self.Data
                df = Table.data2dataframe(data, self.Index, self.X_col)
            elif method in dispatch:
                df, data = dispatch[method]()
            else:
                raise ValueError("no such method of preprocessing: %r"
                                 % method)

            if return2data:
                self.Data = data
                # derivatives shorten the wavelength axis; keep it in sync
                if method == 'D1':
                    self.X_col = np.delete(self.X_col, -1)
                if method == 'D2':
                    self.X_col = np.delete(np.delete(self.X_col, -1), -1)

        if path != '':
            df.to_csv(path + 'Preprocessed_' + suf + '.csv')

        return df
        

from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn import linear_model
from sklearn.decomposition import PCA
import copy
from scipy.linalg import qr
import scipy.stats
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from numpy.linalg import matrix_rank as rank

class FeatureSelection():
    '''
    Spectral feature selection / feature extraction.

    Wraps several wavelength-selection algorithms (CARS, SPA, UVE, LARS,
    ReliefF, MRMR) and PCA around a spectra DataFrame whose column names are
    numeric wavelengths and whose index holds the class labels.

    [1]CARS: https://gitee.com/aBugsLife/CARS
    [2]MC: https://www.jianshu.com/p/3d30070932a8
    [3]SPA: https://gitee.com/aBugsLife/SPA
    [4]LARS: https://blog.csdn.net/guofei_fly/article/details/103845342

    '''
    def __init__(self, df, label=None):
        '''
        Parameters
        ----------
        df : pandas.DataFrame
            Spectra, shape (n_samples, n_features); columns must be
            numeric-convertible wavelength strings. Rows with NaN are dropped.
        label : array-like, optional
            Sample labels. When None, labels are derived from the string
            index (same string -> same number, numbered from 1).
        '''
        df = df.dropna(axis=0)
        self.DataFrame = df
        # Wavelength axis taken from the column names.
        self.Columns = np.array(list(df.columns)).astype(np.float64)
        # FIX: '== None' raises/ambiguates when label is an ndarray.
        if label is None:
            label = np.array(FeatureSelection.__strList2numberList(
                                                             list(df.index)))
        self.Label = label                                                      # Array(n_samples,)
        self.Data = np.array(df.values).astype(np.float64)                        # Array(n_samples, n_features)
        # print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))

    @staticmethod
    def __strList2numberList(str_list):
        '''
        Map equal strings to equal numbers, numbered from 1 in order of
        first appearance.

        Arguments:
            str_list: list[str,...,str]
        Returns:
            number_list: list[float,...,float]

        '''
        number_list = str_list.copy()
        filter_list = list(set(str_list))
        filter_list.sort(key=str_list.index)
        for i, index1 in enumerate(filter_list):
            for j, index2 in enumerate(number_list):
                if index1 == index2:
                    number_list[j] = float(i+1)
        return number_list

    def ReliefF(self, max_iter=10, tao=0, neighbors=10, path=''):
        '''
        ReliefF feature weighting; returns the DataFrame of selected
        features and saves/shows a weight plot.
        '''
        X = self.Data
        y = self.Label
        features = self.Columns
        F = ReliefF(max_iter=max_iter, tao=tao, neighbors=neighbors)
        selected_df = F.fit_transform(X, y, features)
        F.weight_graph(features=features, path=path)
        return selected_df

    def CARS_Cloud(self, N=50, f=20, cv=10):
        '''
        Competitive Adaptive Reweighted Sampling (CARS).

        Parameters
        ----------
        N : int, optional
            Number of Monte-Carlo sampling runs. The default is 50.
        f : int, optional
            Maximum number of PLS components. The default is 20.
        cv : int, optional
            k-fold cross-validation folds. The default is 10.

        Returns
        -------
        OptWave : ndarray
            Indices (into the current wavelength axis) of the selected
            wavelengths at the iteration with minimum RMSECV.

        '''
        X = self.Data
        y = self.Label
        p = 0.8                       # calibration-set ratio per MC run
        m, n = X.shape
        # Exponentially decaying ratio of retained wavelengths.
        u = np.power((n/2), (1/(N-1)))
        k = (1/(N-1)) * np.log(n/2)
        cal_num = np.round(m * p)
        b2 = np.arange(n)
        x = copy.deepcopy(X)
        # First row of D tracks the original wavelength indices.
        D = np.vstack((np.array(b2).reshape(1, -1), X))
        WaveData = []
        WaveNum = []
        RMSECV = []
        r = []
        for i in range(1, N+1):
            r.append(u*np.exp(-1*k*i))
            wave_num = int(np.round(r[i-1]*n))
            WaveNum = np.hstack((WaveNum, wave_num))
            cal_index = np.random.choice    \
                (np.arange(m), size=int(cal_num), replace=False)
            wave_index = b2[:wave_num].reshape(1, -1)[0]
            xcal = x[np.ix_(list(cal_index), list(wave_index))]
            ycal = y[cal_index]
            x = x[:, wave_index]
            D = D[:, wave_index]
            d = D[0, :].reshape(1, -1)
            wnum = n - wave_num
            # Pad dropped wavelengths with -1 so every row has length n.
            if wnum > 0:
                d = np.hstack((d, np.full((1, wnum), -1)))
            if len(WaveData) == 0:
                WaveData = d
            else:
                WaveData = np.vstack((WaveData, d.reshape(1, -1)))

            if wave_num < f:
                f = wave_num

            pls = PLSRegression(n_components=f)
            pls.fit(xcal, ycal)
            # NOTE(review): assumes coef_ has shape (n_features, n_targets)
            # as in older scikit-learn; newer releases transpose it — verify.
            beta = pls.coef_
            b = np.abs(beta)
            b2 = np.argsort(-b, axis=0)
            coef = copy.deepcopy(beta)
            coeff = coef[b2, :].reshape(len(b2), -1)
            rmsecv, rindex = FeatureSelection.__PC_Cross_Validation(
                                                            xcal, ycal, f, cv)
            RMSECV.append(FeatureSelection.__Cross_Validation(
                                                    xcal, ycal, rindex+1, cv))

        # Rebuild, per MC run, a length-n occupancy vector of surviving
        # original wavelength indices (0 = dropped in that run).
        WAVE = []
        for i in range(WaveData.shape[0]):
            wd = WaveData[i, :]
            WD = np.ones((len(wd)))
            for j in range(len(wd)):
                ind = np.where(wd == j)
                if len(ind[0]) == 0:
                    WD[j] = 0
                else:
                    WD[j] = wd[ind[0]]
            if len(WAVE) == 0:
                WAVE = copy.deepcopy(WD)
            else:
                WAVE = np.vstack((WAVE, WD.reshape(1, -1)))

        MinIndex = np.argmin(RMSECV)
        Optimal = WAVE[MinIndex, :]
        boindex = np.where(Optimal != 0)
        OptWave = boindex[0]

        fig = plt.figure()
        plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels
        plt.rcParams['axes.unicode_minus'] = False  # render minus signs
        fonts = 16
        plt.subplot(211)
        plt.xlabel('蒙特卡洛迭代次数', fontsize=fonts)
        plt.ylabel('被选择的波长数量', fontsize=fonts)
        plt.title('最佳迭代次数：' + str(MinIndex) + '次', fontsize=fonts)
        plt.plot(np.arange(N), WaveNum)

        plt.subplot(212)
        plt.xlabel('蒙特卡洛迭代次数', fontsize=fonts)
        plt.ylabel('RMSECV', fontsize=fonts)
        plt.plot(np.arange(N), RMSECV)
        plt.show()

        return OptWave

    @staticmethod
    def __PC_Cross_Validation(X, y, pc, cv):
        '''
        Cross-validate PLS over 1..pc components.

        Parameters:
            X : spectra matrix, n x m
            y : reference (chemical) values
            pc: maximum number of components
            cv: number of CV folds
        Returns:
            RMSECV: RMSECV per component count
            rindex: best component count index (argmin)
        '''
        kf = KFold(n_splits=cv)
        RMSECV = []
        for i in range(pc):
            RMSE = []
            for train_index, test_index in kf.split(X):
                x_train, x_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]
                pls = PLSRegression(n_components=i + 1)
                pls.fit(x_train, y_train)
                y_predict = pls.predict(x_test)
                RMSE.append(np.sqrt(mean_squared_error(y_test, y_predict)))
            RMSE_mean = np.mean(RMSE)
            RMSECV.append(RMSE_mean)
        rindex = np.argmin(RMSECV)
        return RMSECV, rindex

    @staticmethod
    def __Cross_Validation(X, y, pc, cv):
        '''
        Cross-validated RMSE of a PLS model with exactly pc components.

        Parameters:
            X : spectra matrix, n x m
            y : reference (chemical) values
            pc: number of components
            cv: number of CV folds
        Returns:
            mean RMSE over the folds
        '''
        kf = KFold(n_splits=cv)
        RMSE = []
        for train_index, test_index in kf.split(X):
            x_train, x_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            pls = PLSRegression(n_components=pc)
            pls.fit(x_train, y_train)
            y_predict = pls.predict(x_test)
            RMSE.append(np.sqrt(mean_squared_error(y_test, y_predict)))
        RMSE_mean = np.mean(RMSE)
        return RMSE_mean

    def SPA(self, m_min=1, m_max=None, Xval=None, yval=None, autoscaling=1):
        '''
        Successive Projections Algorithm.

        Parameters
        ----------
        m_min : int, optional
            Minimum number of selected wavelengths. The default is 1.
        m_max : int, optional
            Maximum number of selected wavelengths. When None:
             1. with a separate validation set, m_max = min(N-1, K)
             2. with cross-validation,          m_max = min(N-2, K)
        Xval, yval : ndarray, optional
            Separate validation set; when None, leave-one-out CV is used.
        autoscaling : {0, 1}, optional
            Whether to autoscale columns. The default is 1.

        Returns
        -------
        var_sel : ndarray
            Indices of the selected wavelengths.

        '''
        Xcal = self.Data
        ycal = self.Label

        assert (autoscaling == 0 or autoscaling == 1), "请选择是否使用自动计算"

        N, K = Xcal.shape

        if m_max is None:
            # FIX: the two branches were swapped relative to the docstring,
            # and the old strict assert (m_max < its own bound) always failed.
            if Xval is None:
                m_max = min(N - 2, K)
            else:
                m_max = min(N - 1, K)

            assert (m_max <= min(N - 1, K)), "m_max 参数异常"

        # Step 1: project the calibration set.

        normalization_factor = None
        if autoscaling == 1:
            normalization_factor = np.std(
                Xcal, ddof=1, axis=0).reshape(1, -1)[0]
        else:
            normalization_factor = np.ones((1, K))[0]

        Xcaln = np.empty((N, K))
        for k in range(K):
            x = Xcal[:, k]
            Xcaln[:, k] = (x - np.mean(x)) / normalization_factor[k]

        SEL = np.zeros((m_max, K))

        # One projection chain per possible starting wavelength.
        for k in range(K):
            SEL[:, k] = FeatureSelection.__projections_qr(Xcaln, k, m_max)

        # Step 2: evaluate every (start wavelength, subset size) pair.

        PRESS = float('inf') * np.ones((m_max + 1, K))

        for k in range(K):
            for m in range(m_min, m_max + 1):
                # FIX: np.int was removed from NumPy; use the builtin.
                var_sel = SEL[:m, k].astype(int)
                _, e = FeatureSelection.__validation(Xcal, ycal, var_sel, Xval, yval)
                PRESS[m, k] = np.conj(e).T.dot(e)

        PRESSmin = np.min(PRESS, axis=0)
        m_sel = np.argmin(PRESS, axis=0)
        k_sel = np.argmin(PRESSmin)

        # Best chain starts at wavelength k_sel with m_sel[k_sel] members.
        var_sel_phase2 = SEL[:m_sel[k_sel], k_sel].astype(int)

        # Final variable elimination.

        # Step 3.1: relevance index = |regression coef| * column std.
        Xcal2 = np.hstack([np.ones((N, 1)), Xcal[:, var_sel_phase2]])
        b = np.linalg.lstsq(Xcal2, ycal, rcond=None)[0]
        std_deviation = np.std(Xcal2, ddof=1, axis=0)

        relev = np.abs(b * std_deviation.T)
        relev = relev[1:]

        index_increasing_relev = np.argsort(relev, axis=0)
        index_decreasing_relev = index_increasing_relev[::-1].reshape(1, -1)[0]

        PRESS_scree = np.empty(len(var_sel_phase2))
        yhat = e = None
        for i in range(len(var_sel_phase2)):
            var_sel = var_sel_phase2[index_decreasing_relev[:i + 1]]
            _, e = FeatureSelection.__validation(Xcal, ycal, var_sel, Xval, yval)

            PRESS_scree[i] = np.conj(e).T.dot(e)

        RMSEP_scree = np.sqrt(PRESS_scree / len(e))

        # Step 3.3: F-test — keep the smallest subset whose PRESS is not
        # significantly worse than the minimum.
        PRESS_scree_min = np.min(PRESS_scree)
        alpha = 0.25
        dof = len(e)
        fcrit = scipy.stats.f.ppf(1 - alpha, dof, dof)
        PRESS_crit = PRESS_scree_min * fcrit

        i_crit = np.min(np.nonzero(PRESS_scree < PRESS_crit))
        i_crit = max(m_min, i_crit)

        var_sel = var_sel_phase2[index_decreasing_relev[:i_crit]]

        plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels
        plt.rcParams['axes.unicode_minus'] = False  # render minus signs
        fig1 = plt.figure()
        plt.xlabel('Number of variables included in the model')
        plt.ylabel('RMSE')
        plt.title('Final number of selected variables:{}(RMSE={})'.format(
                                            len(var_sel), RMSEP_scree[i_crit]))
        plt.plot(RMSEP_scree)
        plt.scatter(i_crit, RMSEP_scree[i_crit], marker='s', color='r')
        plt.grid(True)

        fig2 = plt.figure()
        plt.plot(Xcal[0, :])
        plt.scatter(var_sel, Xcal[0, var_sel], marker='s', color='r')
        plt.legend(['First calibration object', 'Selected variables'])
        plt.xlabel('Variable index')
        plt.grid(True)
        plt.show()

        return var_sel

    @staticmethod
    def __projections_qr(X, k, M):
        '''
        X : predictor matrix
        k : index of the initial column for the projection chain
        M : number of variables to return
        return : indices of the variable chain produced by the projections
        '''

        X_projected = X.copy()

        # Squared norm of every column.
        norms = np.sum((X ** 2), axis=0)
        norm_max = np.amax(norms)

        # Scale column k so pivoted QR picks it first.
        X_projected[:, k] = X_projected[:, k] * 2 * norm_max / norms[k]

        # Pivoted QR: 'order' is the column-exchange index.
        _, __, order = qr(X_projected, 0, pivoting=True)

        return order[:M].T

    @staticmethod
    def __validation(Xcal, ycal, var_sel, Xval=None, yval=None):
        '''
        [yhat,e] = validation(Xcal,ycal,var_sel,Xval,yval)
                   --> validate with a separate validation set
        [yhat,e] = validation(Xcal,ycal,var_sel)
                   --> leave-one-out cross-validation
        '''
        N = Xcal.shape[0]  # number of calibration samples
        if Xval is None:   # is a validation set supplied?
            NV = 0
        else:
            NV = Xval.shape[0]  # number of validation samples

        yhat = e = None

        # Separate validation set.
        if NV > 0:
            Xcal_ones = np.hstack(
                [np.ones((N, 1)), Xcal[:, var_sel].reshape(N, -1)])

            # MLR with intercept column.
            b = np.linalg.lstsq(Xcal_ones, ycal, rcond=None)[0]
            # Predict the validation set.
            X = np.hstack([np.ones((NV, 1)), Xval[:, var_sel]])
            yhat = X.dot(b)
            e = yval - yhat
        else:
            yhat = np.zeros((N, 1))
            for i in range(N):
                # Leave sample i out of the calibration set.
                cal = np.hstack([np.arange(i), np.arange(i + 1, N)])
                X = Xcal[cal, :]
                # FIX: np.int was removed from NumPy; use the builtin.
                X = X[:, var_sel.astype(int)]
                y = ycal[cal]
                xtest = Xcal[i, var_sel]
                X_ones = np.hstack([np.ones((N - 1, 1)), X.reshape(N - 1, -1)])
                b = np.linalg.lstsq(X_ones, y, rcond=None)[0]
                yhat[i] = np.hstack([np.ones(1), xtest]).dot(b)
            e = ycal - yhat

        return yhat, e

    def UVE(self, nrep=10, ncomp=10, testSize=0.2, cv=5):
        '''
        Uninformative Variable Elimination.

        Parameters
        ----------
        nrep : int, optional
            Number of random-split repetitions. The default is 10.
        ncomp : int, optional
            Number of latent components; must not exceed any dimension of
            the predictor matrix. The default is 10.
        testSize : float, optional
            Test-set ratio of each split. The default is 0.2.
        cv : int, optional
            Cross-validation folds. The default is 5.

        Returns
        -------
        selFeature : ndarray
            Indices of the selected features (best-R2 prefix of the ranking).

        '''
        X = self.Data
        y = self.Label
        featureR2, featureIndex = FeatureSelection.__evalCriteria(
                        X, y, nrep=nrep, ncomp=ncomp, testSize=testSize, cv=cv)
        cuti = np.argmax(featureR2)
        selFeature = featureIndex[:cuti+1]
        return selFeature

    @staticmethod
    def __calcCriteria(X, y, nrep=10, ncomp=10, testSize=0.2):
        # Stability criterion: mean/std of PLS coefficients over random splits;
        # features ranked by |criterion| descending.
        PLSCoef = np.zeros((nrep, X.shape[1]))
        ss = ShuffleSplit(n_splits=nrep, test_size=testSize)
        step = 0
        for train, test in ss.split(X, y):
            xtrain = X[train, :]
            ytrain = y[train]
            plsModel = PLSRegression(min([ncomp, rank(xtrain)]))
            plsModel.fit(xtrain, ytrain)
            # NOTE(review): assumes coef_ is (n_features, 1) — newer
            # scikit-learn transposes this; verify against the pinned version.
            PLSCoef[step, :] = plsModel.coef_.T
            step += 1
        meanCoef = np.mean(PLSCoef, axis=0)
        stdCoef = np.std(PLSCoef, axis=0)
        criteria = meanCoef / stdCoef
        featureIndex = np.argsort(-np.abs(criteria))

        return featureIndex

    @staticmethod
    def __evalCriteria(X, y, nrep=10, ncomp=10, testSize=0.2, cv=5):
        # FIX: compute the (randomised) ranking ONCE; the old code re-sampled
        # it inside the loop, so successive prefixes came from different
        # rankings — and it was nrep*n_features times slower.
        featureIndex = FeatureSelection.__calcCriteria(
                          X, y, nrep=nrep, ncomp=ncomp, testSize=testSize)
        featureR2 = np.full(X.shape[1], np.nan)
        for i in range(X.shape[1]):
            xi = X[:, featureIndex[:i + 1]]
            if i < ncomp:
                # FIX: bare LinearRegression was a NameError — only
                # 'from sklearn import linear_model' is imported.
                regModel = linear_model.LinearRegression()
            else:
                regModel = PLSRegression(min([ncomp, rank(xi)]))
            cvScore = cross_val_score(regModel, xi, y, cv=cv)
            featureR2[i] = np.mean(cvScore)

        return featureR2, featureIndex

    def LAR(self, nums=40):
        '''
        Least Angle Regression.

        Parameters
        ----------
        nums : int, optional
            Number of features to select. The default is 40.

        Returns
        -------
        SpectrumList : ndarray
            Sorted indices of the nums largest-|coefficient| features.

        '''
        X = self.Data
        y = self.Label
        lars = linear_model.Lars()
        lars.fit(X, y)
        corflist = np.abs(lars.coef_)

        corf = np.asarray(corflist)
        SpectrumList = corf.argsort()[-1:-(nums+1):-1]
        SpectrumList = np.sort(SpectrumList)

        return SpectrumList

    def MRMR(self, feature_num=None, path=''):
        '''
        Max-Relevance Min-Redundancy selection; returns the DataFrame of
        selected features and saves/shows a score plot.
        '''
        X = self.Data
        y = self.Label
        features = self.Columns
        # FIX: identity comparison for None.
        if feature_num is None:
            feature_num = X.shape[1]
        M = MRMR(feature_num=feature_num)
        selected_df = M.fit_transform(X, y, features)
        M.weight_graph(features=features, path=path)
        return selected_df

    def PCA(self, nums=None):
        """
           :param nums: number of principal components retained
                        (default: min(n_samples, n_features))
           :return: (fitted PCA model, data after dimensionality reduction)
           https://blog.csdn.net/TSzero/article/details/116601796
        """
        X = self.Data
        if nums is None:
            nums = min(X.shape[0], X.shape[1])
        pca = PCA(n_components=nums)
        # FIX: fit_transform already fits — the extra pca.fit(X) was a
        # redundant second decomposition of the same data.
        X_reduction = pca.fit_transform(X)

        return pca, X_reduction

    def Select(self, method='', path='', path_p='', return2data=True):
        '''
        Dispatch wavelength selection / dimensionality reduction.

        Parameters
        ----------
        method : str, optional
            One of "None", 'CARS', 'SPA', 'UVE', 'LARS', 'MRMR', 'PCA',
            'ReliefF'.
        path : str, optional
            When non-empty, the result is also written to
            path + 'Selected_<method>.csv' and plots are saved there.
        path_p : str, optional
            Plot-output path; overridden by path when path is non-empty.
        return2data : bool, optional
            When True, self.Data / self.Columns are updated in place.

        Returns
        -------
        df_selected (and the fitted PCA model when method == 'PCA').

        Raises
        ------
        ValueError
            If method is not one of the supported names.

        '''
        X = self.Data
        y = self.Label

        if path != '':
            path_p = path

        if method == "None":
            df_selected = self.DataFrame
        elif method == 'CARS':
            Featuresecletidx = self.CARS_Cloud(N=50, f=20, cv=10)
            df_selected = self.DataFrame.iloc[:, Featuresecletidx]
        elif method == 'SPA':
            Xcal, Xval, ycal, yval = train_test_split(X, y, test_size=0.2)
            Featuresecletidx = self.SPA(m_min=8, m_max=50,
                                        Xval=Xval, yval=yval, autoscaling=1)
            df_selected = self.DataFrame.iloc[:, Featuresecletidx]
        elif method == 'UVE':
            Featuresecletidx = self.UVE(nrep=10, ncomp=10, testSize=0.2, cv=5)
            df_selected = self.DataFrame.iloc[:, Featuresecletidx]
        elif method == 'LARS':
            Featuresecletidx = self.LAR(nums=40)
            df_selected = self.DataFrame.iloc[:, Featuresecletidx]
        elif method == 'MRMR':
            df_selected = self.MRMR(feature_num=None, path=path_p)
        elif method == 'PCA':
            pca, X_reduction = self.PCA(nums=30)
            df_selected = pd.DataFrame(X_reduction, index=y)
            df_selected.columns = df_selected.columns + 1
            df_selected.index.name = 'Class'
        elif method == 'ReliefF':
            df_selected = self.ReliefF(max_iter=10, tao=-100, neighbors=5, path=path_p)
        else:
            # FIX: an unknown method previously fell through to a NameError.
            raise ValueError("unknown feature-selection method: %r" % method)

        if return2data == True:
            self.Data = np.array(df_selected.values).astype(np.float64)
            self.Columns = np.array(list(df_selected.columns)).astype(np.float64)

        if path != '':
            df_selected.to_csv(path_or_buf=path + 'Selected_' + method + '.csv')

        if method == 'PCA':
            return pca, df_selected
        else:
            return df_selected


class ReliefF():
    def __init__(self, max_iter, tao, neighbors):
        """
        Simple ReliefF feature weighting for (multi-class) feature selection.

        Pay attention: this implementation samples random same-class
        neighbours rather than true nearest hits/misses, and it cannot
        handle null data.

        Read more in :ref:`https://blog.csdn.net/littlely_ll/article/details/71614826`.

        :param max_iter: number of averaged fit passes in fit_transform

        :param tao: threshold on the averaged weight for keeping a feature

        :param neighbors: neighbours sampled per class per sample
        """
        self.max_iter = max_iter
        self.tao = tao
        self.neighbors = neighbors
        self._weight = None                  # per-feature weights (set by fit)
        self._important_weight = dict()      # {feature index: weight >= tao}

    def fit(self, X, y):
        """
        Run one ReliefF pass over every sample.

        :param X: a numpy array, shape (n_samples, n_features)

        :param y: the label, a list or one dimension array

        :return: self
        """

        m, n = X.shape

        # FIX: reset both weight containers so repeated fit() calls (as done
        # by fit_transform) do not accumulate stale per-run entries.
        self._weight = np.zeros(n)
        self._important_weight = dict()

        label_count = dict()
        label_index = dict()
        for label in np.unique(y):
            label_index[label] = np.where(y == label)[0]
            label_count[label] = len(np.where(y == label)[0])

        # Prior probability of each class.
        label_probability = dict((label, count/m) for label, count in label_count.items())

        # Per-column type: (1,) for categorical (str), (0, range) for numeric.
        col_type = []
        for i in range(n):
            if isinstance(X[:, i][0], str):
                col_type.append((1,))
            else:
                col_min = X[:, i].min()
                col_max = X[:, i].max()
                difference = col_max - col_min
                col_type.append((0, difference))

        # Iterate over ALL samples (instead of random draws); fit_transform
        # averages this over max_iter passes.
        for sample_seed in range(m):
            sample_y = y[sample_seed]
            sample_x = X[sample_seed]

            for j in range(n):
                near_hit_sum = 0
                near_miss_sum = 0
                for label in label_index.keys():
                    if label == sample_y:
                        # NOTE(review): the draw may include the sample itself.
                        near_hit_neighbors = np.random.choice(label_index[label], self.neighbors, replace=False)
                        for i in near_hit_neighbors:
                            sample_i = X[i]
                            if col_type[j][0] == 1:
                                if sample_x[j] != sample_i[j]:
                                    near_hit_sum += 1
                            else:
                                near_hit_sum += np.abs(sample_x[j] - sample_i[j]) / col_type[j][1]
                    else:
                        pre_near_miss_sum = 0
                        near_miss_neighbors = np.random.choice(label_index[label], self.neighbors, replace=False)
                        for i in near_miss_neighbors:
                            sample_i = X[i]
                            if col_type[j][0] == 1:
                                if sample_x[j] != sample_i[j]:
                                    pre_near_miss_sum += 1
                            else:
                                # NOTE(review): +0.001 smoothing kept from the
                                # original implementation.
                                pre_near_miss_sum += np.abs(sample_x[j] - sample_i[j]) / col_type[j][1] + 0.001
                        # Weight miss contribution by the class prior.
                        near_miss_sum += pre_near_miss_sum * label_probability[label] / (
                                    1 - label_probability[sample_y])

                self._weight[j] += (near_miss_sum - near_hit_sum) / (self.neighbors * m)

        for i, w in enumerate(self._weight):
            if w >= self.tao:
                self._important_weight[i] = w

        return self

    def transform(self, X, y, features):
        """
        Keep only the important columns and wrap them in a labelled DataFrame.

        :param X: array of data

        :param y: labels used as the DataFrame index

        :param features: feature names, indexable by column position

        :return: DataFrame with a (weight, feature) column MultiIndex
        """
        important_col = list(self._important_weight.keys())
        transform = X[:, important_col]

        weight_dict = self.important_features
        items = sorted(weight_dict.items())
        key = [k for k, v in items]
        weight = [v for k, v in items]
        feature_selected = features[key]
        transform_df = pd.DataFrame(transform, index=y,
                                    columns=[weight, feature_selected])
        transform_df.columns.names = ['weight', 'feature']
        transform_df.index.name = 'Class'
        return transform_df

    def fit_transform(self, X, y, features):
        # Average the weights of max_iter independent fit passes.
        m, n = X.shape
        weight = np.zeros(n)
        for _ in range(self.max_iter):
            self.fit(X, y)
            weight += self._weight / self.max_iter
        self._weight = weight
        # FIX: re-filter from scratch on the AVERAGED weights; previously the
        # dict kept entries selected by individual (un-averaged) passes.
        self._important_weight = dict()
        for i, w in enumerate(self._weight):
            if w >= self.tao:
                self._important_weight[i] = w
        return self.transform(X, y, features)

    @property
    def important_features(self):
        # {feature index: weight} for features passing the tao threshold.
        return self._important_weight

    @property
    def weight(self):
        # Raw per-feature weight vector from the last fit/fit_transform.
        return self._weight

    def weight_graph(self, features=[], path=''):
        # Plot weight vs. feature name; save to path if given, else show.
        weight_dict = self.important_features
        items = sorted(weight_dict.items())
        key = [k for k, v in items]
        weight = [v for k, v in items]
        feature_selected = features[key]
        plt.figure(figsize=(6, 5))
        ax = plt.plot(feature_selected, weight)
        plt.xlabel("Feature")
        plt.ylabel("Weight")
        plt.title(" Feature weights based on ReliefF ",
                  fontweight="semibold", fontsize='large')
        if path != '':
            plt.savefig(path + 'ReliefF.png', dpi=300)
        else:
            plt.show()
        plt.close()

class MRMR():
    def __init__(self, feature_num):
        """
        mRMR is a feature selection which maximises the feature-label correlation and minimises
        the feature-feature correlation. this implementation can only applied for numeric values,
        read more about mRMR, please refer :ref:`https://blog.csdn.net/littlely_ll/article/details/71749776`.

        :param feature_num: selected number of features
        """
        self.feature_num = feature_num
        self._selected_features = []   # column indices, in selection order
        self._selected_theta = []      # mRMR score of each selected column

    def fit(self, X, y):
        """
        Greedily select feature_num columns by the mRMR quotient criterion.

        :param X: a numpy array, shape (n_samples, n_features)

        :param y: the label, a list or one dimension array

        :return: self
        """

        if self.feature_num > X.shape[1]:
            self.feature_num = X.shape[1]
            import warnings
            warnings.warn("The feature_num has to be set less or equal to {}".format(X.shape[1]), UserWarning)

        MIs = self.feature_label_MIs(X, y)
        max_MI_arg = np.argmax(MIs)

        selected_features = []

        # Pair each MI with its column index so removals keep indices intact.
        MIs = list(zip(range(len(MIs)), MIs))
        selected_features.append(MIs.pop(int(max_MI_arg)))

        # Placeholder score for the seed feature (max label MI).
        selected_theta = [0.5]

        # FIX: guard the loop condition BEFORE appending; the old
        # 'while True: append; break-check' returned 2 features for
        # feature_num == 1.
        while len(selected_features) < self.feature_num:
            max_theta = float("-inf")
            max_theta_index = None

            for mi_outset in MIs:
                ff_mis = []
                for mi_inset in selected_features:
                    ff_mi = self.feature_feature_MIs(X[:, mi_outset[0]], X[:, mi_inset[0]])
                    ff_mis.append(ff_mi)
                # Quotient criterion: relevance / mean redundancy.
                # NOTE(review): raises ZeroDivisionError if all pairwise MIs
                # are exactly 0 — kept as in the original.
                theta = mi_outset[1] / (1 / len(selected_features) * sum(ff_mis))
                if theta >= max_theta:
                    max_theta = theta
                    max_theta_index = mi_outset
            selected_theta.append(max_theta)
            selected_features.append(max_theta_index)
            MIs.remove(max_theta_index)

        self._selected_features = [ind for ind, mi in selected_features]
        self._selected_theta = selected_theta

        return self

    def transform(self, X, y, features):
        # Keep the selected columns and wrap them in a labelled DataFrame
        # with a (score, feature) column MultiIndex.
        important_col = self._selected_features
        transform = X[:, important_col]

        feature_index = self.important_features
        feature = [features[i] for i in feature_index]
        score = self.features_score
        items = sorted(dict(zip(feature, score)).items())
        key = [k for k, v in items]
        score = [v for k, v in items]
        transform_df = pd.DataFrame(transform, index=y,
                                    columns=[score, key])
        transform_df.columns.names = ['score', 'feature']
        transform_df.index.names = ['Class']
        return transform_df

    def fit_transform(self, X, y, features):
        self.fit(X, y)
        return self.transform(X, y, features)

    def entropy(self, c):
        """
        Shannon entropy (bits) of an un-normalised count vector.

        :param c: counts (e.g. histogram output)

        :return: entropy in bits
        """
        c_normalized = c / float(np.sum(c))
        c_normalized = c_normalized[np.nonzero(c_normalized)]
        H = -sum(c_normalized * np.log2(c_normalized))
        return H

    def feature_label_MIs(self, arr, y):
        """
        Histogram-based feature-label mutual information per column.

        :param arr: data matrix, shape (n_samples, n_features)

        :param y: labels

        :return: list of MI values, one per column
        """
        m, n = arr.shape
        MIs = []
        p_y = np.histogram(y)[0]
        h_y = self.entropy(p_y)

        for i in range(n):
            p_i = np.histogram(arr[:, i])[0]
            p_iy = np.histogram2d(arr[:, i], y)[0]

            h_i = self.entropy(p_i)
            h_iy = self.entropy(p_iy)

            # MI(X;Y) = H(X) + H(Y) - H(X,Y)
            MI = h_i + h_y - h_iy
            MIs.append(MI)
        return MIs

    def feature_feature_MIs(self, x, y):
        """
        Histogram-based mutual information between two feature columns.

        :param x: first column

        :param y: second column

        :return: MI value
        """
        p_x = np.histogram(x)[0]
        p_y = np.histogram(y)[0]
        p_xy = np.histogram2d(x, y)[0]

        h_x = self.entropy(p_x)
        h_y = self.entropy(p_y)
        h_xy = self.entropy(p_xy)

        return h_x + h_y - h_xy

    @property
    def important_features(self):
        # Selected column indices, in selection order.
        return self._selected_features

    @property
    def features_score(self):
        # mRMR score per selected column (seed feature scored 0.5).
        return self._selected_theta

    def weight_graph(self, features=[], path=''):
        # Plot score vs. feature name; save to path if given, else show.
        feature_index = self.important_features
        feature = [features[i] for i in feature_index]
        score = self.features_score
        items = sorted(dict(zip(feature, score)).items())
        key = [k for k, v in items]
        score = [v for k, v in items]
        plt.figure(figsize=(6, 5))
        ax = plt.plot(key, score)
        plt.xlabel("Feature")
        plt.ylabel("Score")
        plt.title(" Feature scores based on MRMR ",
                  fontweight="semibold", fontsize='large')
        if path == '':
            plt.show()
        else:
            plt.savefig(path + 'MRMR.png', dpi=300)
        plt.close()
            
    
def strList2numberList(str_list):
    '''
    Map equal strings to equal numbers, numbered from 1 in order of first
    appearance (single O(n) pass; the old unique-list x full-list scan was
    O(u*n)).

    Arguments:
        str_list: list[str,...,str]
    Returns:
        number_list: list[float,...,float]

    '''
    codes = {}
    for s in str_list:
        if s not in codes:
            codes[s] = float(len(codes) + 1)
    return [codes[s] for s in str_list]


# from Classification.ClassicCls import ANN, SVM, PLS_DA, RF
# from Classification.CNN import CNN
# from Classification.SAE import SAE

from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
import sklearn.svm as svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from itertools import product
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import auc,roc_auc_score,roc_curve,precision_recall_curve
from sklearn.preprocessing import label_binarize
from skopt import BayesSearchCV
from sklearn.model_selection import KFold
from bayes_opt import BayesianOptimization
import copy
from hyperimage import HyperImage

class QualitativeAnalysis():
    '''
    Spectral qualitative-analysis (classification) module.

    Wraps several classifiers (SVM, random forest, PLS-DA, MLP) and three
    hyper-parameter search strategies (sklearn GridSearchCV, skopt
    BayesSearchCV, bayes_opt BayesianOptimization) around a fixed
    train/test split. Class labels are taken from the DataFrame index of
    the supplied frames.

    [1]PLS_DA: 
    [2]ANN: 
    [3]SVM: 
    [4]RF: 
    [5]CNN: 
    [6]SAE: 
    
    @blogs  : https://blog.csdn.net/Echo_Code?spm=1000.2115.3001.5343
    @github : https://github.com/FuSiry/OpenSA
    
    '''
    def __init__(self, df_train, df_test, method):
        # Classifier key used throughout: 'SVM', 'RF' or 'PLS-DA'.
        self.method = method

        # Populated lazily by getEstimator / getParams / getSpaces.
        self.estimator = None
        self.params = None
        self.spaces = None
        
        # Rows = samples, columns = wavelengths; the DataFrame index holds
        # the integer class label of each sample.
        self.X_train = np.array(df_train.values).astype(np.float64)
        self.X_test = np.array(df_test.values).astype(np.float64)
        self.y_train = np.array(df_train.index).astype(np.int64)
        self.y_test = np.array(df_test.index).astype(np.int64)
        # Column headers are assumed to be numeric wavelengths.
        self.train_feature = np.array(list(df_train.columns)).astype(np.float64)
        self.test_feature = np.array(list(df_test.columns)).astype(np.float64)
        # Sorted distinct class labels present in the training split.
        self.labels = list(set(list(self.y_train)))
        self.labels.sort()
        
        # print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))
    
    def getEstimator(self):
        '''Instantiate and cache the default (unconfigured) estimator for
        ``self.method``; returns it (None is kept for unknown keys).'''
        if self.method == 'SVM':
            self.estimator = svm.SVC(probability=True)
        if self.method == 'RF':
            self.estimator = RandomForestClassifier()
        if self.method == 'PLS-DA':
            self.estimator = PLSRegression()
            
        return self.estimator
    
    def getSpaces(self, method=''):
        '''Build and cache the Bayesian-search space for *method* (empty
        string falls back to ``self.method``); tuples are (low, high)
        bounds, lists are categorical choices.'''
        if method == '':
            method = self.method
        if method == 'SVM':
            self.spaces = [
                {'kernel': ['rbf'], 'C': (1e-8, 100.0), 'gamma':(1e-8, 100.0)},
                {'kernel': ['poly'], 'C': (1e-8, 100.0), 'degree': (1, 20)}
                ]
        if method == 'RF':
            self.spaces = {'n_estimators':(1, 500), 'max_depth':(1, 100), 
                           'min_samples_split':(2, 100),
                           'min_samples_leaf':(1, 100)}
        if method == 'PLS-DA':
            x, y = self.X_train.shape
            self.spaces = {'n_components':(1,min(x,y)+1)}
            
        return self.spaces
    
    def getParams(self, method=''):
        '''Build and cache the grid-search parameter grid for *method*
        (empty string falls back to ``self.method``).'''
        if method == '':
            method = self.method
        if method == 'SVM':
            self.params = [
                {'kernel': ['poly'], 'C': [1e-4,1e-3,1e-2,1e-1,0.2,0.5,1,3,5,7,9,15,20,25,30,40,50,60,70,80,90,100], 
                  'degree': list(range(1,20))},
                {'kernel': ['rbf'], 'C': [1e-4,1e-3,1e-2,1e-1,0.2,0.5,1,3,5,7,9,15,20,25,30,40,50,60,70,80,90,100], 
                  'gamma':[1e-7,1e-6,1e-5,1e-4,1e-3,1e-2,1e-1,0.2,0.5,1,3,5,7,9,15,20,25,30,40,50,60,70,80,90,100]}
                ]
        if method == 'RF':
            self.params = {'n_estimators':list(range(50,201,50)),
                           'max_depth':list(range(1,21,5)), 
                           'min_samples_split':list(range(2,21,5)),
                           'min_samples_leaf':list(range(1,21,5))}
        if method == 'PLS-DA':
            x, y = self.X_train.shape
            # n_components can be at most min(n_samples, n_features).
            self.params = {'n_components':list(range(1,min(x,y)+1))}
        
        return self.params
    
    @staticmethod
    def addParams(method='', params={}):
        '''Construct a fresh estimator of kind *method* configured with
        *params*. RF values are cast to int because bayes_opt proposes
        floats. (params={} is a mutable default but is never mutated
        in place, only rebound, so it is harmless here.)'''
        if method == 'SVM':
            estimator = svm.SVC(**params, probability=True)
        if method == 'RF':
            params = dict(zip(params.keys(), [int(i) for i in params.values()]))
            estimator = RandomForestClassifier(**params)
        if method == 'PLS-DA':
            estimator = PLSRegression(**params)
            
        return estimator
    
    @staticmethod
    def SVM(X_train, X_test, y_train, y_test, param):
        '''Fit an SVC configured by *param* and return
        (y_test, y_pred_test, accuracy_on_test).'''
        estimator = svm.SVC(**param, probability=True)
        estimator.fit(X_train, y_train)
        y_pred_test = estimator.predict(X_test)
        acc_test = accuracy_score(y_test, y_pred_test)
        
        return y_test, y_pred_test, acc_test
    
    @staticmethod
    def RF(X_train, X_test, y_train, y_test, param):
        '''Fit a random forest configured by *param* and return
        (y_test, y_pred_test).

        NOTE(review): acc_test is computed but not returned, unlike the
        sibling SVM() which returns a 3-tuple -- confirm intended shape.'''
        estimator = RandomForestClassifier(**param, oob_score=True)
        estimator.fit(X_train, y_train)
        y_pred_test = estimator.predict(X_test)
        acc_test = accuracy_score(y_test, y_pred_test)
    
        return y_test, y_pred_test
    
    @staticmethod
    def PLS_DA(X_train, X_test, y_train, y_test, n_components=1):
        '''PLS-DA classification: one-hot encode labels, fit PLS
        regression, then decode predictions by argmax over the dummy
        columns. Returns (y_test, y_pred_test); acc is computed but
        unused.'''
        y_train = pd.get_dummies(y_train)
        estimator = PLSRegression(n_components=n_components)
        estimator.fit(X_train, y_train)
        y_pred_test = estimator.predict(X_test)
        y_pred_test = np.array([y_train.columns[np.argmax(i)] for i in y_pred_test])
        acc = accuracy_score(y_test, y_pred_test)
    
        return y_test, y_pred_test
    
    def PLS_DA_cv_test(self, cv=10, n_fold=10, max_iter=1000):
        '''
        Exhaustive PLS-DA component search with cv-fold cross validation.

        For every feasible n_components, fits on the full training split
        (test-set accuracy) and on cv KFold splits (mean CV accuracy),
        then refits the best model by mean CV accuracy.

        NOTE(review): n_fold and max_iter are accepted but never used;
        PLSRegression is hard-coded to max_iter=100 below -- confirm.

        Returns
        -------
        (result_df, best_model, y_pred_test, y_pred_test_proba,
         best_acc_train, best_acc_test)
        '''
        train_x = self.X_train
        test_x = self.X_test
        train_y = self.y_train
        test_y = self.y_test
        
        component = min(train_x.shape[0], train_x.shape[1])
        k_range = np.linspace(1, component, component)
        kf = KFold(n_splits=cv, random_state=None, shuffle=True)
        accuracy_cv = np.zeros((1,component))
        accuracy_test = np.zeros((1,component))
        for j in range(component):
            p = 0
            acc = 0
            # Fit on the full training split for the test-set score.
            model = PLSRegression(n_components=j+1, max_iter=100)
            train_y_dum = pd.get_dummies(train_y)
            model.fit(train_x, train_y_dum)
            y_pred = model.predict(test_x)
            y_pred = np.array([train_y_dum.columns[np.argmax(i)] for i in y_pred]) 
            accuracy_test[:,j] = accuracy_score(test_y, y_pred)

            # Cross-validated score for the same component count.
            for train_index, test_index in kf.split(train_x):
                X_train, X_test = train_x[train_index], train_x[test_index]
                y_train, y_test = train_y[train_index], train_y[test_index]
                y_train_cv = pd.get_dummies(y_train)
                model_cv=PLSRegression(n_components=j+1, max_iter=100)
                model_cv.fit(X_train, y_train_cv)
                Y_pred = model_cv.predict(X_test)
                Y_pred = np.array([y_train_cv.columns[np.argmax(i1)] for i1 in Y_pred]) 
                acc = accuracy_score(y_test, Y_pred) + acc
                p = p+1
            accuracy_cv[:,j] = acc / p
        
        # Same column layout as the grid/bayes search result frames.
        result = pd.concat([pd.Series(accuracy_cv.ravel()), pd.Series(accuracy_test.ravel()),
                            pd.Series([{'n_components': int(n)} for n in k_range])], axis=1)
        result.columns=['mean_test_score', 'test_set_score', 'params']
        
        # Refit the winner (highest mean CV accuracy) on all training data.
        best = result.copy().sort_values(by='mean_test_score', ascending=False).reset_index(drop=True).loc[0,:]
        best_acc_train = best['mean_test_score']
        best_acc_test = best['test_set_score']
        best_params = best['params']
        best_model = QualitativeAnalysis.addParams(method='PLS-DA', params=best_params)
        train_y = pd.get_dummies(train_y)
        best_model.fit(train_x, train_y)
        y_pred_test_proba = best_model.predict(test_x)
        y_pred_test = np.array([train_y.columns[np.argmax(i2)] for i2 in y_pred_test_proba]) 
        
        return result, best_model, y_pred_test, y_pred_test_proba, best_acc_train, best_acc_test
    
    @staticmethod
    def ANN(X_train, X_test, y_train, y_test, StandScaler=None):
        '''Train a fixed-architecture MLP classifier and return its
        test-set accuracy. Optionally standardize the data first.'''
        if StandScaler:
            scaler = StandardScaler() # standardization transform
            X_train = scaler.fit_transform(X_train)
            X_test = scaler.transform(X_test)
    
        # Fixed architecture: two hidden layers of 10 and 8 neurons.
        # solver='lbfgs': L-BFGS performs well on small data sets, Adam is
        # more robust, SGD can be best with careful tuning of the learning
        # schedule and iteration count.
        #clf =  MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(8,8), random_state=1, activation='relu')
        clf =  MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto', beta_1=0.9,
                      beta_2=0.999, early_stopping=False, epsilon=1e-08,
                      hidden_layer_sizes=(10, 8), learning_rate='constant',
                      learning_rate_init=0.001, max_iter=200, momentum=0.9,
                      nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                      solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
                      warm_start=False)
    
        clf.fit(X_train,y_train.ravel())
        predict_results=clf.predict(X_test)
        acc = accuracy_score(predict_results, y_test.ravel())
    
        return acc
    
    def Bayesopt(self, init_points=5, n_iter=25):
        '''
        Hyper-parameter search with bayes_opt.BayesianOptimization.

        Maximizes (a) the 10-fold CV accuracy and (b) the test-set
        accuracy over ``self.spaces`` (call getSpaces first), then refits
        the model with the best CV parameters.

        NOTE(review): in both RF objective functions below,
        min_samples_leaf is set from int(min_samples_split) -- this looks
        like a copy/paste bug; confirm it should be int(min_samples_leaf).
        NOTE(review): cross_val_score is not imported in this section --
        presumably imported earlier in the file; verify.

        Returns
        -------
        (result_df, best_model, y_pred_test, y_pred_test_proba,
         best_acc_train, best_acc_test)
        '''
        method = self.method
        train_x = self.X_train
        test_x = self.X_test
        train_y = self.y_train
        test_y = self.y_test
        spaces = self.spaces
        # Deep copy because SVM spaces get their 'kernel' entry deleted.
        pbounds = copy.deepcopy(spaces)
        
        if type(pbounds).__name__ != 'list':
            pbounds = [pbounds]
            
        result_frame = []
        for pbound in pbounds:
            if method == 'SVM':
                # bayes_opt only handles numeric bounds, so the categorical
                # kernel is fixed per space and removed from the bounds.
                kernel = pbound['kernel'][0]
                del pbound['kernel']
                def crossval(**pbound):
                    val = cross_val_score(svm.SVC(kernel=kernel, **pbound, probability=True), train_x, train_y, scoring=None, cv=10).mean()
                    return val
                def test(**pbound):
                    val = svm.SVC(kernel=kernel, **pbound, probability=True).fit(train_x, train_y).score(test_x, test_y)
                    return val
                
            if method == 'RF':
                def crossval(n_estimators, min_samples_split, min_samples_leaf, max_depth):
                    val = cross_val_score(RandomForestClassifier(n_estimators=int(n_estimators),
                        min_samples_split=int(min_samples_split), min_samples_leaf=int(min_samples_split),
                        max_depth=int(max_depth)), train_x, train_y, scoring=None, cv=10).mean()
                    return val
                def test(n_estimators, min_samples_split, min_samples_leaf, max_depth):
                    val = RandomForestClassifier(n_estimators=int(n_estimators),
                        min_samples_split=int(min_samples_split), min_samples_leaf=int(min_samples_split),
                        max_depth=int(max_depth)).fit(train_x, train_y).score(test_x, test_y)
                    return val

            # Two independent optimizations share random_state=1234 so the
            # sampled parameter points line up row-by-row.
            optimizer_crossval = BayesianOptimization(f=crossval, pbounds=pbound, random_state=1234, verbose=0)
            optimizer_crossval.maximize(init_points=init_points, n_iter=n_iter)
            optimizer_test = BayesianOptimization(f=test, pbounds=pbound, random_state=1234, verbose=0)
            optimizer_test.maximize(init_points=init_points, n_iter=n_iter)

            result_crossval_df = pd.DataFrame(optimizer_crossval.res)
            result_test_df = pd.DataFrame(optimizer_test.res)
            for result_df in [result_crossval_df, result_test_df]:
                params = result_df.copy()
                del params['target']
                params = list(params['params'])
                if method == 'SVM':
                    # Restore the kernel into each sampled parameter dict.
                    for param in params:
                        param['kernel'] = kernel
            result_df = pd.concat([result_crossval_df['target'], result_test_df['target'], 
                                    pd.Series(params)], axis=1)
            result_frame.append(result_df)
        result = pd.concat(result_frame, axis=0)
        result.columns = ['mean_test_score', 'test_set_score', 'params']
        best = result.copy().sort_values(by='mean_test_score', ascending=False).reset_index(drop=True).loc[0,:]
        best_acc_train = best['mean_test_score']
        best_acc_test = best['test_set_score']
        best_params = best['params']
        best_model = QualitativeAnalysis.addParams(method=method, params=best_params)
        best_model.fit(train_x, train_y)
        y_pred_test = best_model.predict(test_x)
        y_pred_test_proba = best_model.predict_proba(test_x)
        
        return result, best_model, y_pred_test, y_pred_test_proba, best_acc_train, best_acc_test
    
    
    def Bayesopt_visualize(self, result, path=''):
        '''Plot mean CV score and test-set score against each searched
        hyper-parameter, one figure per search space. Shows interactively
        when *path* is '', otherwise saves BayesSearch_<i>.png.'''
        spaces = self.spaces
        
        result_acc = result[['mean_test_score', 'test_set_score']].reset_index(drop=True)
        result_params = pd.DataFrame(list(result['params']))
        df = pd.concat([result_acc, result_params], axis=1)
        if type(spaces).__name__ == 'list':
            # Multiple spaces (e.g. SVM rbf/poly) produce NaN columns where
            # a parameter does not apply; split the frame on those columns.
            df_list = []
            for columname in df.columns:
                if df[columname].count() != len(df):
                    loc = df[columname][df[columname].isnull().values==True].index.tolist()
                    df_split = df.iloc[loc, :]
                    # NOTE(review): positional axis argument to drop() is
                    # removed in pandas>=2.0; should be drop(columns=...).
                    df_split = df_split.drop(columname, 1).reset_index(drop=True)
                    df_list.append(df_split)
            df_list.reverse()
        else:
            df_list = [df]
            spaces = [spaces]
        
        for i, space, df in zip(range(len(spaces)), spaces, df_list):
            results = ['mean_test_score', 'test_set_score']
            fig, axes = plt.subplots(1, len(space), 
                                     figsize = (5*len(space), 7),
                                     sharey='row')
            axes[0].set_ylabel("Score", fontsize=20)
        
            for idx, param_name in enumerate(space.keys()):
                # Average the scores over all samples sharing a value of
                # this parameter.
                grouped_df = df.groupby(f'{param_name}')[results]\
                    .agg({'mean_test_score': 'mean',
                          'test_set_score': 'mean'}).sort_index()
                    
                axes[idx].set_xlabel(param_name, fontsize=20)
                axes[idx].set_ylim(0.0, 1.1)
                lw = 2
                if grouped_df.shape[0] == 1:
                    # A single sampled value cannot be drawn as a line.
                    axes[idx].scatter(grouped_df.index, grouped_df['mean_test_score'], label="Cross-validation score",
                                color="navy")
                    axes[idx].scatter(grouped_df.index, grouped_df['test_set_score'], label="Testing score",
                                color="red")
                else:
                    axes[idx].plot(grouped_df.index, grouped_df['mean_test_score'], label="Cross-validation score",
                                color="navy", lw=lw)
                    axes[idx].plot(grouped_df.index, grouped_df['test_set_score'], label="Testing score",
                                color="red", lw=lw)
                
            handles, labels = axes[1].get_legend_handles_labels()
            fig.suptitle('BayesSearch: ' + self.method, fontsize=25)
            fig.legend(handles, labels, loc=8, ncol=20, fontsize=20)
        
            fig.subplots_adjust(bottom=0.25, top=0.85)  
            
            if path == '':
                plt.show()
            else:
                plt.savefig(path+'BayesSearch_'+str(i+1)+'.png', dpi=300)
            plt.close()

    
    def BayessearchCV(self, n_iter=50, cv=10):
        '''Hyper-parameter search with skopt.BayesSearchCV over
        ``self.spaces`` using ``self.estimator`` (call getEstimator and
        getSpaces first). Returns (cv_results_, best_model, y_pred_test,
        best_acc_train, best_acc_test).'''
        X_train = self.X_train
        X_test = self.X_test
        y_train = self.y_train
        y_test = self.y_test
        
        model = BayesSearchCV(self.estimator, self.spaces, n_iter=n_iter, 
                              n_points=n_iter, cv=cv, refit=True, n_jobs=1)
        model.fit(X_train, y_train)
        result = model.cv_results_
        best_model = model.best_estimator_
        y_pred_test = best_model.predict(X_test)
        best_acc_train = model.best_score_
        best_acc_test = accuracy_score(y_test, y_pred_test)
        print('cv_results_:',model.cv_results_.keys())
        print('best_score_:',model.best_score_)
        print('best_estimator_:',model.best_estimator_) 
        print('pred_test_y:',y_pred_test)
        print("acc is {}".format(best_acc_test))
        return result, best_model, y_pred_test, best_acc_train, best_acc_test

    @staticmethod
    def BayessearchCV_visualize(result):
        '''Plot mean CV score against every searched parameter found in a
        cv_results_ mapping and return the results ranked by
        rank_test_score.'''
        keys = []
        for key in result.keys():
            if 'param_' in key:
                keys.append(key)
                df = pd.DataFrame(result).loc[:, [key,'mean_test_score']].sort_values(by=key)
                fig = df.plot(x=0 ,y=1)
        df_result = pd.DataFrame(result).loc[:, ['rank_test_score',
              'mean_test_score']+keys].sort_values(by='rank_test_score')
        return df_result

    def GridsearchCV(self, cv=10):
        '''Exhaustive grid search over ``self.params`` (PLS-DA is routed
        to the dedicated PLS_DA_cv_test). Returns (result, best_model,
        y_pred_test, y_pred_test_proba, best_acc_train, best_acc_test).'''
        X_train = self.X_train
        X_test = self.X_test
        y_train = self.y_train
        y_test = self.y_test
        
        if self.method == 'PLS-DA':
            result, best_model, y_pred_test, y_pred_test_proba, best_acc_train, best_acc_test \
                = self.PLS_DA_cv_test(cv=cv, n_fold=10, max_iter=1000)
        else:
            model = GridSearchCV(self.estimator, self.params, cv=cv, refit=True, 
                                 return_train_score=True, scoring=None, n_jobs=1)
            model.fit(X_train, y_train)
            result = model.cv_results_
            best_model = model.best_estimator_
            y_pred_test = best_model.predict(X_test)
            y_pred_test_proba = best_model.predict_proba(X_test)
            best_acc_train = model.best_score_
            best_acc_test = accuracy_score(y_test, y_pred_test)
        # print('cv_results_:',model.cv_results_.keys())
        # print('best_score_:',model.best_score_)
        # print('best_estimator_:',model.best_estimator_) 
        # print('pred_test_y:',y_pred_test)
        # print("acc is {}".format(best_acc_test))
        
        return result, best_model, y_pred_test, y_pred_test_proba, best_acc_train, best_acc_test
    

    def GridsearchCV_test(self, result):
        '''Refit every parameter combination of ``self.params`` on the
        training split, score it on the held-out test split, and append a
        "test_set_score" column to a GridsearchCV cv_results_ mapping.
        Combination order must match GridSearchCV's enumeration order.

        NOTE(review): "for params in params" shadows the outer grid list
        with each sub-grid; it works because the loop rebinding is
        intended, but renaming the loop variable would be clearer.'''
        params = self.params
        train_x = self.X_train
        test_x = self.X_test
        train_y = self.y_train
        test_y = self.y_test
        
        if self.method == 'PLS-DA':
            # PLS_DA_cv_test already produces test-set scores.
            pass
        else:
            score_list = []
            if type(params).__name__ == 'list':
                params_list = []
                for params in params:
                    # Expand the sub-grid into the full cartesian product.
                    params_lst = [dict(zip(params, v)) for v in product(*params.values())]
                    for param in params_lst:
                        estimator = QualitativeAnalysis.addParams(method=self.method, params=param)
                        estimator.fit(train_x, train_y)
                        score = estimator.score(test_x, test_y)
                        score_list.append(score)
                    params_list.extend(params_lst)
            else:
                params_list = [dict(zip(params, v)) for v in product(*params.values())]
                for param in params_list:
                    estimator = QualitativeAnalysis.addParams(method=self.method, params=param)
                    estimator.fit(train_x, train_y)
                    score = estimator.score(test_x, test_y)
                    score_list.append(score)
                    
            result["test_set_score"] = score_list
            
            keys = []
            for key in result.keys():
                if 'param_' in key:
                    keys.append(key)
            result = pd.DataFrame(result).loc[:, ['test_set_score','mean_test_score',
                        'mean_train_score','std_test_score', 'std_train_score'] + keys]

        return result

    def GridsearchCV_visualize(self, result, path=''):
        '''Plot train/CV/test scores against each grid parameter, one
        figure per sub-grid (PLS-DA gets a single component-count plot).
        Shows interactively when *path* is '', otherwise saves
        GridSearch_<i>.png.'''
        params = self.params
        
        def pooled_var(stds):
            # https://en.wikipedia.org/wiki/Pooled_variance#Pooled_standard_deviation
            n = 5 # size of each group
            return np.sqrt(sum((n-1)*(stds**2))/ len(stds)*(n-1))
        
        if self.method == 'PLS-DA':
            x = list(range(1, result.shape[0]+1))
            fig = plt.figure(figsize=(6,5))
            plt.plot(x, result['mean_test_score'], 'o-',label="Cross-validation score",color="r")
            plt.plot(x, result['test_set_score'], 'o-',label="Testing score",color="b")
            plt.xlabel("N components")
            plt.ylabel("Score")
            plt.legend(loc="best")
            plt.rc('font',family='Times New Roman')
            plt.rcParams['font.size'] = 10
            plt.title('GridSearch: ' + self.method, fontsize=20)
            if path == '':
                plt.show()
            else:
                plt.savefig(path+'GridSearch_1.png', dpi=300)
            plt.close()
            
        else:
            df = pd.DataFrame(result)
            if type(params).__name__ == 'list':
                # As in Bayesopt_visualize: split on NaN parameter columns,
                # one frame per sub-grid.
                df_list = []
                for columname in df.columns:
                    if df[columname].count() != len(df):
                        loc = df[columname][df[columname].isnull().values==True].index.tolist()
                        df_split = df.iloc[loc, :]
                        # NOTE(review): positional axis argument to drop()
                        # is removed in pandas>=2.0.
                        df_split = df_split.drop(columname, 1)
                        df_list.append(df_split)
                df_list.reverse()
            else:
                df_list = [df]
                params = [params]
                
            for i, param, df in zip(range(len(params)), params, df_list):
                results = ['mean_test_score','mean_train_score','std_test_score', 
                           'std_train_score', 'test_set_score']
                fig, axes = plt.subplots(1, len(param), 
                                         figsize = (5*len(param), 7),
                                         sharey='row')
                axes[0].set_ylabel("Score", fontsize=20)
        
                for idx, (param_name, param_range) in enumerate(param.items()):
                    grouped_df = df.groupby(f'param_{param_name}')[results]\
                        .agg({'mean_train_score': 'mean',
                              'mean_test_score': 'mean',
                              'std_train_score': pooled_var,
                              'std_test_score': pooled_var,
                              'test_set_score': 'mean'})
                    previous_group = df.groupby(f'param_{param_name}')[results]
                    axes[idx].set_xlabel(param_name, fontsize=20)
                    axes[idx].set_ylim(0.0, 1.1)
                    lw = 2
                    axes[idx].plot(param_range, grouped_df['mean_train_score'], label="Training score",
                                color="darkorange", lw=lw)
                    # Shaded band: pooled standard deviation around the mean.
                    axes[idx].fill_between(param_range,grouped_df['mean_train_score'] - grouped_df['std_train_score'],
                                    grouped_df['mean_train_score'] + grouped_df['std_train_score'], alpha=0.2,
                                    color="darkorange", lw=lw)
                    axes[idx].plot(param_range, grouped_df['mean_test_score'], label="Cross-validation score",
                                color="navy", lw=lw)
                    axes[idx].fill_between(param_range, grouped_df['mean_test_score'] - grouped_df['std_test_score'],
                                    grouped_df['mean_test_score'] + grouped_df['std_test_score'], alpha=0.2,
                                    color="navy", lw=lw)
                    axes[idx].plot(param_range, grouped_df['test_set_score'], label="Testing score",
                                color="red", lw=lw*2.5)
                    
                handles, labels = axes[0].get_legend_handles_labels()
                fig.suptitle('GridSearch: ' + self.method, fontsize=30)
                fig.legend(handles, labels, loc=8, ncol=20, fontsize=20)
        
                fig.subplots_adjust(bottom=0.25, top=0.85)  
                
                if path == '':
                    plt.show()
                else:
                    plt.savefig(path+'GridSearch_'+str(i+1)+'.png', dpi=300)
                plt.close()
    
    def EstimatorTest(self, path=''):
        '''Run the classifier selected by ``self.method`` once and
        optionally dump the result to CSV.

        NOTE(review): several visible issues to confirm --
        * the key tested here is "PLS_DA" while the rest of the class uses
          'PLS-DA';
        * SVM is called as (X_train, y_train, X_test, y_test) whereas its
          signature is (X_train, X_test, y_train, y_test, param), so the
          arguments are misordered and `param` is missing;
        * RF requires a `param` argument that is not supplied;
        * CNN/SAE are not defined on this class (their imports are
          commented out above);
        * SVM/RF/PLS_DA return tuples, but `acc` is written via .to_csv
          as if it were a DataFrame/Series.'''
        X_train = self.X_train
        X_test = self.X_test
        y_train = self.y_train
        y_test = self.y_test
        method = self.method
        
        if method == "PLS_DA":
            acc = QualitativeAnalysis.PLS_DA(X_train, X_test, y_train, y_test)
        elif method == "ANN":
            acc = QualitativeAnalysis.ANN(X_train, X_test, y_train, y_test)
        elif method == "SVM":
            acc = QualitativeAnalysis.SVM(X_train, y_train, X_test, y_test)
        elif method == "RF":
            acc = QualitativeAnalysis.RF(X_train, X_test, y_train, y_test)
        elif method == "CNN":
            acc = QualitativeAnalysis.CNN(X_train, X_test, y_train, y_test, 16, 160, 4)
        elif method == "SAE":
            acc = QualitativeAnalysis.SAE(X_train, X_test, y_train, y_test)
        else:
            print("no this model of QuantitativeAnalysis")
    
        if path != '':
            acc.to_csv(path_or_buf = path+'Classification_'+ method +'.csv')
    
        return acc


    def report(y_test, y_pred, path=''):
        '''Build a sklearn classification report and optionally persist it
        as JSON.

        NOTE(review): this is missing @staticmethod (or a self parameter):
        called on an instance, y_test would be bound to the instance.
        Confirm it is only ever called as QualitativeAnalysis.report(...).'''
        report = classification_report(y_test, y_pred)
        if path != '':
            HyperImage.saveData2Json(report, path = path + 'report.json')
        
        return report

    def plot_confusion_matrix(self, y_test, y_pred, matrix=[], labels='', 
                              cmap=plt.cm.Blues, normalize=True, path=''):
        '''Plot (and return) the confusion matrix, row-normalized by
        default. A precomputed matrix and explicit labels may be passed;
        otherwise they are derived from the predictions / ``self.labels``.

        NOTE(review): matrix=[] is a mutable default, and `matrix == []`
        misbehaves if a numpy array is passed; prefer a None sentinel.'''
        if matrix == []:
            matrix = confusion_matrix(y_test, y_pred)
        if labels == '':
            labels = self.labels
        from sklearn.metrics import ConfusionMatrixDisplay
        if normalize == True:
            # Row-normalize: each true class sums to 1.
            matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
        disp = ConfusionMatrixDisplay(confusion_matrix=matrix, display_labels=labels)
        disp.plot(cmap=cmap)
        if path == '':
            plt.show()
        else:
            plt.savefig(path+'Confusion_Matrix.png', dpi=300)
        plt.close()
        
        return matrix
    
    @staticmethod
    def multiClassification_PR(result_y, pred_result_y, path=''):
        '''Plot one-vs-rest precision-recall curves per class plus the
        micro-average, from true labels (1-based) and predicted
        probabilities/scores of shape (n_samples, n_classes).

        NOTE(review): the x/y axis labels below say "False/True Positive
        Rate" -- apparently copied from the ROC plot; for a PR curve they
        should be Recall/Precision. Note also plt.plot(precision, recall)
        puts precision on x.'''
        n_classes = len(set(result_y))
        if len(set(result_y)) != 2:
            result_y = label_binarize(result_y, classes=[i+1 for i in range(n_classes)])
        else:
            # Binary case: binarize against a dummy third class, keep 2 cols.
            result_y = label_binarize(result_y, classes=[1,2,0])[:, 0:2]
        pred_result_y = np.array(pred_result_y)
        precision = dict()
        recall = dict()
        for i in range(n_classes):
            precision[i], recall[i], _ = precision_recall_curve(result_y[:, i], pred_result_y[:, i])
        plt.rcParams['font.family']=['Times New Roman']
        plt.figure()
        lw = 1
        # classes
        colors = ['blue', 'red', 'green', 'pink', 'magenta']
        for i, color in zip(range(n_classes), colors):
            plt.plot(precision[i], recall[i], color=color, lw=lw+0.5,
                     label='PR curve of class {0}'.format(i+1))
        # micro
        precision["micro"], recall["micro"], _ = precision_recall_curve(result_y.ravel(),pred_result_y.ravel())
        plt.step(recall['micro'], precision['micro'], where='post', label='micro-averaged over all classes')
        plt.plot([1, 0], [0, 1], 'k--', lw=lw)
        plt.xlim([-0.05, 1.05])
        plt.ylim([0, 1])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Precision Recall curve for multi-class data')
        plt.legend(loc="lower left")
        if path == '':
            plt.show()
        else:
            plt.savefig(path+'P-R_curve.png', dpi=300)
        plt.close()
        
    @staticmethod
    def multiClassification_ROC(result_y, pred_result_y, path=''):
        '''Plot one-vs-rest ROC curves per class plus micro- and
        macro-averages, from true labels (1-based) and predicted
        probabilities/scores of shape (n_samples, n_classes).'''
        n_classes = len(set(result_y))
        if len(set(result_y)) != 2:
            result_y = label_binarize(result_y, classes=[i+1 for i in range(n_classes)])
        else:
            # Binary case: binarize against a dummy third class, keep 2 cols.
            result_y = label_binarize(result_y, classes=[1,2,0])[:, 0:2]
        pred_result_y = np.array(pred_result_y)
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(result_y[:, i], pred_result_y[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
        plt.rcParams['font.family']=['Times New Roman']
        plt.figure()
        lw = 1
        # classes
        colors = ['blue', 'red', 'green', 'pink', 'magenta']
        for i, color in zip(range(n_classes), colors):
            plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                     label='ROC curve of class {0} (AUC = {1:0.3f})'
                     ''.format(i+1, roc_auc[i]))
        # micro
        fpr["micro"], tpr["micro"], _ = roc_curve(result_y.ravel(), pred_result_y.ravel())
        roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
        plt.plot(fpr["micro"], tpr["micro"],
                 label='micro-average ROC curve (area = {0:0.2f})'
                       ''.format(roc_auc["micro"]),
                 color='deeppink', linestyle=':', linewidth=4)
        # macro: average the per-class TPR over the union of FPR points.
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(n_classes):
            mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
        mean_tpr /= n_classes
        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
        plt.plot(fpr["macro"], tpr["macro"],
                 label='macro-average ROC curve (area = {0:0.2f})'
                       ''.format(roc_auc["macro"]),
                 color='navy', linestyle=':', linewidth=4)
        plt.plot([0, 1], [0, 1], 'k--', lw=lw)
        plt.xlim([-0.05, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver Operating Characteristic for multi-class data')
        plt.legend(loc="lower right")
        if path == '':
            plt.show()
        else:
            plt.savefig(path+'ROC_curve.png', dpi=300)
        plt.close()



from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import scale,MinMaxScaler,Normalizer,StandardScaler
from sklearn.metrics import mean_squared_error,r2_score,mean_absolute_error
from sklearn.neural_network import MLPRegressor
import numpy as np


class QuantitativeAnalysis():
    '''
    Quantitative spectral analysis module.

    [1] PLSR

    @blogs  : https://blog.csdn.net/Echo_Code?spm=1000.2115.3001.5343
    @github : https://github.com/FuSiry/OpenSA

    '''
    def __init__(self, df_train, df_test, method):
        '''
        Parameters
        ----------
        df_train : pandas.DataFrame
            Training spectra. Rows are samples, columns are numeric
            features (e.g. wavelengths); the index holds the numeric
            regression targets.
        df_test : pandas.DataFrame
            Test spectra with the same layout as ``df_train``.
        method : str
            Model identifier, e.g. 'PLSR'.

        '''
        self.method = method

        # Filled lazily by getEstimator() / getParams().
        self.estimator = None
        self.params = None
        self.spaces = None

        # Feature matrices, shape (n_samples, n_features), float64.
        self.X_train = np.array(df_train.values).astype(np.float64)
        self.X_test = np.array(df_test.values).astype(np.float64)
        # Regression targets are stored in the DataFrame index.
        self.y_train = np.array(df_train.index).astype(np.float64)
        self.y_test = np.array(df_test.index).astype(np.float64)
        # Column headers interpreted as numeric feature positions.
        self.train_feature = np.array(list(df_train.columns)).astype(np.float64)
        self.test_feature = np.array(list(df_test.columns)).astype(np.float64)
        # Sorted unique target values of the training split.
        self.labels = sorted(set(self.y_train.tolist()))

    def getEstimator(self):
        '''Return (and cache) an unconfigured estimator for ``self.method``.

        Returns
        -------
        sklearn estimator or None
            ``None`` when the method name is not recognised.
        '''
        if self.method == 'PLSR':
            self.estimator = PLSRegression()
        return self.estimator

    def getParams(self, method=''):
        '''Return (and cache) the hyper-parameter search grid for ``method``.

        Parameters
        ----------
        method : str, optional
            Model identifier; defaults to ``self.method`` when empty.

        Returns
        -------
        dict or None
            Parameter grid, or ``None`` for unknown methods.
        '''
        if method == '':
            method = self.method
        if method == 'PLSR':
            n_samples, n_features = self.X_train.shape
            # The number of PLS components cannot exceed min(n_samples, n_features).
            self.params = {'n_components': list(range(1, min(n_samples, n_features) + 1))}
        return self.params

    def PLSR(self, n_components=10):
        '''Fit a PLS regression model on the training split.

        Parameters
        ----------
        n_components : int, optional
            Number of latent variables. The default is 10.

        Returns
        -------
        sklearn.cross_decomposition.PLSRegression
            The fitted model.
        '''
        # NOTE: the previous version also computed train/test R2 and RMSE
        # here but discarded them; only the fitted model was ever returned.
        model = PLSRegression(n_components=n_components)
        model.fit(self.X_train, self.y_train)
        return model

    @staticmethod
    def calculate_vips(model):
        '''Variable Importance in Projection (VIP) scores of a fitted PLS model.

        https://blog.csdn.net/fjsd155/article/details/93414969
        https://github.com/scikit-learn/scikit-learn/issues/7050
        https://www.codeleading.com/article/77096220783/
        https://www.sciencedirect.com/science/article/pii/S0169743914001786

        Parameters
        ----------
        model : PLSRegression
            A fitted model exposing ``x_scores_``, ``x_rotations_`` and
            ``y_loadings_``.

        Returns
        -------
        numpy.ndarray
            One VIP score per input feature, shape (n_features,).
        '''
        t = model.x_scores_      # scores T, shape (n_samples, h)
        w = model.x_rotations_   # rotations W, shape (n_features, h)
        q = model.y_loadings_    # y loadings Q
        p, h = w.shape
        vips = np.zeros((p,))
        # Per-component variance explained in y (diagonal of T'T Q'Q).
        s = np.diag(np.matmul(np.matmul(np.matmul(t.T, t), q.T), q)).reshape(h, -1)
        total_s = np.sum(s)
        for i in range(p):
            # Squared, column-normalised weight of feature i in each component.
            weight = np.array([(w[i, j] / np.linalg.norm(w[:, j])) ** 2 for j in range(h)])
            vips[i] = np.sqrt(p * (np.matmul(s.T, weight)) / total_s)
        return vips

    def _cv_metrics(self, kf, X, y, n_components, max_iter):
        '''Mean R2 and RMSE over the KFold splits for one component count.'''
        r2_sum = 0.0
        rmse_sum = 0.0
        n_splits = 0
        for fit_idx, val_idx in kf.split(X):
            fold_model = PLSRegression(n_components=n_components, max_iter=max_iter)
            fold_model.fit(X[fit_idx], y[fit_idx])
            pred = fold_model.predict(X[val_idx])
            r2_sum += r2_score(y[val_idx], pred)
            rmse_sum += np.sqrt(mean_squared_error(y[val_idx], pred))
            n_splits += 1
        return r2_sum / n_splits, rmse_sum / n_splits

    def PLSR_cv_test(self, label, cv=10, n_fold=10, max_iter=1000):
        '''Sweep the PLS component count and report train/test/CV metrics.

        For every component count from 1 to min(n_samples, n_features), a
        model is fitted on the whole training split, evaluated on the test
        split, cross-validated on the training split, and its VIP scores
        recorded.

        Parameters
        ----------
        label : str
            Tag copied into the 'Label' column of every result row.
        cv : int, optional
            Number of (shuffled) KFold splits. The default is 10.
        n_fold : int, optional
            Unused; kept for backward compatibility with existing callers.
        max_iter : int, optional
            Maximum NIPALS iterations per model. The default is 1000.
            (Previously this argument was ignored and 100 was hard-coded.)

        Returns
        -------
        pandas.DataFrame
            One row per component count with columns Label, params,
            R2_Train, R2_Test, R2_CV, RMSE_Train, RMSE_Test, RMSE_CV, VIPs.
        '''
        train_x, train_y = self.X_train, self.y_train
        test_x, test_y = self.X_test, self.y_test
        component = min(train_x.shape[0], train_x.shape[1])
        kf = KFold(n_splits=cv, random_state=None, shuffle=True)

        R2_Train = np.zeros(component)
        RMSE_Train = np.zeros(component)
        R2_Test = np.zeros(component)
        RMSE_Test = np.zeros(component)
        R2_CV = np.zeros(component)
        RMSE_CV = np.zeros(component)
        VIPs = []
        for j in range(component):
            # BUG FIX: honour the max_iter argument (was hard-coded to 100).
            model = PLSRegression(n_components=j + 1, max_iter=max_iter)
            model.fit(train_x, train_y)
            train_pred = model.predict(train_x)
            test_pred = model.predict(test_x)
            R2_Train[j] = r2_score(train_y, train_pred)
            RMSE_Train[j] = np.sqrt(mean_squared_error(train_y, train_pred))
            R2_Test[j] = r2_score(test_y, test_pred)
            RMSE_Test[j] = np.sqrt(mean_squared_error(test_y, test_pred))
            R2_CV[j], RMSE_CV[j] = self._cv_metrics(kf, train_x, train_y, j + 1, max_iter)
            VIPs.append(tuple(QuantitativeAnalysis.calculate_vips(model)))

        result = pd.concat(
            [pd.Series([label] * component),
             pd.Series([{'n_components': n + 1} for n in range(component)]),
             pd.Series(R2_Train), pd.Series(R2_Test), pd.Series(R2_CV),
             pd.Series(RMSE_Train), pd.Series(RMSE_Test), pd.Series(RMSE_CV),
             pd.Series(VIPs)], axis=1)
        result.columns = ['Label', 'params', 'R2_Train', 'R2_Test', 'R2_CV',
                          'RMSE_Train', 'RMSE_Test', 'RMSE_CV', 'VIPs']
        # NOTE: a block of unreachable classification grid-search code
        # (GridSearchCV / accuracy_score on undefined names) that followed
        # this return in the original has been removed.
        return result