# -*- coding: utf-8 -*-
"""
Created on Fri Jan 14 09:33:37 2022

@author: xtp

"""

import numpy as np
import os
import pandas as pd
import re
import glob
from fnmatch import fnmatch
import json
from sklearn.cluster import KMeans

class Formatting():
    '''
    Fluorescence data reading, pre-processing and writing.

    Wraps a pandas DataFrame and caches a description of its index levels,
    columns and shape.
    '''

    def __init__(self, dataframe):
        self.DataFrame = dataframe
        self.Factors = list(dataframe.index.names)   # index level names
        self.Level = list(dataframe.index)           # index values
        self.Features = list(dataframe.columns)      # column labels
        self.Shape = dataframe.shape

    def print_object(self):
        # Print every attribute as "name:value" for quick inspection.
        print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))

    @classmethod
    def readFile2Table(cls, path, ext, flag='all', subfolder=False,
                       header=None, index_col=None, skiprows=None,
                       feature_name='', addroindex=False, addNaNcolumn=False,
                       dropindex=False):
        '''
        Read the selected Excel files and merge them into one table indexed
        by file name.

        Parameters
        ----------
        path : str
            Folder holding the data to process.
        ext : list[str]
            File name patterns to select (e.g. ['*.xlsx']).
        flag : str, optional
            'this' searches only `path`; 'all' also searches sub-folders.
        subfolder : bool, optional
            Prefix the parent folder name to each file-name index entry.
        header, index_col, skiprows : optional
            Passed through to ``pandas.read_excel``.
        feature_name : str, optional
            Name given to the per-file row index before transposing.
        addroindex : bool, optional
            Add region-of-interest (ROI) labels as the column index.
        addNaNcolumn : bool, optional
            Insert a NaN column when a file is missing one ROI.
        dropindex : bool, optional
            Drop the original per-file index.

        Returns
        -------
        Formatting
            Instance wrapping the concatenated table.
        '''
        process = Files(path, ext, flag=flag)
        df_frame = []
        # Distinct loop name so the `path` argument is not shadowed.
        for filepath, name in zip(process.filesWithPath, process.filesNoExt):
            df = pd.read_excel(filepath, header=header, index_col=index_col,
                               skiprows=skiprows)
            df.index.name = feature_name
            if addroindex:
                df = Formatting.__addROIndex(df,
                            default=['Leaf1','Leaf2','Leaf3','Leaf4','Leaf5'],  # default columns
                            exception=['Leaf2','Leaf3','Leaf4','Leaf5'])        # fallback when one ROI is missing
            if addNaNcolumn:
                df = Formatting.__addNaNcolumn(df, default_number=5, loc=0,     # expected ROI count, NaN position
                                                columns_name='Leaf1')           # name of the inserted column
            df = df.astype(float).T
            if addroindex:
                df.index.name = 'ROI'                                           # ROI class name
            if subfolder:
                # Disambiguate identically named files by prefixing the
                # immediate parent folder name.
                path_list = re.split(r'\\|\/', filepath)
                foldername = path_list[-2]
                df = Formatting.__addIndex(df, index=foldername+name,
                                        class_name='Filename', drop=dropindex)
            else:
                df = Formatting.__addIndex(df, index=name,
                                        class_name='Filename', drop=dropindex)
            df_frame.append(df)
        df_table = pd.concat(df_frame, axis=0, ignore_index=False)
        return cls(df_table)

    @classmethod
    def resetTableIndex(cls, df, flags_list=None, filter_list=None,
                        new_index_list=None, class_name_list=None,
                        dropindex=False):
        '''
        Rebuild the table index by classifying the file-name index.

        Parameters
        ----------
        df : DataFrame
            Table indexed by file name.
        flags_list : list[list[int, int]], optional
            Character ranges of the file name to inspect, one per factor.
        filter_list : list[list[str]], optional
            Substrings to look for inside each range.
        new_index_list : list[list[str]], optional
            Replacement labels, parallel to `filter_list`.
        class_name_list : list[str], optional
            Name of each new index level.

            Example:
                flags_list = [[0,3],[3,5],[5,7]]
                filter_list = [['20','335','958'],
                               ['1','2','3'],
                               ['1','2','3','4','5','6']]
                new_index_list = [['JD20','XY335','ZD958'],
                                  ['N0','N1','N2'],
                                  ['W','W','W','D','D','D']]
                class_name_list = ['Varieties','Nitrogen','Water']

        dropindex : bool, optional
            If False, keep the original 'Index' level in the new index.

        Returns
        -------
        Formatting
            Instance wrapping the re-indexed table.
        '''
        # Normalise optional list arguments (avoids mutable defaults).
        flags_list = flags_list or []
        filter_list = filter_list or []
        new_index_list = new_index_list or []
        class_name_list = class_name_list or []
        try:
            # Flatten extra MultiIndex levels into columns first; a plain
            # single-level index makes reset_index raise IndexError.
            df = df.reset_index(level=list(range(1, len(df.index[0]))),
                                drop=False)
        except IndexError:
            pass
        for flags, filters, new_index, class_name in zip(
                flags_list, filter_list, new_index_list, class_name_list):
            df = Formatting.__convertIndex(df, flags=flags, filters=filters,
                                   new_index=new_index, class_name=class_name)
        if not dropindex:
            df = df.reset_index()
            df_multindex = df.set_index(['Index']+class_name_list)
        else:
            df_multindex = df.set_index(class_name_list)
        return cls(df_multindex)


    @staticmethod
    def __addROIndex(df, default=[], exception=[]):
        '''
        Manually assign the column index (ROI labels).

        Parameters
        ----------
        df : DataFrame
            Data table.
        default : list[str]
            Column labels for the regular case.
        exception : list[str]
            Fallback labels used when `default` does not fit the data.

        Returns
        -------
        DataFrame
            Table with the new column labels.
        '''
        df_arr = np.array(df)
        try:
            df = pd.DataFrame(df_arr, index=df.index, columns=default)
        except ValueError:
            # Column count mismatch: fall back to the exception labels.
            df = pd.DataFrame(df_arr, index=df.index, columns=exception)
        return df

    @staticmethod
    def __addNaNcolumn(df, default_number, loc, columns_name=''):
        '''
        Insert a NaN column at the position of a missing ROI.

        Parameters
        ----------
        df : DataFrame
            Data table.
        default_number : int
            Expected number of ROI columns.
        loc : int
            Position at which to insert the NaN column.
        columns_name : str
            Label of the inserted column.

        Returns
        -------
        DataFrame
            Table guaranteed to have `default_number` columns.
        '''
        if len(df.columns) == default_number:
            return df
        df.insert(loc, columns_name, np.nan)
        return df

    @staticmethod
    def __addIndex(df, index='', class_name='', drop=False):
        '''
        Give every row of the table the same index value (e.g. Filename).

        Parameters
        ----------
        df : DataFrame
            Data table.
        index : str
            Value of the new index.
        class_name : str
            Name of the new index level.
        drop : bool
            Whether to drop the previous index.

        Returns
        -------
        DataFrame
            Table indexed by `class_name`.
        '''
        df = df.reset_index(drop=drop)
        df[class_name] = [index] * df.shape[0]
        df = df.set_index(class_name, drop=True)
        return df

    @staticmethod
    def __convertIndex(df, flags=[], filters=[], new_index=[], class_name=''):
        '''
        Classify the existing index labels and store the classes as a new
        column (later turned into a MultiIndex level).

        Parameters
        ----------
        df : DataFrame
            Table with a string index.
        flags : list[int, int]
            Character range of the index label to inspect.
        filters : list[str]
            Substrings identifying each class.
        new_index : list[str]
            Replacement label for each entry of `filters`.
        class_name : str
            Name of the new column.

        Returns
        -------
        DataFrame
            Table with the extra `class_name` column.
        '''
        index = list(df.index)
        for f, pattern in enumerate(filters):
            for i, index_str in enumerate(index):
                # Substring match inside the inspected character range.
                # NOTE(review): labels replaced by an earlier filter can be
                # re-matched by a later one — confirm filters are disjoint.
                if pattern in index_str[flags[0]:flags[1]]:
                    index[i] = new_index[f]
        df[class_name] = index
        return df

    @staticmethod
    def setIndex(df, keys, drop=True, append=False):
        '''
        Set the table index in place.

        Parameters
        ----------
        df : DataFrame
            Data table (modified in place).
        keys : Series, Index, str or list[str]
            Index object to use directly, or column label(s) to promote.
        drop : bool, optional
            Delete the column(s) used as the new index. The default is True.
        append : bool, optional
            Append to the existing index instead of replacing it.
            The default is False.

        Returns
        -------
        DataFrame
            The same (mutated) table, for chaining.
        '''
        df.set_index(keys=keys, drop=drop, append=append,
                     inplace=True, verify_integrity=False)
        return df


    @staticmethod
    def multiIndexExtract(df, first=[], second=[], names=None):
        '''
        Build a new two-level index by slicing each level's string label.

        Parameters
        ----------
        df : DataFrame
            Table whose index entries are 2-tuples of strings.
        first : [int, int] or [int]
            Slice (or single position) taken from level 0 of each tuple.
        second : [int, int] or [int]
            Slice (or single position) taken from level 1 of each tuple.
        names : list[str], optional
            Names for the two new index levels. The default is None
            (the old default of [] made from_tuples fail when omitted).

        Returns
        -------
        pandas.MultiIndex
            The rebuilt two-level index.
        '''
        index_frame = []
        for old in df.index:
            # A one-element range means "take a single position".
            try:
                firstindex = old[0][first[0]:first[1]]
            except IndexError:
                firstindex = old[0][first[0]]
            try:
                secondindex = old[1][second[0]:second[1]]
            except IndexError:
                secondindex = old[1][second[0]]
            index_frame.append((firstindex, secondindex))
        # Build the MultiIndex once, after all tuples are collected.
        return pd.MultiIndex.from_tuples(index_frame, names=names)

    @staticmethod
    def multiIndexExtract2(df, flags_list=None, names=None):
        '''
        Split a single string index into several MultiIndex levels by
        slicing each label.

        Parameters
        ----------
        df : DataFrame
            Table with a string index.
        flags_list : list[[int, int]] or list[[int]], optional
            One character range (or single position) per new level.
        names : list[str], optional
            Names of the new index levels. The default is None
            (the old default of [] made from_tuples fail when omitted).

        Returns
        -------
        DataFrame
            Copy of the data with the new MultiIndex.
        '''
        flags_list = flags_list or []
        index_frame = []
        for label in df.index:
            parts = []
            for f in flags_list:
                # A one-element range means "take a single position".
                try:
                    parts.append(label[f[0]:f[1]])
                except IndexError:
                    parts.append(label[f[0]])
            index_frame.append(tuple(parts))
        newmultindex = pd.MultiIndex.from_tuples(index_frame, names=names)
        return pd.DataFrame(np.array(df), index=newmultindex,
                            columns=df.columns)


        
class Files():
    '''
    File filtering and extraction of file names and paths.
    '''
    def __init__(self, folder, ext, flag='this'):
        '''
        Collect the files under `folder` that match the patterns in `ext`.

        Arguments:
            folder: full folder path, str
            ext: file name patterns to select, list[str]
            flag: 'this' searches only `folder`; 'all' includes sub-folders

        Attributes:
            self.path: full folder path, str
            self.ext: matching pattern per selected file, list[str]
            self.count: number of selected files, int
            self.filesNoExt: selected file names, list
            self.files: selected file names with extension, list
            self.filesWithPath: full paths of the selected files, list
            self.filesWithPathNoExt: full paths without extension, list
            self.filesPath: folder path repeated once per file, list
        '''
        folder = os.path.normpath(folder)
        self.path = folder
        self.ext = ext
        # Defaults in case `flag` is neither 'this' nor 'all'.
        self.filesNoExt, self.files, self.filesWithPath, self.count = \
            [], [], [], 0

        if flag == 'this':
            [self.filesNoExt, self.files, self.filesWithPath,
             self.count, self.ext] = Files.__getfiles(folder, ext)

        elif flag == 'all':
            [self.filesNoExt, self.files, self.filesWithPath,
             self.count, self.ext] = Files.__getfilesAll(folder, ext)

        self.filesPath = [folder] * self.count
        self.filesWithPathNoExt = [os.path.splitext(f)[0]
                                   for f in self.filesWithPath]

    @staticmethod
    def __getfiles(dir0, filter0):
        '''
        Collect the files directly inside `dir0` matching the glob patterns.

        Arguments:
            dir0: full folder path, str
            filter0: glob patterns to match, list[str]
        Returns:
            filenamesStr: matched file names without extension, list
            filenamesStr_ext: matched file names with extension, list
            fileswithpath: full paths of the matched files, list
            count: number of matched files, int
            ext: matching pattern repeated once per matched file, list

        '''
        cwd = os.getcwd()
        os.chdir(dir0)                  # make the glob patterns relative to dir0
        try:
            filenamesStr = []
            filenamesStr_ext = []
            fileswithpath = []
            ext = []
            for t in filter0:
                matches = glob.glob(t)
                for file in matches:
                    filenamesStr.append(os.path.splitext(file)[0])
                    fileswithpath.append(dir0 + '/' + file)
                    filenamesStr_ext.append(file)
                # 'o_0' does not occur in normal patterns, so the split keeps
                # the pattern whole: one ext entry per matched file.
                for i in t.split('o_0') * len(matches):
                    ext.append(i)
        finally:
            # BUGFIX: the original chdir leaked; restore the caller's cwd.
            os.chdir(cwd)
        count = len(filenamesStr)
        return filenamesStr, filenamesStr_ext, fileswithpath, count, ext

    @staticmethod
    def __getfilesAll(path, filter_):
        '''
        Collect the files under `path` and all of its sub-folders matching
        the given patterns.

        Arguments:
            path: full folder path, str
            filter_: file name patterns to match, list[str]
        Returns:
            filenamesStr: matched file names without extension, list
            filenamesStr_ext: matched file names with extension, list
            fileswithpath: full paths of the matched files, list
            count: number of matched files, int
            ext: matching pattern repeated once per matched file, list

        '''
        filenamesStr = []
        fileswithpath = []
        filenamesStr_ext = []
        ext = []
        # dirpath: the folder currently visited; subdirs: its direct
        # sub-folders; names: its files (sub-folder content excluded).
        for dirpath, subdirs, names in os.walk(path):
            for name in names:
                for t in filter_:
                    if fnmatch(name, t):
                        filenamesStr.append(os.path.splitext(name)[0])
                        fileswithpath.append(dirpath + '/' + name)
                        filenamesStr_ext.append(name)
                        # BUGFIX: the original globbed `t` against the current
                        # working directory here, filling `ext` with garbage;
                        # record one pattern entry per matched file instead,
                        # mirroring __getfiles.
                        for i in t.split('o_0'):
                            ext.append(i)
        count = len(filenamesStr)
        return filenamesStr, filenamesStr_ext, fileswithpath, count, ext

    def sortFilesName(self, reg=(0,0)):
        '''
        Sort the collected file lists in human (natural) order.

        Arguments:
            reg: (start, stop) character range of each name used as the
                 sort key; (0, 0) sorts on the whole name.
        '''
        self.filesNoExt, self.filesSortedIndex = Files.sortListStringHuman(
                self.filesNoExt, reg=reg)
        # Reorder the parallel lists with the same permutation.
        self.files = [self.files[i] for i in self.filesSortedIndex]
        self.filesWithPath = [self.filesWithPath[i]
                              for i in self.filesSortedIndex]

    def splitFilesName(self, sp='__'):
        '''
        Split every file name on `sp` and stack the parts column-wise.

        Arguments:
            sp: separator string. The default is '__'.
        Returns:
            numpy array with one column per file and one row per name part.
        '''
        b = self.filesNoExt[0].split(sp)
        for file in self.filesNoExt[1:]:
            b = np.column_stack((b, file.split(sp)))
        self.namesElements = b
        return b

    @staticmethod
    def sortListStringHuman(text, reg=(0,0)):
        """Sort a string list in human (natural) order.

        Args:
            text: the input string list
            reg: (start, stop) range of each string used as the sort key;
                 (0, 0) uses the whole string

        Returns:
            retV: the sorted string list
            retIndex: the permutation that sorts the list

        """
        def natural_keys(s):
            '''
            alist.sort(key=natural_keys) sorts in human order
            http://nedbatchelder.com/blog/200712/human_sorting.html
            (See Toothy's implementation in the comments)
            float regex comes from https://stackoverflow.com/a/12643073/190597
            '''
            def atof(token):
                try:
                    return float(token)
                except ValueError:
                    return token
            return [atof(c) for c in re.split(
                    r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', s)]

        if reg == (0,0):
            textReg = text
        elif reg[1] == 0:
            textReg = [t[reg[0]:] for t in text]
        else:
            textReg = [t[reg[0]:reg[1]] for t in text]

        retIndex = sorted(range(len(textReg)),
                          key=lambda k: natural_keys(textReg[k]))
        retV = [text[i] for i in retIndex]
        return retV, retIndex

    @staticmethod
    def mkdirPath(pathstr):
        '''
        Create every missing folder along `pathstr`, printing what happens
        for each path component. The first two components (e.g. the drive
        and the top-level folder) are assumed to exist and are skipped.
        '''
        path = os.path.normpath(pathstr)
        pathparts = path.split(os.sep)
        for i in range(2, len(pathparts)):
            crtpath = '/'.join(pathparts[0:i+1])
            print(crtpath, end='    ')
            if os.path.isdir(crtpath):
                print('this is a folder')
            else:
                print('this is not a folder: ', end=' ')
                os.mkdir(crtpath)
                print('created')

    @staticmethod
    def saveData2Json(data, path=None):
        '''
        Serialise `data` to JSON at `path`, converting numpy content to
        plain lists when the first json.dump attempt fails.

        NOTE(review): the fallback indexes `data[i][0]`, so a dict input
        that is not JSON-serialisable will raise KeyError here — confirm
        the intended inputs are lists/arrays.
        '''
        try:
            with open(path, 'w') as fr:
                json.dump(data, fr)
        except TypeError:
            # Not JSON-serialisable: assume numpy content and convert.
            try:
                for i in range(len(data)):
                    data[i][0] = data[i][0].tolist()
                with open(path, 'w') as fr:
                    json.dump(data, fr)
            except IndexError:
                # Bare array (no rows to index): convert it directly.
                data = data.tolist()
                with open(path, 'w') as fr:
                    json.dump(data, fr)

    @staticmethod
    def getfilePath():
        '''
        Return the location of this module.
        https://blog.csdn.net/NeverLate_gogogo/article/details/111867089

        Returns
        -------
        path_real : str
            Real (symlink-resolved) path of this file.
        dir_real : str
            Directory of `path_real`.
        path_abs : str
            Absolute path of this file.
        dir_abs : str
            Directory part of `__file__`.

        '''
        path_real = os.path.realpath(__file__)
        dir_real = os.path.dirname(path_real)
        path_abs = os.path.abspath(__file__)
        dir_abs = os.path.dirname(__file__)

        return path_real, dir_real, path_abs, dir_abs


if __name__=="__main__":
    
# =============================================================================
#                                   STATEMENT
# =============================================================================
    # Hard-coded input/output locations and file-type filters for the
    # processing pipeline below; adjust these paths before running.
    path_processing = 'D:/Data/Variety Selection/data_1th/processing/'  # folder with the raw .xlsx data
    ext0 = ['*.xlsx']                                                   # patterns selecting the input files
    path_result = 'D:/Data/Variety Selection/data_1th/result/'          # folder receiving generated outputs
    ext1 = ['*.csv']                                                    # patterns selecting intermediate csv files
# =============================================================================
#                          MERGE & Calculation & SPLIT
# 1.多个指标合并
# 2.数据校正（0浓度 → max）
# 3.按主区因素重新分割、求胁迫系数（1.|对照-除草剂胁迫|/对照 2.|对照-除草剂胁迫|）
# 4.导出各指标两次重复中各自的胁迫系数及合并后的胁迫系数
# =============================================================================

    # Files.mkdirPath(path_result)
    # indicators_org = Formatting.readFile2Table(path_processing, ext0, 
    #                     subfolder=False, header=None, index_col=0, 
    #                     skiprows=[0], feature_name='Index',
    #                     addroindex=False, addNaNcolumn=False, dropindex=False)
    # df_org = indicators_org.DataFrame                                           # origin dataframe
    # Formatting.setIndex(df_org, keys=df_org.columns[0], drop=True, append=True)
    
    # indicators_idx = Formatting.resetTableIndex(df_org.T, flags_list = [[0,2]],
    #                     filter_list = [['A0','A1','A2','B0','B1','B2']],
    #                     new_index_list = [['A0','A1','A2','B0','B1','B2']],
    #                     class_name_list = ['Main Area'],
    #                     dropindex=False)
    # df_idx = indicators_idx.DataFrame                                           # dataframe index used to calculate

    # df_org_copy = df_org.copy().reset_index(level=1, drop=True)
    # df_crt = Formatting.multiIndexExtract2(df_org_copy.T, 
    #                               flags_list=[[0],[1],[3,5]], 
    #                               names=['Area', 'Concentration','Varieties'])
    # # replace!!!: 0 to maximum value (+1%)
    # frame=[]
    # for name, group in df_crt.groupby(['Area','Varieties']):
    #     df_np0 = np.array(group)
    #     try:
    #         df_MDA = group['MDA']
    #         df_np_MDA = np.sort(np.array(df_MDA), axis=0)*1.01
    #         a = group.columns.searchsorted('MDA')
    #         b = a + df_MDA.shape[1]
    #         df_SP = group['SP']
    #         df_np_SP = np.sort(np.array(df_SP), axis=0)*1.01
    #         c = group.columns.searchsorted('SP')
    #         d = c + df_SP.shape[1]
    #         df_np1 = abs(np.sort(-np.array(group), axis=0))*1.01
    #         df_np1[::,a:b] = df_np_MDA
    #         df_np1[::,c:d] = df_np_SP
    #     except KeyError:
    #         df_np1 = abs(np.sort(-df_np0, axis=0))*1.01
    #     df_np2 = df_np0.copy()
    #     df_np2[0,:] = df_np1[0]
    #     df = pd.DataFrame(df_np2, index=group.index, columns=group.columns)
    #     frame.append(df)
    # df_crt = pd.concat(frame, axis=0).sort_index()                              # corrected dataframe
    # df_cal = pd.DataFrame(np.array(df_crt), index=df_idx.index, 
    #                       columns=df_idx.columns)                               # dataframe used to calculate

    # frame_0 = []
    # frame_1 = []
    # frame_2 = []
    # indicators_number_list = []
    # filename_list = list(df_cal.T.reset_index(level=1).index)
    # indicators_list = list(set(filename_list))
    # indicators_list.sort(key=filename_list.index)
    # df_sort = df_cal.loc[:, indicators_list]                                    # sort dataframe by filename to avoid reporting errors
    # for name, group in df_sort.groupby('Main Area'):
    #     df = group.swaplevel('Main Area', 'Index')
    #     if name == 'A0':                                                        # grouped by concentration/main area
    #         A0 = df.groupby(level='Filename',axis=1).agg('mean')
    #         for i in indicators_list:
    #             number = df[i].shape[1]
    #             indicators_number_list.append(number)
    #         A0_np = np.repeat(np.array(A0), indicators_number_list, axis=1)     # 0 mean & repeat
    #     if name == 'B0':
    #         B0 = df.groupby(level='Filename',axis=1).agg('mean')
    #         B0_np = np.repeat(np.array(B0), indicators_number_list, axis=1)
    #     if name[1] == '0':
    #         df_copy0 = df.copy()
    #         df = df.reset_index(drop=True)
    #         frame_0.append(df)
    #     if name[1] == '1':
    #         df_copy1 = df.copy()
    #         df = df.reset_index(drop=True)
    #         frame_1.append(df)
    #     if name[1] == '2':
    #         df_copy2 = df.copy()
    #         df = df.reset_index(drop=True)
    #         frame_2.append(df)
        
    # df0 = pd.concat(frame_0, axis=1).T.sort_index().T                           # merge by concentration
    # merge0 = df0.groupby(level='Filename', axis=1).agg('mean')
    # indicators_number_list2= list(map(lambda x : x*2, indicators_number_list))
    # merge0_np = np.repeat(np.array(merge0), indicators_number_list2, axis=1)
    
    # df1 = pd.concat(frame_1, axis=1).T.sort_index()                             # set merged dataframe index
    # df2 = pd.concat(frame_2, axis=1).T.sort_index()
    # column1th = []
    # for i in indicators_list:
    #     count = len(list(df1.loc[i].index))
    #     for j in range(count):
    #         column1th.append(j+1) 
    # df1 = df1.reset_index(level=1, drop=True).T
    # df2 = df2.reset_index(level=1, drop=True).T
    # column0th = list(df1.columns)
    # newcolumns = pd.MultiIndex.from_arrays([column0th, column1th],
    #                             names = ['Filename', 'index'])
    # newindex_1 = Formatting.multiIndexExtract(df_copy1, first=[1], 
    #                     second=[1,5],names = ['Concentration', 'Varieties'])
    # newindex_2 = Formatting.multiIndexExtract(df_copy2, first=[1], 
    #                     second=[1,5],names = ['Concentration', 'Varieties'])
    
    # for name, group in df_sort.groupby('Main Area'):                            # calculation: AB split & merge
    #     df = group.swaplevel('Main Area', 'Index')
        
    #     # # Calculation1
    #     # if name[0] == 'A' and name != 'A0':
    #     #     df_A = pd.DataFrame(((A0_np-np.array(df))/A0_np), 
    #     #                       columns = df.columns, index = df.index)
    #     #     df_A.to_csv(path_result + name + '_calculated.csv')
    #     # if name[0] == 'B' and name != 'B0':
    #     #     df_B = pd.DataFrame(((B0_np-np.array(df))/B0_np), 
    #     #                       columns = df.columns, index = df.index)
    #     #     df_B.to_csv(path_result + name + '_calculated.csv')
    #     # whole_1 = pd.DataFrame((merge0_np-np.array(df1))/merge0_np, 
    #     #                         index = newindex_1, columns = newcolumns)
    #     # whole_1.to_csv(path_result + 'whole_1_calculated.csv')
    #     # whole_2 = pd.DataFrame((merge0_np-np.array(df2))/merge0_np, 
    #     #                         index = newindex_2, columns = newcolumns)
    #     # whole_2.to_csv(path_result + 'whole_2_calculated.csv')
        
    #     # Calculation2
    #     if name[0] == 'A' and name != 'A0':
    #         df = pd.DataFrame((A0_np-np.array(df)), 
    #                           columns = df.columns, index = df.index)
    #         df.to_csv(path_result + name + '_calculated.csv')
    #     if name[0] == 'B' and name != 'B0':
    #         df = pd.DataFrame((B0_np-np.array(df)),
    #                           columns = df.columns, index = df.index)
    #         df.to_csv(path_result + name + '_calculated.csv')
    #     whole_1 = pd.DataFrame((merge0_np-np.array(df1)), 
    #                             index = newindex_1, columns = newcolumns)
    #     whole_1.to_csv(path_result + 'whole_1_calculated.csv')
    #     whole_2 = pd.DataFrame((merge0_np-np.array(df2)), 
    #                             index = newindex_2, columns = newcolumns)
    #     whole_2.to_csv(path_result + 'whole_2_calculated.csv')

    
# =============================================================================
#                           MEAN & Correction & MERGE
# 1.合并数据
# 2.胁迫系数修正（MDA、SP取反，所有<0数据记为Nan）
# 3.异常数据处理（1.异常数据剔除(以均值代替) 2.异常数据修正（记为无胁迫））
# 4.合并校正后的数据
# 5.Bartlett's Test & KMO Test
# =============================================================================
    
    # df_frame = []
    # file3 = Files(path_processing, ext1, flag ='this')
    # for path, name in zip(file3.filesWithPath, file3.filesNoExt):
    #     df = pd.read_table(path, header=[0,1], sep=',',index_col=[0,1])
    #     df_frame.append(df)
    # df_merged = pd.concat(df_frame, axis=0, ignore_index=False)

    # try:
    #     df_merged[df_merged[['MDA']]>0]=np.nan
    #     df_merged['MDA'] = -df_merged['MDA']
    #     # cols = [i for i in df_merged.columns if i not in [
    #     #         tuple(['MDA', j]) for j in df_merged['MDA'].columns]]
    #     # df_merged[df_merged[cols]<0]=np.nan
    #     df_merged[df_merged[['SP']]>0]=np.nan
    #     df_merged['SP'] = -df_merged['SP']
    # except KeyError :
    #     pass
    # df_merged[df_merged<0]=np.nan
        
    # # # Remove
    # # df_merged = df_merged.groupby(level='Filename',axis=1).agg('mean')
    # # df_merged = df_merged.fillna(value=0)
    
    # # Modify
    # df_merged = df_merged.fillna(value=0)
    # df_merged = df_merged.groupby(level='Filename',axis=1).agg('mean')

    # df_merged.to_csv(path_result + 'merge_corrected.csv')
    
    # # Validity test
    # # 变量相关系数矩阵检验
    # from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity   # Bartlett's Test of Sphericity, p<0.05
    # chi_square_value,p_value=calculate_bartlett_sphericity(df_merged)
    # B_test = {'X2' : chi_square_value, 'P-value' : p_value }
    # print(B_test)
    # # 变量相关性检验
    # from factor_analyzer.factor_analyzer import calculate_kmo                   # Kaiser-Meyer-Olkin, KMO>0.6
    # kmo_all,kmo_model=calculate_kmo(df_merged)
    # KMO = {'KMO test' : kmo_model }
    # print(KMO)
    # Files.saveData2Json({**B_test, **KMO}, path=path_result+'test.json')
    
    
# =============================================================================
#                               Stepwise Regression
# 1. 逐步回归分析进行指标筛选，可不做
# 2. 进行前，将excel中的排名结果与样本指标数据合并，并将排名得分列名置为'y',保存为
# 'stepwise_regression.csv'并置于'path_result'目录下
# =============================================================================
    
    # def forward_selected(data, response):
    #     """
        
    #     使用Adjusted R-squared来评判新加的参数是否提高回归中的统计显著性
    #     https://planspace.org/20150423-forward_selection_with_statsmodels/
        
    #     Linear model designed by forward selection.
    #     Parameters:
    #     -----------
    #     data : pandas DataFrame with all possible predictors and response
    #     response: string, name of response column in data
    #     Returns:
    #     --------
    #     model: an "optimal" fitted statsmodels linear model
    #             with an intercept
    #             selected by forward selection
    #             evaluated by adjusted R-squared
    #     """
    #     import statsmodels.formula.api as smf
    #     remaining = set(data.columns)
    #     remaining.remove(response)
    #     selected = []
    #     current_score, best_new_score = 0.0, 0.0
    #     while remaining and current_score == best_new_score:
    #         scores_with_candidates = []
    #         for candidate in remaining:
    #             formula = "{} ~ {} + 1".format(response,
    #                                             ' + '.join(selected + [candidate]))
    #             score = smf.ols(formula, data).fit().rsquared_adj
    #             scores_with_candidates.append((score, candidate))
    #         scores_with_candidates.sort()
    #         best_new_score, best_candidate = scores_with_candidates.pop()
    #         if current_score < best_new_score:
    #             remaining.remove(best_candidate)
    #             selected.append(best_candidate)
    #             current_score = best_new_score
    #     formula = "{} ~ {} + 1".format(response,
    #                                     ' + '.join(selected))
    #     model = smf.ols(formula, data).fit()
     
    #     return model


    # data = pd.read_csv(path_result + 'stepwise_regression.csv', sep=',')
    # model = forward_selected(data, 'y')
 
    # # 回归模型
    # print(model.model.formula)
    # # 系数
    # print(model.params)
    # # Adjusted R-squared
    # print(model.rsquared_adj)


# =============================================================================
#                               1d-k-means
# 1. 一维得分数据的k均值聚类
# 2. 进行前，将excel中的排名结果保存为'source.txt'并置于'path_result'目录下
# =============================================================================
    
    # --- 1-D k-means clustering of the ranking scores ---
    # 'source.txt' must hold one score column with a header row and an
    # index column (see the section banner above).
    df = pd.read_table(path_result + 'source.txt', header=[0], index_col=[0])
    X = np.array(df).reshape(-1, 1)                 # scores as an (n, 1) sample matrix
    kmeans = KMeans(n_clusters=3).fit(X)
    # labels_ already holds the training assignments; calling predict(X)
    # again would only recompute them. Shift to 1-based class ids.
    pred = kmeans.labels_ + 1
    result = pd.DataFrame({'class': list(pred),
                           'number': list(df.index),
                           'source': X.ravel()})    # flatten back to 1-D
    result = result.set_index('class')
    result.to_csv(path_result + 'k-means.csv')
    
    
    
    
    
    
    
    
    