# -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 11:21:20 2022

@author: xtp

"""

import glob
import os
import re
import numpy as np
from fnmatch import fnmatch

class Files():
    '''
    文件筛选、路径及文件名提取
    
    '''
    def __init__(self, folder, ext, flag ='this'):
        '''
        Collect files under *folder* matching the patterns in *ext*.

        Arguments:
            folder: directory path, str
            ext: filename patterns to match (e.g. ['*.csv']), list[str]
            flag: 'this' searches only *folder*; 'all' also searches its
                  subdirectories recursively; any other value leaves the
                  result lists empty

        Attributes set:
            self.path: normalized directory path, str
            self.ext: matched pattern per file, list[str]
            self.count: number of matched files, int
            self.filesNoExt: file names without extension, list
            self.files: file names with extension, list
            self.filesWithPath: full paths of matched files, list
            self.filesWithPathNoExt: full paths without extension, list
            self.filesPath: the folder path repeated once per file, list
            self.folderName0, self.folderName1: parent / grandparent
                folder name of each file (only set when flag == 'all')
        '''
        folder = os.path.normpath(folder)
        self.path = folder
        self.ext = ext
        self.filesNoExt, self.files, self.filesWithPath, self.count = \
            [], [], [], 0

        if flag == 'this':
            [self.filesNoExt, self.files, self.filesWithPath,
             self.count, self.ext] = Files.__getfiles(folder, ext)
                
        elif flag == 'all':
            [self.filesNoExt, self.files, self.filesWithPath, self.count, 
             self.ext, self.folderName0, self.folderName1
             ] = Files.__getfilesAll(folder, ext)

        self.filesPath = [folder] * self.count
        self.filesWithPathNoExt = [os.path.splitext(f)[0] 
                                   for f in self.filesWithPath]
        
    @staticmethod
    def __getfiles(dir0, filter0):
        '''
        按特定的筛选条件获取 dir0 目录下的文件

        Arguments:
            dir0: 文件夹全路径，str
            filter0: 需要筛选的文件，list[str]
        Returns:
            filenamesStr: 需要筛选的文件名称，list
            filenamesStr_ext: 需要筛选的文件名称带后缀，list
            fileswithpath: 需要筛选的文件全路径，list
            count: 文件个数，int

        '''
        os.chdir(dir0)
        filenamesStr = []
        filenamesStr_ext = []
        fileswithpath = []
        ext = []
        for t in filter0:
            for file in glob.glob(t):
                [fileNoExt, ext2] = os.path.splitext(file)
                filenamesStr.append(fileNoExt)
                fileswithpath.append(dir0+'/'+file)
                filenamesStr_ext.append(file)
            e = len(glob.glob(t))
            for i in t.split('o_0')*e:
                ext.append(i)
        count = len(filenamesStr)
        return filenamesStr, filenamesStr_ext, fileswithpath, count, ext

    @staticmethod
    def __getfilesAll(path, filter_):
        '''
        按特定的筛选条件获取 path 目录及其子目录下的文件

        Arguments:
            path: 文件夹全路径，str
            filter_: 需要筛选的文件，list[str]
        Returns:
            filenamesStr: 需要筛选的文件名称，list
            filenamesStr_ext: 需要筛选的文件名称带后缀，list
            fileswithpath: 需要筛选的文件全路径，list
            count: 文件个数，int

        '''
        filenamesStr = []
        fileswithpath = []
        filenamesStr_ext = []
        ext = []
        foldername0 = []
        foldername1 = []
        for path, subdirs, f in os.walk(path):
            # path: 当前正在遍历的这个文件夹的本身的地址，str
            # subdirs: 该文件夹中所有的目录的名字(不包括子目录)，list
            # files: 该文件夹中所有的文件(不包括子目录)，list
            for name in f:
                for t in filter_:
                    if fnmatch(name, t):
                        [fileNoExt, ext2] = os.path.splitext(name)
                        filenamesStr.append(fileNoExt)
                        fileswithpath.append(path+'/'+ name)
                        filenamesStr_ext.append(name)
                    e = len(glob.glob(t))
                    for i in t.split('o_0')*e:
                        ext.append(i)
        
        for path_lists in fileswithpath:
            path_list = re.split(r'\\|\/', path_lists)
            folder0 = path_list[-2]
            folder1 = path_list[-3]
            foldername0.append(folder0)
            foldername1.append(folder1)
        count = len(filenamesStr)
        
        return filenamesStr, filenamesStr_ext, fileswithpath, count, ext, foldername0, foldername1

    def sortFilesName(self, reg=(0,0)):
        self.filesNoExt, self.filesSortedIndex = Files.sortListStringHuman(
                self.filesNoExt, reg=reg)
        self.files = [self.files[i] for i in  self.filesSortedIndex]
        self.filesWithPath = [self.filesWithPath[i] for i in  self.filesSortedIndex]
        # print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))
        
    def splitFilesName(self, sp='__'):
        b = self.filesNoExt[0].split(sp)
        for file in self.filesNoExt[1:]:
            name = file.split(sp)
            b = np.column_stack((b, name))
        self.namesElements = b
        return b
   
    @staticmethod
    def sortListStringHuman(text, reg=(0,0)):
        """Sort the string list with human order

        Args:
            text: The input string list

        Returns:
            data: sorted string list

        """
        import re

        def natural_keys(text):
            '''
            alist.sort(key=natural_keys) sorts in human order
            http://nedbatchelder.com/blog/200712/human_sorting.html
            (See Toothy's implementation in the comments)
            float regex comes from https://stackoverflow.com/a/12643073/190597
            '''
            def atof(text):
                try:
                    retval = float(text)
                    # print(retval)
                except ValueError:
                    retval = text
                return retval
            
            return [atof(c) for c in re.split
                    (r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text)]
        if reg == (0,0):
            textReg = text
        elif reg[1] == 0:
            textReg = [t[reg[0]:] for t in text]
        else: 
            textReg = [t[reg[0]:reg[1]] for t in text]

        retIndex = sorted(range(len(textReg)), key=lambda k:natural_keys(textReg[k]))
        retV = [text[i] for i in retIndex]
        return retV, retIndex 
   
    @staticmethod
    def mkdirPath(pathstr):
        import os
        path = os.path.normpath(pathstr)
        pathparts = path.split(os.sep)
        # print(pathparts)
        for i in range(len(pathparts)):
            crtpath = '/'.join(pathparts[0:i+1])
            # print(crtpath, end='    ')
            if os.path.isdir(crtpath):
                # print('this is a folder')
                pass
            else:
                try:
                    # print('this is not a folder: ', end=' ')
                    os.mkdir(crtpath)
                    # print('created')
                except OSError:
                    pass
                
    
    @staticmethod
    def getfilePath():
        '''
        获取该模块的地址
        https://blog.csdn.net/NeverLate_gogogo/article/details/111867089

        Returns
        -------
        path_real : TYPE
            DESCRIPTION.
        dir_real : TYPE
            DESCRIPTION.
        path_abs : TYPE
            DESCRIPTION.
        dir_abs : TYPE
            DESCRIPTION.

        '''
        path_real = os.path.realpath(__file__)
        dir_real = os.path.dirname(path_real)
        path_abs = os.path.abspath(__file__)
        dir_abs = os.path.dirname(__file__)
        
        return path_real, dir_real, path_abs, dir_abs
    
    
import pandas as pd
import re
import scipy.io as scio

class Table(): 
    '''
    批量读取表格，数据筛选
    
    '''
    def __init__(self, dataframe):
        self.DataFrame = dataframe
        self.Factors = list(dataframe.index.names)
        self.Level = list(dataframe.index)
        self.Features = list(dataframe.columns)
    
    def print_object(self):
        print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))
        
    @classmethod
    def read(cls, path='', header=None, index_col=None, sep=',', skiprows=[]):
        df = pd.read_table(path, header=header, index_col=index_col,
                           sep=sep, skiprows=skiprows)
        return cls(df)
    
    @staticmethod
    def dataframe(X, index):
        df = pd.DataFrame(X, index=index)
        return df
    
    @classmethod
    def addname(cls, df, index_name_list, column_name_list):
        df.index.names = index_name_list
        df.columns.names = column_name_list
        return cls(df)
    
    @staticmethod
    def write(df, path=''):
        df.to_csv(path) 
        return df
    
    @classmethod
    def concat(cls, df_frame, axis=1, reset_index=False, path='', fitWidth=True):
        from styleframe import StyleFrame
        df = pd.concat(df_frame, axis=axis)
        if reset_index == True:
            df = df.reset_index()
        if path != '':
            if fitWidth == True:
                excel_writer = StyleFrame.ExcelWriter(path)
                sf = StyleFrame(df)
                sf.to_excel(excel_writer=excel_writer, best_fit=list(df.columns),
                            row_to_add_filters=0)
                excel_writer.save()
            else:
                df.to_excel(path, index=False)
        return cls(df)
    
    @classmethod
    def resetIndex(cls, df, level=0, drop=True, col_level=0, col_fill=''):
        '''
        表格索引重置

        Parameters
        ----------
        df : TYPE
            DESCRIPTION.
        level : str, list[str]
            设置需要重置的索引.
        drop : TYPE, optional
            是否删除需要重置的索引. The default is True.
        col_level : TYPE, optional
            如果列具有多个级别，则确定将标签插入到哪个级别. The default is 0.
        col_fill : TYPE, optional
            如果列具有多个级别，则确定其他级别的命名方式. The default is ''.

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        df.reset_index(level=level, drop=drop, inplace=True, 
                            col_level=col_level, col_fill=col_fill)
        return cls(df)
        
    @staticmethod
    def multiIndexMerge(df, merge=[], reserve=[]):
        '''
        Merge several MultiIndex levels of *df* into a single level.

        Parameters
        ----------
        df : DataFrame with a MultiIndex
        merge : list[int]
            positions of the index levels to merge (labels joined with '-')
        reserve : list[int]
            positions of the index levels to keep unchanged

        Returns
        -------
        df : the same DataFrame with its index replaced by the reserved
             levels plus one merged level
        '''
        merge_list =[]
        reserve_list =[]
        # Build a format string like '{0[1]}-{0[2]}' that joins the
        # selected levels of each index tuple with '-'.
        for i, m in enumerate(merge):
            if i == 0:
                merge_str = '{0[' + str(m) + ']}'
            else:
                merge_str = '-{0[' + str(m) + ']}'
            merge_list.append(merge_str) 
        index_merged = ''.join(merge_list)
        for r in reserve:
            reserved = df.index.get_level_values(r)
            reserve_list.append(reserved)
        # index.map(fmt.format) renders each index tuple into the merged
        # label; together with the reserved level arrays this list of
        # arrays forms the new (multi-)index.
        df.index = reserve_list + [df.index.map(index_merged.format)]
    
        return df
    
    @staticmethod
    def multiIndexExtract(df, slices = [], names = []):
        '''
        Split each existing index label of *df* into parts and use them
        as a new MultiIndex.

        Parameters
        ----------
        df : DataFrame
        slices : list of (start, stop) pairs; each pair extracts one new
            index level from every original label via label[start:stop].
            The default is [].
        names : names for the new index levels. The default is [].

        Returns
        -------
        df : the DataFrame re-indexed with the extracted MultiIndex
        '''
        multindex_list = []
        index_frame = []
        for i in range(len(list(df.index))):
            for j in slices:
                a=list(df.index)[i]
                try:
                    index = list(df.index)[i][j[0]:j[1]]
                except IndexError:
                    # fall back to a single element when the slice form is
                    # not applicable to this label
                    index = list(df.index)[i][j[0]]
                multindex_list.append(index)
            # one tuple of extracted parts per original label
            multindex = tuple(multindex_list)
            multindex_list = []
            index_frame.append(multindex)
        newindex = pd.MultiIndex.from_tuples(index_frame, names = names)
        df = df.set_index(newindex)
        return df
    
    @classmethod
    def readFile2Table(cls, path, ext, flag ='all', addfilename=True,
                       header=None, index_col=None, sep='', skiprows=[], 
                       feature_name='', dropindex=False):
        """Read every file matching *ext* under *path* into one table.

        Each file is read with pd.read_table, cast to float and
        transposed; when *addfilename* is true a 'Filename' index level
        is added (for flag='all' the parent folder name is prepended to
        the file name). All per-file frames are stacked row-wise.
        """
        found = Files(path, ext, flag=flag)
        frames = []
        for fpath, fname in zip(found.filesWithPath, found.filesNoExt):
            frame = pd.read_table(fpath, header=header, index_col=index_col,
                                  sep=sep, skiprows=skiprows)
            frame.index.name = feature_name
            frame = frame.astype(float).T
            if addfilename == True:
                if flag == 'all':
                    parent = re.split(r'\\|\/', fpath)[-2]
                    frame = Table.__addIndex(frame, index=parent + fname,
                                             class_name='Filename',
                                             drop=dropindex)
                else:
                    frame = Table.__addIndex(frame, index=fname,
                                             class_name='Filename',
                                             drop=dropindex)
            frames.append(frame)
        return cls(pd.concat(frames, axis=0, ignore_index=False))
    
    @classmethod
    def readFile2Table2(cls, path, ext, flag ='this', addfilename=False,
                       header=None, index_col=None, sep='', skiprows=[], 
                       class_name='', resetindex=False, dropindex=False):
        """Read every file matching *ext* under *path* and stack the
        frames row-wise.

        With *addfilename* each file's rows are tagged by file name under
        an index level named *class_name*. Unless *resetindex* is set,
        the original index names (taken from the last file read) are
        re-appended afterwards.
        """
        found = Files(path, ext, flag=flag)
        frames = []
        for fpath, fname in zip(found.filesWithPath, found.filesNoExt):
            frame = pd.read_table(fpath, header=header, index_col=index_col,
                                  sep=sep, skiprows=skiprows)
            origin_index = list(frame.index.names)
            if addfilename == True:
                frame = Table.__addIndex(frame, index=fname,
                                         class_name=class_name,
                                         drop=dropindex)
            frames.append(frame)
        df_table = pd.concat(frames, axis=0, ignore_index=False)
        if resetindex == False:
            try:
                df_table = df_table.set_index(origin_index, append=True,
                                              drop=True)
            except KeyError:
                pass
        return cls(df_table)
    
    @classmethod
    def readFile2Table3(cls, path, ext, flag ='all', addfilename=True,
                       header=None, index_col=None, sep='', skiprows=[], 
                       feature_name='', dropindex=False):
        """Read every file matching *ext* under *path* and stack them
        row-wise (cast to float, not transposed).

        With *addfilename* and flag='all', rows are indexed by the parent
        folder name under a 'Concentration' level; otherwise by the file
        name under a 'Filename' level.
        """
        found = Files(path, ext, flag=flag)
        frames = []
        for fpath, fname in zip(found.filesWithPath, found.filesNoExt):
            frame = pd.read_table(fpath, header=header, index_col=index_col,
                                  sep=sep, skiprows=skiprows)
            frame.index.name = feature_name
            frame = frame.astype(float)
            if addfilename == True:
                if flag == 'all':
                    parent = re.split(r'\\|\/', fpath)[-2]
                    frame = Table.__addIndex(frame, index=parent,
                                             class_name='Concentration',
                                             drop=dropindex)
                else:
                    frame = Table.__addIndex(frame, index=fname,
                                             class_name='Filename',
                                             drop=dropindex)
            frames.append(frame)
        return cls(pd.concat(frames, axis=0, ignore_index=False))
    
    
    @classmethod
    def resetTableIndex(cls, df, flags_list=[], filter_list=[], 
                        new_index_list=[], class_name_list=[], 
                        dropindex=False):
        '''
        Rebuild the table's index by classifying the filename index.

        Parameters
        ----------
        cls : Table
        df : DataFrame
            table indexed by file names
        flags_list : list of [start, stop]
            slices applied to each filename label
        filter_list : list of list[str]
            substrings searched for inside each slice
        new_index_list : list of list[str]
            replacement labels, parallel to filter_list
        class_name_list : list[str]
            names of the new index levels, one per slice

        "    Example:
                    flags_list = [[0,3],[3,5],[5,7]]
                    filter_list = [['20','335','958'],
                                   ['1','2','3'],
                                   ['1','2','3','4','5','6']]
                    new_index_list = [['JD20','XY335','ZD958'],
                                      ['N0','N1','N2'],
                                      ['W','W','W','D','D','D']]
                    class_name_list = ['Varieties','Nitrogen','Water']    "

        dropindex : bool
            whether to keep the original filename index (False keeps it)

        Returns
        -------
        Table wrapping the re-indexed DataFrame.

        '''
        origindex_name_copy = list(df.index.names)
        try:
            # Materialize all index levels except the first as columns so
            # the classification below only sees the filename level.
            df = df.reset_index(level=list(range(1, len(df.index[0]))),
                                drop=False)
        except IndexError:
            # plain (non-tuple) index: nothing to peel off
            pass
        for flags, filters, new_index, class_name in zip(
                flags_list, filter_list, new_index_list, class_name_list):
            df = Table.__convertIndex(df, flags=flags, filters=filters,    
                                   new_index=new_index, class_name=class_name)
        if dropindex == False:
            df = df.reset_index()
            df_multindex = df.set_index(origindex_name_copy+class_name_list)
        else:
            df_multindex = df.set_index(class_name_list)
        return cls(df_multindex)
    
    @classmethod
    def resetTableColumns(cls, df, new_columns=[]):
        
        df.columns = new_columns
        
        return cls(df)
    
    @staticmethod
    def __addROIndex(df, default=[], exception=[]):
        '''
        手动添加列索引（ROI）

        Parameters
        ----------
        df : DataFrame
            数据表格
        default : list[str]
            默认列索引
        exception : list[str]
            异常列索引

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        df_arr = np.array(df)
        try:
            df = pd.DataFrame(df_arr, index=df.index, columns=default)
        except ValueError:
            df = pd.DataFrame(df_arr, index=df.index, columns=exception)
        return df
    
    @staticmethod
    def __addNaNcolumn(df, default_number, loc, columns_name=''):
        '''
        异常位置添加空值

        Parameters
        ----------
        df : DataFrame
            数据表格
        default_number : int
            ROI 默认列数
        loc : int
            空值列添加位置
        columns_name : str
            添加列的名称

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        if len(df.columns) == default_number:
            return df
        else:
            df.insert(loc, columns_name, np.nan)
            return df
        
    @staticmethod
    def __addIndex(df, index='', class_name='', drop=False):
        '''
        给一个数据表格添加相同的索引（Filename）

        Parameters
        ----------
        df : DataFrame
            数据表格
        index : str
            添加的索引
        class_name : str
            添加索引类别名称
        drop : bool
            是否保留原索引

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        index_list = [index]*(df.shape[0])
        df = df.reset_index(drop=drop)
        df[class_name] = index_list
        df = df.set_index(class_name, drop=True)
        return df
        
    @staticmethod
    def addIndex(df, index='', class_name='', drop=False):
        '''
        给一个数据表格添加相同的索引（Filename）

        Parameters
        ----------
        df : DataFrame
            数据表格
        index : str
            添加的索引
        class_name : str
            添加索引类别名称
        drop : bool
            是否保留原索引

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        index_list = [index]*(df.shape[0])
        df[class_name] = index_list
        if drop == True:
            df = df.reset_index(drop=True)
            df = df.set_index(class_name, drop=True)
        else: 
            df = df.set_index(class_name, drop=True, append=True)
        return df
    
    
    @staticmethod
    def __convertIndex(df, flags=[], filters=[], new_index=[], class_name=''):
        '''
        按已有索引分类，并转化为多级索引（Filename → Factors）

        Parameters
        ----------
        df : DataFrame
            带索引的数据表格
        flags : list[int,int]
            已有索引的筛选区间
        filters : list[str,...,str]
            已有索引的筛选名称
        new_index : list[str,...,str]
            新索引的名称
        class_name : str
            新索引类别名称

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        index = list(df.index)
        for f, filterr in enumerate(filters):
            for i, index_str in enumerate(index):
                if filterr in index_str[flags[0]:flags[1]]:
                    index[i] = new_index[f]
        df[class_name] = index
        return df
    
    def featureIterate(self, factor='', features=[], path=''):
        '''
        分析指定因素的指定特征，多个特征按组迭代，分别生成表格并按特征名保存

        Parameters
        ----------
        factor : str
            指定因素的名称
        features : list[str]
            指定特征的名称
        path : str
            默认保存路径

        Returns
        -------
        None.

        '''
        df = self.DataFrame.loc[:, features].stack('Features').unstack(factor)
        if path != '':
            for name, group in df.groupby('Features'):
                group.to_csv(path + name + '.csv')
    
    def factorIterate(self, factor='', features=[], path=''):
        '''
        分析指定因素的指定特征，因素按组迭代，分别生成表格并按因素名保存

        Parameters
        ----------
        factor : str
            指定因素的名称
        features : list[str]
            指定特征的名称
        path : str
            默认保存路径

        Returns
        -------
        None.

        '''
        name_list = []
        iterate_list = []
        df = self.DataFrame
        if features != []:
            df = self.DataFrame.loc[:, features]
        for name, group in df.groupby(factor):
            name_list.append(name)
            iterate_list.append(group)
            if path != '':
                group.to_csv(path + str(name) + '.csv')
        return name_list, iterate_list
    
    
    # @staticmethod
    # def featureIterate(df, factor='', feature='', features=[], path=''):
    #     '''
    #     分析指定因素的指定特征，多个特征按组迭代，分别生成表格并按特征名保存

    #     Parameters
    #     ----------
    #     factor : str
    #         指定因素的名称
    #     feature : str
    #         指定因素的名称
    #     features : list[str]
    #         选择的特征
    #     path : str
    #         默认保存路径

    #     Returns
    #     -------
    #     None.

    #     '''
    #     name_list = []
    #     iterate_list = []
    #     if features != []:
    #         df = df.loc[:, features]
    #     df = df.stack(feature).unstack(factor)
    #     for name, group in df.groupby(factor):
    #         name_list.append(name)
    #         iterate_list.append(group)
    #         if path != '':
    #             group.to_csv(path + str(name) + '.csv')
    #     return name_list, iterate_list
    
    # @staticmethod
    # def factorIterate(df, factor='', features=[], path=''):
    #     '''
    #     分析指定因素的指定特征，因素按组迭代，分别生成表格并按因素名保存

    #     Parameters
    #     ----------
    #     factor : str
    #         指定因素的名称
    #     features : list[str]
    #         指定特征的名称
    #     path : str
    #         默认保存路径

    #     Returns
    #     -------
    #     None.

    #     '''
    #     name_list = []
    #     iterate_list = []
    #     if features != []:
    #         df = df.loc[:, features]
    #     for name, group in df.groupby(factor):
    #         name_list.append(name)
    #         iterate_list.append(group)
    #         if path != '':
    #             group.to_csv(path + str(name) + '.csv')
    #     return name_list, iterate_list
    
    @staticmethod
    def columnIterate(df, columns_label, path=''):
        '''
        表格各列按组迭代

        Parameters
        ----------
        df : TYPE
            DESCRIPTION.
        feature : TYPE
            DESCRIPTION.
        path : TYPE, optional
            DESCRIPTION. The default is ''.

        Returns
        -------
        name_list : TYPE
            DESCRIPTION.
        iterate_list : TYPE
            DESCRIPTION.

        '''
        name_list = []
        iterate_list = []
        df = df.stack()
        for name, group in df.groupby(columns_label):
            group.name = name
            group = group.reset_index(columns_label, drop=True)
            group = group.to_frame()
            name_list.append(name)
            iterate_list.append(group)
            if path != '':
                group.to_csv(path + str(name) + '.csv')
        return name_list, iterate_list

    def dataFilter(self, factors=[], levels=[], 
                   features=[], feature_mode=0, levels_mode=0, path=''):
        '''
        按因素水平和特征进行数据筛选

        Parameters
        ----------
        levels : list[str]
            因素的水平名称
        features : list[str]
            特征名称
        feature_mod : 0,1,2
            0: 单一特征字符串； 1: 多特征列表； 2: 多特征范围
        path : str
            文件路径
        Returns
        -------
        df : DataFrame
            DESCRIPTION.

        '''
        df = self.DataFrame.sort_index()
        if factors != []:
            df = df.reset_index(level=factors)
            df = df.set_index(factors, drop=True)
        if levels != []:
            if levels_mode == 0:
                df = df.loc[tuple(levels), :]
            if levels_mode == 1:
                df = df.loc[levels, :]
            elif levels_mode == 2:
                df = df.loc[tuple(levels), :]
        if features != []:
            if feature_mode == 0:
                df = df.loc[:, features]
            elif feature_mode == 1:
                df = df.loc[:, features]
            elif feature_mode == 2:
                df = df.loc[:, features[0]:features[1]]
        if path !=  '':
            df.to_csv(path)
        return df
        
    @staticmethod
    def ColDatafilter(self, filterfunc, filterkey=[]):
        '''
        使用函数，筛选每列中的数据

        Parameters
        ----------
        df : TYPE
            DESCRIPTION.
        filterfunc : TYPE
            自定义函数.
        filterkey : list, optional
            列名列表. The default is [].

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        df = self.DataFrame
        if filterkey != []:
            for k in filterkey:
                df[k] = df[k].apply(filterfunc)
        else:
            for i in list(df.columns):
                df[i] = df[i].apply(filterfunc)
        return df
    
    @staticmethod
    def groupBy(df, mode='', path=''):
        '''
        分组计算

        Parameters
        ----------
        df : DataFrame
            筛选后的数据表格
        mode : str
            计算方法
        path : str
            文件夹路径

        Returns
        -------
        df : DataFrame
            DESCRIPTION.

        '''
        group = df.groupby(df.index.names)
        if mode == 'mean':
            df = group.mean()
        if mode == 'std':
            df = group.std()
        if mode == 'describe':
            df = group.describe().stack()
        if path != '':
            df.to_csv(path + mode + '.csv')
        else:
            return df
    
    @staticmethod
    def strList2numberList(str_list):
        '''
        列表中相同的字符串转为相同的数字，数字按原列表顺序从1开始
        
        Arguments:
            str_list: list[str,...,str]
        Returns:
            number_list: list[int,...,int]
        
        '''
        number_list = str_list.copy()
        filter_list = list(set(str_list))
        filter_list.sort(key=str_list.index)
        for i, index1 in enumerate(filter_list):
            for j, index2 in enumerate(number_list):
                if index1 == index2:
                    number_list[j] = float(i+1)
        return number_list
    
    @staticmethod
    def saveTable2mat(path, df):
        '''
        提取 table 行列索引、数据并保存为 mat 格式
        
        Arguments: 
            path: 保存文件全路径，str
            df: 数据表格，DataFrame

        '''
        
        index = list(df.index)
        columns = list(df.columns)
        Index = Table.strList2numberList(index)
        Index = np.array(Index, dtype=float).reshape(-1,1)
        scio.savemat(path,{'data':df.values,'class':Index,
                           'class_str':index,'variables_str':columns})
        return df.values, Index, index, columns
    
    @staticmethod
    def readcsv2dict(path):
        '''
        csv表格转字典（每列第一个元素为 key，其余为 value）

        Parameters
        ----------
        path : TYPE
            DESCRIPTION.

        Returns
        -------
        result : TYPE
            DESCRIPTION.

        '''
        import csv
        reader = csv.reader(open(path, 'r'))
        result = {}
        keys = []
        for i, row in enumerate(reader):
            if i > 0:
                for j, v in enumerate(row):
                    result[keys[j]] += [v]
            else:
                for v in row:
                    result[v] = []
                keys = row
        return result

    @staticmethod
    def data2dataframe(data, index, columns):
        '''
        数据构建表格

        Parameters
        ----------
        data : TYPE
            DESCRIPTION.
        index : TYPE
            DESCRIPTION.
        columns : TYPE
            DESCRIPTION.

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        df = pd.DataFrame(data, columns = columns)
        df.index = index
        return df
        
    @staticmethod
    def data2excel(data, columns=[], path='',
                       Index=False, Header=False):
        '''
        数据构建表格

        Parameters
        ----------
        data : TYPE
            DESCRIPTION.
        index : TYPE
            DESCRIPTION.
        columns : TYPE
            DESCRIPTION.

        Returns
        -------
        df : TYPE
            DESCRIPTION.

        '''
        
        df = pd.DataFrame(data)
        if path != '':
            df.to_excel(path, index=Index, header=Header)
        return df
    
    @staticmethod
    def plotCurves(df, figsize=(5,5), xticks=None, yticks=None, 
                   xlim=(0,128), ylim=(0,1), fontsize=10, path=''):
        '''
        Plot each row of *df* as a curve (the frame is transposed so
        columns become the x axis).

        Parameters
        ----------
        figsize : figure size in inches — now actually honoured; it was
            previously hard-coded to (5, 5) despite being a parameter
        xticks, yticks, xlim, ylim, fontsize : forwarded to DataFrame.plot
        path : '' shows the figure; otherwise it is saved at 600 dpi and
            the current figure is cleared
        '''
        import matplotlib.pyplot as plt

        df.T.plot(x=None, y=None, use_index=True, kind='line',
                  figsize=figsize, title=None, grid=None, legend=True,
                  xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                  fontsize=fontsize, colormap=None)
        if path == '':
            plt.show()
        else:
            plt.savefig(path, dpi=600)
            plt.clf()
    
class List:
    """Small collection of list helper utilities."""

    @staticmethod
    def count(lst, v=None):
        """Count occurrences in *lst* of each element of *v*, or of every
        element of *lst* when *v* is omitted or empty.

        The default used to be the mutable literal []; None is the safe
        equivalent, and an explicit [] still behaves as before.
        """
        targets = lst if not v else v
        return {n: lst.count(n) for n in targets}

    @staticmethod
    def findList(lst, v):
        """Return every index i with lst[i] == v."""
        return [i for i in range(len(lst)) if lst[i] == v]

    @staticmethod
    def rmDuplicates(lst):
        """get unique element list of a list
        
        Arguments:
            lst {[list]} -- [description]
        
        Returns:
            [list] -- [description]
        """
        # dict.fromkeys keeps the first occurrence of each element
        return list(dict.fromkeys(lst))

    @staticmethod
    def replace(lst, dct):
        """replace the list element, which is same with one of the key of the dict,
            to the dict[key]
        
        Arguments:
            lst {[list]} -- [description]
            dct {[dict]} -- [description]
        
        Returns:
            [list] -- [description]
        """
        # equality-based scan (not hashing) so unhashable elements and
        # cross-type equality keep working as before
        for i, item in enumerate(lst):
            for key, val in dct.items():
                if item == key:
                    lst[i] = val
        return lst

    @staticmethod
    def unique(list1):
        """Return unique elements preserving order; works even for
        unhashable elements since only == is used."""
        unique_list = []
        for x in list1:
            if x not in unique_list:
                unique_list.append(x)
        return unique_list
    
    
class Dict:
    '''
    Dict helper utilities for dicts whose values are parallel lists
    (column-oriented data): key renaming, row filtering, merging,
    conversion to numpy.
    '''

    @staticmethod
    def changeKeyName(dct, newKey, oldKey=None):
        """Rename dict keys in place.

        Arguments:
            dct {[dict]} -- dict to modify (mutated in place)
            newKey {[str]} -- new key name, or a suffix when oldKey is None
            oldKey {[str]} -- key to rename; when None, newKey is appended
                to every existing key instead

        Returns:
            [dict] -- the same (mutated) dict
        """
        if oldKey is None:
            # Snapshot the keys because dct is mutated while renaming.
            for k in list(dct.keys()):
                dct[k + newKey] = dct.pop(k)
        else:
            dct[newKey] = dct.pop(oldKey)
        return dct

    @staticmethod
    def dictFilterOut(dct, keyfilter, vfilter, flag='all'):
        """Drop the positions where dct[keyfilter] equals vfilter.

        flag='all' removes those positions from every column's list,
        otherwise only from the keyfilter column.

        Returns:
            (dict, list) -- filtered copy and the matched indices
        """
        d = dct.copy()
        ind = List.findList(d[keyfilter], vfilter)
        if flag == 'all':
            d = Dict.dictpop(d, ind, keys='all')
        else:
            d = Dict.dictpop(d, ind, keys=[keyfilter])
        return d, ind

    @staticmethod
    def dictFilterIn(dct, keyfilter, vfilter, flag='all'):
        """Keep only the positions where dct[keyfilter] equals vfilter.

        flag='all' keeps those positions in every column's list,
        otherwise only in the keyfilter column.

        Returns:
            (dict, list) -- filtered copy and the matched indices
        """
        d = dct.copy()
        ind = List.findList(d[keyfilter], vfilter)
        if flag == 'all':
            d = Dict.dictselect(d, ind, keys='all')
        else:
            d = Dict.dictselect(d, ind, keys=[keyfilter])
        return d, ind

    @staticmethod
    def dictpop(dct, ind, keys='all'):
        """Return a copy of dct with the list positions in ind removed
        from the given keys ('all' means every key)."""
        d = dct.copy()
        if keys == 'all':
            keys = d.keys()
        drop = set(ind)  # O(1) membership instead of scanning ind per item
        for k in keys:
            d[k] = [v for i, v in enumerate(d[k]) if i not in drop]
        return d

    @staticmethod
    def dictselect(dct, ind, keys='all'):
        """Return a copy of dct keeping only the list positions in ind
        for the given keys ('all' means every key)."""
        d = dct.copy()
        if keys == 'all':
            keys = d.keys()
        keep = set(ind)  # O(1) membership instead of scanning ind per item
        for k in keys:
            d[k] = [v for i, v in enumerate(d[k]) if i in keep]
        return d

    @staticmethod
    def merge(dict1, dict2):
        '''Merge dictionaries and keep values of common keys in a list.

        For a key present in both: if dict1's value is already a list and
        dict2's is not, dict2's value is appended; otherwise the two
        values are paired into a new two-element list.
        '''
        dict3 = {**dict1, **dict2}
        for key, value in dict3.items():
            if key in dict1 and key in dict2:
                if type(value) is not list and type(dict1[key]) is list:
                    dict3[key] = dict1[key] + [value]
                else:
                    dict3[key] = [dict1[key], value]
        return dict3

    @staticmethod
    def toNpArray(dct):
        """Get a numpy array and the keys list.

        Arguments:
            dct {[dict]} -- dict of equal-length value lists

        Returns:
            [list, numpy.array] -- keys, data with one column per key
        """
        # list() so the return matches the documented type (the old code
        # returned a dict_keys view).
        keys = list(dct.keys())
        values = np.asarray(list(dct.values())).T
        return keys, values

    @staticmethod
    def reset(dct, v):
        """Return a new dict with the same keys but every value set to v."""
        return {k: v for k in dct}


class Read:
    '''
    Readers for json / mat / blosc / pickle / csv / raw / tiff files.

    readJsonFiles reads every json file in a folder:
        allLabelDict = {'filename': {'labelname0': label0, ...}, ...}
        fileNameOnlyList = [filename0, filename1, ...], sorted
    '''
    def __init__(self):
        pass

    @staticmethod
    def readJsonFiles(path, labeltype='points'):
        """Read every labelme-style *.json file in a folder.

        Arguments:
            path {[str]} -- folder path

        Keyword Arguments:
            labeltype {str} -- which field of each shape to keep
                (default: {'points'})

        Returns:
            [dict, list] -- {filename: {label: field, ...}, ...} and the
                sorted list of file names without extension
        """
        import json
        jsonFiles = Files(path, ['*.json'])
        jsonFiles.sortFilesName()
        fileNameList = jsonFiles.filesWithPath
        fileNameOnlyList = jsonFiles.filesNoExt
        allLabelDict = {}
        for i, f in enumerate(fileNameList):
            filename = fileNameOnlyList[i]
            # Context manager closes the handle; the old open(f).read()
            # leaked one file descriptor per json file.
            with open(f) as fp:
                data = json.load(fp)
            newLabel = {}
            for label in data['shapes']:
                newLabel[label['label']] = label[labeltype]
            allLabelDict[filename] = newLabel
        return allLabelDict, fileNameOnlyList

    @staticmethod
    def readJson2Dict(path, labeltype='points'):
        """Read a whole json file into a dict.

        Arguments:
            path {[str]} -- file's path

        Keyword Arguments:
            labeltype {str} -- unused, kept for interface compatibility
                (default: {'points'})

        Returns:
            [dict] -- data dict
        """
        import json
        with open(path) as fp:
            return json.load(fp)

    @staticmethod
    def readJsonLabel2Dict(path, labeltype='points'):
        """Read the 'shapes' labels of one labelme-style json file.

        Arguments:
            path {[str]} -- file's path

        Keyword Arguments:
            labeltype {str} -- which field of each shape to keep
                (default: {'points'})

        Returns:
            [dict] -- {labelname: field, ...}
        """
        import json
        with open(path) as fp:
            data = json.load(fp)
        newLabel = {}
        for label in data['shapes']:
            newLabel[label['label']] = label[labeltype]
        return newLabel

    @staticmethod
    def readMat2Dict(path):
        """Load a MATLAB .mat file into a dict."""
        import scipy.io as sio
        return sio.loadmat(path)

    @staticmethod
    def readNpNdArrfromFile(filename):
        """Load a numpy ndarray from a bloscpack file."""
        import bloscpack as bp
        return bp.unpack_ndarray_from_file(filename)

    @staticmethod
    def readPickle(name):
        """Unpickle and return the object stored in file name."""
        import pickle
        with open(name, 'rb') as f:
            return pickle.load(f)

    @staticmethod
    def readcsv2array(path):
        """Read a headerless, comma-separated csv into a numpy array."""
        import pandas as pd
        df = pd.read_csv(path, sep=',', header=None)
        return df.values

    @staticmethod
    def readcsv2dict(path):
        """Read a csv with a header row into {column: [values...]}.

        All values are kept as strings.
        """
        import csv
        result = {}
        keys = []
        with open(path, 'r') as fp:
            for i, row in enumerate(csv.reader(fp)):
                if i == 0:
                    # First row defines the columns.
                    for v in row:
                        result[v] = []
                    keys = row
                else:
                    for j, v in enumerate(row):
                        result[keys[j]] += [v]
        return result

    @staticmethod
    def readRaw(path):
        """Return the raw bytes of a file."""
        with open(path, 'rb') as f:
            return f.read()

    @staticmethod
    def readRawPt(path, height=608, width=808):
        """Read a big-endian uint16 raw image and rescale it to 16 bit.

        Arguments:
            path {[str]} -- raw file path
            height {int} -- image height in pixels
            width {int} -- image width in pixels

        Returns:
            [numpy.ndarray] -- (height, width) uint16 image
        """
        with open(path, 'rb') as f:
            raw = f.read()
        dt = np.dtype(np.uint16).newbyteorder('>')
        # Bug fix: the original ignored height/width and hard-coded 608x808.
        img = np.frombuffer(raw, dtype=dt).reshape(height, width)
        # Move the low 12 bits up by 4 and the top 4 bits down by 12
        # (a 4-bit left rotate in uint16), then scale by 16 -- arithmetic
        # wraps modulo 2**16 as in the original.
        high = np.left_shift(img, 4)
        low = np.right_shift(img, 12)
        return (high + low) * 16

    @staticmethod
    def tiff2np(path, swapaxis=True):
        """Read a tiff file into a numpy array; swapaxis moves the first
        axis (channels) to the last position."""
        from tifffile import imread
        nparray = imread(path)
        if swapaxis:
            nparray = np.rollaxis(nparray, 0, 3)
        return nparray


class Write:
    '''
    Writers for csv / mat / json / heatmap / blosc / pickle / tiff files.
    '''
    def __init__(self):
        pass

    @staticmethod
    def save2DArray2csv(path, arr):
        """Save a 2-D array-like to csv, no index and no header."""
        import pandas as pd
        pd.DataFrame(arr).to_csv(path, index=False, header=False)

    @staticmethod
    def saveDict2csv(path, dsrc):
        """Save a dict of equal-length lists to csv; keys become the header."""
        import pandas as pd
        pd.DataFrame.from_dict(dsrc).to_csv(path, index=False)

    @staticmethod
    def saveDict2mat(path, d):
        """Save a dict to a MATLAB .mat file."""
        import scipy.io as sio
        sio.savemat(path, d)

    @staticmethod
    def saveDict2json(path, d):
        """Save a dict to a pretty-printed, key-sorted json file."""
        import json
        with open(path, 'w') as fp:
            json.dump(d, fp, sort_keys=True, indent=4)

    @staticmethod
    def saveHeatmap(filename, data, rangee=(0, 1), cmap='RdYlGn',
                    bar=True, orient='horizontal', dpi=800):
        """Save data as a heatmap image.

        Arguments:
            filename -- output image path
            data -- 2-D array-like
            rangee -- (vmin, vmax) color limits (immutable default now,
                lists still accepted)
            cmap -- matplotlib colormap name
            bar -- when True draw a 16x9 figure with a colorbar,
                otherwise save the raw heatmap pixels
            orient -- colorbar orientation
            dpi -- output resolution (only used when bar is True)
        """
        import matplotlib.pyplot as plt

        if bar:
            fig, ax = plt.subplots(figsize=(16, 9))
            im = ax.imshow(data, cmap=cmap)
            im.set_clim(rangee[0], rangee[1])
            fig.colorbar(im, orientation=orient, pad=0.2)
            plt.savefig(filename, dpi=dpi)
        else:
            plt.imsave(filename, data, format="png", cmap=cmap,
                       vmin=rangee[0], vmax=rangee[1])
        plt.clf()

    @staticmethod
    def saveNpNdArr2blp(filename, arr):
        """Save a numpy ndarray with bloscpack compression."""
        import bloscpack as bp
        bp.pack_ndarray_to_file(arr, filename)

    @staticmethod
    def savePickle(name, obj):
        """Pickle obj to file name, appending '.pkl' when missing."""
        import pickle
        if not name.endswith('.pkl'):
            name = name + '.pkl'
        with open(name, 'wb') as f:
            pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def csvWriteHeader(name, content):
        """(Over)write file name with a single csv header row taken from
        the field names in content."""
        import csv
        # newline='' stops the csv module doubling line endings on Windows;
        # the unused OrderedDict template of the original was removed.
        with open(name, 'w', newline='') as fou:
            dw = csv.DictWriter(fou, fieldnames=content, dialect='excel')
            dw.writeheader()

    @staticmethod
    def csvWriteRow(name, content):
        """Append one csv row (iterable of cells) to file name."""
        import csv
        with open(name, 'a', newline='') as f:
            csv.writer(f).writerow(content)

    @staticmethod
    def tiff(name, nparray, swapaxis=True):
        """Save an array as tiff; swapaxis moves channels-last data to
        channels-first before writing."""
        from tifffile import imsave
        if swapaxis:
            nparray = np.rollaxis(nparray, 2, 0)
        imsave(name, nparray)
        
       
import os, sys
class Tools():
    '''
    Small utilities: safe division, figure-to-array conversion,
    confusion-matrix plotting, table spread analysis, and stdout
    suppression (via the context-manager protocol).
    '''
    @staticmethod
    def div(a, b):
        """Divide a by b element-wise; non-finite results become zero.

        Args:
            a: the dividend (array-like or scalar)
            b: the divisor (array-like or scalar)

        Return:
            c: the quotient as a numpy array, with inf/-inf/NaN
               replaced by 0
        """
        with np.errstate(divide='ignore', invalid='ignore'):
            c = np.true_divide(a, b)
            # np.where also handles 0-d (scalar) results, which the old
            # in-place boolean assignment crashed on.
            c = np.where(np.isfinite(c), c, 0)
        return c

    @staticmethod
    def fig2data(fig):
        """Convert a Matplotlib figure to an (h, w, 3) uint8 RGB array.

        Args
            fig: a matplotlib figure
        Return:
            buf: a numpy 3-D array of RGB values (the old docstring said
                 RGBA, but tostring_rgb yields 3 channels)
        """
        # Draw the renderer first so the canvas buffer is populated.
        fig.canvas.draw()
        # np.frombuffer replaces the removed np.fromstring API; .copy()
        # keeps the returned array writable like the original.
        buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
        buf = buf.reshape(fig.canvas.get_width_height()[::-1] + (3,))
        return buf

    @staticmethod
    def plotMatrix(file_path='', start='', end='', labels=[], matrix='',
                   show_percentage=True):
        '''
        Plot a confusion matrix, parsed from a text file or given directly.

        Parameters
        ----------
        file_path : str
            matrix file path ('' means use the matrix argument instead)
        start : str
            pattern marking the first matrix line in the file
        end : str
            pattern marking the last matrix line in the file
        labels : list[str]
            axis labels
        matrix : str, e.g.'0.41,0.18,0.41; 0.25,0.63,0.12; 0.18,0,0.82'
            manually entered matrix
        show_percentage : bool
            normalize each row to fractions. The default is True.

        Returns
        -------
        None.

        '''
        import matplotlib.pyplot as plt
        from sklearn.metrics import ConfusionMatrixDisplay

        if file_path != '':
            data_list = []
            percentage_list = []
            # First pass: locate the start/end line numbers.
            # NOTE(review): if start/end never match, x or y stays unbound
            # and the code below raises NameError -- confirm inputs.
            with open(file_path, 'r') as f:
                for i, line in enumerate(f):
                    # Raw strings: '\s'/'\d' in plain literals are
                    # deprecated escape sequences.
                    if re.findall(r'\s*' + start, line):
                        x = i
                    if re.findall(r'\s*' + end, line):
                        y = i
            # Second pass: collect the numbers between those lines.
            with open(file_path, 'r') as f:
                for j, lines in enumerate(f):
                    if x <= j <= y:
                        data = re.findall(r'(?<= )\d+\.?\d*', lines)
                        if data:
                            data_list.append(data)
                matrix = np.array(data_list).astype(np.float64).T
            if show_percentage:
                # Normalize each row by its sum.
                for i in range(matrix.shape[0]):
                    label_sum = sum(matrix[i, :])
                    percentage_list.append(np.divide(matrix[i], label_sum))
                matrix = np.array(percentage_list)
        else:
            matrix = np.matrix(matrix)
        disp = ConfusionMatrixDisplay(confusion_matrix=matrix,
                                      display_labels=labels)
        disp.plot(cmap='Blues')
        plt.show()

    @staticmethod
    def max_diff(df, path='', scatternumber=10):
        '''
        Per column, compute the largest absolute spread (max - min of
        |values|) and plot the scatternumber biggest ones over the
        column means.

        Parameters
        ----------
        df : pandas.DataFrame
            numeric table; column labels must be numeric strings (they
            are cast to int for the x axis)
        path : str
            prefix to save 'Max_diff.png' under ('' shows the plot)
        scatternumber : int
            how many top-spread columns to annotate

        Returns
        -------
        numpy array of the top scatternumber column labels, sorted by
        descending spread.

        '''
        # Bug fix: pandas was used here but never imported at module level.
        import pandas as pd
        import matplotlib.pyplot as plt
        import matplotlib.ticker as ticker

        df_np = np.sort(abs(np.array(df)), axis=0)
        np_diff = df_np[-1, :] - df_np[0, :]
        df_diff = pd.DataFrame(np_diff, index=df.columns)
        df_diff_sort = df_diff.sort_values(0, ascending=False)

        df_mean = df.mean()
        sort = df_diff_sort.iloc[0:scatternumber, :]
        fig, ax = plt.subplots(figsize=(8, 6))
        x1 = df_mean.index.astype(np.float64).astype(np.int32)
        ax.plot(x1, np.array(df_mean))
        x2 = sort.index.astype(np.float64).astype(np.int32)
        y2 = np.array(df_mean.loc[sort.index])
        ax.scatter(x2, y2, c='r')
        for i in range(scatternumber):
            txt = ' NO.' + str(i + 1) + ' (' + str(x2[i]) + ')'
            ax.annotate(txt, (x2[i], y2[i]))
        ax.xaxis.set_major_locator(ticker.MultipleLocator(40))
        if path != '':
            plt.savefig(path + 'Max_diff.png', dpi=300)
        else:
            plt.show()
        plt.close()

        diff_sort = pd.DataFrame(df_diff_sort.index[0:scatternumber])
        return diff_sort.values[:, 0]

    @staticmethod
    def diff_list(lst_length, step=5):
        """Return evenly spaced multiples of step up to lst_length
        (inclusive) as an int64 numpy array."""
        num = int(lst_length / step)
        stop = num * step
        return np.linspace(start=step, stop=stop, num=num).astype(np.int64)

    ## suppress stdout when used as a context manager ##
    def __enter__(self):
        # Redirect stdout to the null device for the duration of the block.
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout

    @staticmethod
    def bands_cut(path0, ext):
        """Read spectral tables under path0, sort them by index, and write
        band-filtered copies: '<name>.csv' (sorted), '<name>_band1.csv'
        (columns < 1000) and '<name>_band2.csv' (additionally > 400).

        NOTE(review): output paths are built by string concatenation
        (path0 + name), so path0 must end with a path separator --
        confirm against callers. Column labels are presumably
        wavelengths in nm -- verify.
        """
        process = Files(path0, ext, flag='all')
        for path1, name in zip(process.filesWithPath, process.filesNoExt):
            if name == 'All':
                table_processing = Table.read(path1, header=0,
                                              index_col=[0, 1], sep=',')
                table_processing = Table.resetIndex(table_processing.DataFrame)
            else:
                table_processing = Table.read(path1, header=0,
                                              index_col=[0], sep=',')
            df = table_processing.DataFrame.sort_index()
            df.to_csv(path0 + name + '.csv')
            # Zero out columns at/above 1000, then drop all-zero columns.
            band1 = df.columns.astype(np.float64) < 1000
            df = df * band1
            df = df.loc[:, (df != 0).any(axis=0)]
            df.to_csv(path0 + name + '_band1.csv')
            # Then keep only columns above 400.
            band2 = df.columns.astype(np.float64) > 400
            df = df * band2
            df = df.loc[:, (df != 0).any(axis=0)]
            df.to_csv(path0 + name + '_band2.csv')
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        