# -*- coding: utf-8 -*-
"""
Created on Thu Nov 24 20:26:35 2022

@author: xtp

"""

import numpy as np
import os
import pandas as pd
import re
import glob
from fnmatch import fnmatch
from scipy import stats
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import seaborn
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
from statannotations.Annotator import Annotator
from sklearn.metrics import ConfusionMatrixDisplay


class Formatting():
    '''
    Fluorescence data reading, pre-processing and writing.

    Wraps a pandas DataFrame together with cached metadata about its
    index levels, columns and shape.
    '''

    def __init__(self, dataframe):
        self.DataFrame = dataframe                  # the wrapped table
        self.Factors = list(dataframe.index.names)  # index level names
        self.Level = list(dataframe.index)          # index entries
        self.Features = list(dataframe.columns)     # column labels
        self.Shape = dataframe.shape

    def print_object(self):
        '''Print every attribute of this object as "name:value" blocks.'''
        print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))

    @classmethod
    def readFile2Table(cls, path, ext, flag='all', subfolder=False,
                       header=None, index_col=None, skiprows=None,
                       feature_name='', addroindex=False, addNaNcolumn=False,
                       dropindex=False):
        '''
        Read every matching Excel file under *path* and stack them into a
        single table indexed by file name.

        Parameters
        ----------
        path : str
            Folder containing the data files.
        ext : list[str]
            Glob patterns selecting the files, e.g. ['*.xlsx'].
        flag : str
            'this' = top folder only, 'all' = recurse into sub-folders.
        subfolder : bool
            If True, prefix the parent folder name to the file-name index.
        header, index_col, skiprows
            Passed through to pandas.read_excel.
        feature_name : str
            Name given to each file's row index before transposing.
        addroindex : bool
            Add region-of-interest (leaf) labels as a column index.
        addNaNcolumn : bool
            Insert a NaN column when a file is missing one ROI.
        dropindex : bool
            Drop the per-file index when attaching the file-name index.

        Returns
        -------
        Formatting
            Wrapper around the concatenated table.
        '''
        if skiprows is None:  # avoid a shared mutable default argument
            skiprows = []
        process = Files(path, ext, flag=flag)
        df_frame = []
        # NOTE: loop variable renamed so it no longer shadows the `path`
        # parameter (the old code reused `path` for each file path).
        for file_path, name in zip(process.filesWithPath, process.filesNoExt):
            df = pd.read_excel(file_path, header=header, index_col=index_col,
                               skiprows=skiprows)
            df.index.name = feature_name
            if addroindex:
                df = Formatting.__addROIndex(
                    df,
                    default=['Leaf1', 'Leaf2', 'Leaf3', 'Leaf4', 'Leaf5'],
                    exception=['Leaf2', 'Leaf3', 'Leaf4', 'Leaf5'])
            if addNaNcolumn:
                df = Formatting.__addNaNcolumn(df, default_number=5, loc=0,
                                               columns_name='Leaf1')
            df = df.astype(float).T
            if addroindex:
                df.index.name = 'ROI'  # ROI class name after transpose
            if subfolder:
                # Use "<parent folder><file name>" as the index entry.
                path_list = re.split(r'\\|\/', file_path)
                foldername = path_list[-2]
                df = Formatting.addIndex(df, index=foldername + name,
                                         class_name='Filename', drop=dropindex)
            else:
                df = Formatting.addIndex(df, index=name,
                                         class_name='Filename', drop=dropindex)
            df_frame.append(df)
        df_table = pd.concat(df_frame, axis=0, ignore_index=False)
        return cls(df_table)

    @classmethod
    def resetTableIndex(cls, df, flags_list=None, filter_list=None,
                        new_index_list=None, class_name_list=None,
                        dropindex=False):
        '''
        Rebuild the table index by classifying the file-name index.

        Parameters
        ----------
        df : DataFrame
            Table indexed by file name.
        flags_list : list[list[int, int]], optional
            Character ranges of the file name to inspect, one per class.
        filter_list : list[list[str]], optional
            Substrings to look for inside each range.
        new_index_list : list[list[str]], optional
            Replacement labels, parallel to filter_list.
        class_name_list : list[str], optional
            Name of each new index level.

            Example:
                flags_list = [[0,3],[3,5],[5,7]]
                filter_list = [['20','335','958'],
                               ['1','2','3'],
                               ['1','2','3','4','5','6']]
                new_index_list = [['JD20','XY335','ZD958'],
                                  ['N0','N1','N2'],
                                  ['W','W','W','D','D','D']]
                class_name_list = ['Varieties','Nitrogen','Water']

        dropindex : bool
            If False, keep the original file-name index as level 'Index'.

        Returns
        -------
        Formatting
            Wrapper around the re-indexed table.
        '''
        flags_list = flags_list or []
        filter_list = filter_list or []
        new_index_list = new_index_list or []
        class_name_list = class_name_list or []
        try:
            # Flatten any extra MultiIndex levels back into columns.
            df = df.reset_index(level=list(range(1, len(df.index[0]))),
                                drop=False)
        except IndexError:
            pass  # plain (non-tuple) index: nothing to flatten
        for flags, filters, new_index, class_name in zip(
                flags_list, filter_list, new_index_list, class_name_list):
            df = Formatting.__convertIndex(df, flags=flags, filters=filters,
                                           new_index=new_index,
                                           class_name=class_name)
        if not dropindex:
            # 'Index' is the column name produced by reset_index() for the
            # original file-name index.
            df = df.reset_index()
            df_multindex = df.set_index(['Index'] + class_name_list)
        else:
            df_multindex = df.set_index(class_name_list)
        return cls(df_multindex)

    @staticmethod
    def __addROIndex(df, default=None, exception=None):
        '''
        Manually attach a column index (ROI labels) to *df*.

        Tries the *default* label list first; if the column count does not
        match (pandas raises ValueError), falls back to *exception*.
        '''
        default = default or []
        exception = exception or []
        df_arr = np.array(df)
        try:
            df = pd.DataFrame(df_arr, index=df.index, columns=default)
        except ValueError:
            # Fewer columns than expected: use the fallback label list.
            df = pd.DataFrame(df_arr, index=df.index, columns=exception)
        return df

    @staticmethod
    def __addNaNcolumn(df, default_number, loc, columns_name=''):
        '''
        Insert a NaN column at *loc* when *df* is missing one column.

        Parameters
        ----------
        df : DataFrame
        default_number : int
            Expected number of ROI columns.
        loc : int
            Position at which the NaN column is inserted.
        columns_name : str
            Label of the inserted column.
        '''
        if len(df.columns) == default_number:
            return df
        df.insert(loc, columns_name, np.nan)
        return df

    @staticmethod
    def addIndex(df, index='', class_name='', drop=False):
        '''
        Attach one constant index value (e.g. the file name) to every row.

        Parameters
        ----------
        df : DataFrame
        index : str
            Value repeated for every row.
        class_name : str
            Name of the new index level.
        drop : bool
            If False, keep the original index as a second level.
        '''
        index_list = [index] * df.shape[0]
        df = df.reset_index(drop=drop)
        df[class_name] = index_list
        if not drop:
            # First column after reset_index() holds the old index values.
            df = df.set_index([class_name, df.columns[0]], drop=True)
        else:
            df = df.set_index(class_name, drop=True)
        return df

    @staticmethod
    def __convertIndex(df, flags=None, filters=None, new_index=None,
                       class_name=''):
        '''
        Classify the existing index and store the result as a new column
        (later promoted to an index level by resetTableIndex).

        Parameters
        ----------
        df : DataFrame
            Table with a string index.
        flags : list[int, int]
            Character range of the index string to inspect.
        filters : list[str]
            Substrings identifying each class.
        new_index : list[str]
            Replacement label for each filter, positionally matched.
        class_name : str
            Name of the new column.
        '''
        flags = flags or []
        filters = filters or []
        new_index = new_index or []
        index = list(df.index)
        for f, filterr in enumerate(filters):
            for i, index_str in enumerate(index):
                if filterr in index_str[flags[0]:flags[1]]:
                    index[i] = new_index[f]
        df[class_name] = index
        return df

    @staticmethod
    def setIndex(df, keys, drop=True, append=False):
        '''
        Set the table index in place and return the table.

        Parameters
        ----------
        df : DataFrame
        keys : Series, Index, str or list[str]
            Index values directly, or column label(s) to promote.
        drop : bool, optional
            Delete the column(s) used as the new index. Default True.
        append : bool, optional
            Append to the existing index instead of replacing it.
        '''
        df.set_index(keys=keys, drop=drop, append=append,
                     inplace=True, verify_integrity=False)
        return df

    @staticmethod
    def multiIndexExtract(df, first=None, second=None, names=None):
        '''
        Build a two-level MultiIndex by slicing the two existing levels.

        Parameters
        ----------
        df : DataFrame with a two-level index of strings.
        first, second : [int, int] or [int], optional
            Slice (or single position) taken from level 0 / level 1.
        names : list[str], optional
            Names of the resulting levels.

        Returns
        -------
        pd.MultiIndex
        '''
        first = first or []
        second = second or []
        names = names or []
        entries = list(df.index)  # hoisted: was rebuilt on every access
        index_frame = []
        for entry in entries:
            try:
                firstindex = entry[0][first[0]:first[1]]
            except IndexError:
                # single-position spec: take one character
                firstindex = entry[0][first[0]]
            try:
                secondindex = entry[1][second[0]:second[1]]
            except IndexError:
                secondindex = entry[1][second[0]]
            index_frame.append((firstindex, secondindex))
        # Build once after the loop (the old code rebuilt it per row and
        # crashed with UnboundLocalError on an empty table).
        newindex = pd.MultiIndex.from_tuples(index_frame, names=names)
        return newindex

    @staticmethod
    def multiIndexExtract2(df, flags_list=None, names=None):
        '''
        Split a single string index into several MultiIndex levels.

        Parameters
        ----------
        df : DataFrame with a flat string index.
        flags_list : list[[int, int]] or list[[int]], optional
            One slice (or single position) per target level.
        names : list[str], optional
            Names of the resulting levels.

        Returns
        -------
        DataFrame re-indexed with the new MultiIndex.
        '''
        flags_list = flags_list or []
        names = names or []
        entries = list(df.index)  # hoisted: was rebuilt on every access
        index_frame = []
        for entry in entries:
            parts = []
            for f in flags_list:
                try:
                    parts.append(entry[f[0]:f[1]])
                except IndexError:
                    # one-element spec: take a single character
                    parts.append(entry[f[0]])
            index_frame.append(tuple(parts))
        newmultindex = pd.MultiIndex.from_tuples(index_frame, names=names)
        df = pd.DataFrame(np.array(df), index=newmultindex, columns=df.columns)
        return df

    @staticmethod
    def featureIterate(df, factor='', feature='', features=None, path=''):
        '''
        Pivot selected features against *factor*, then iterate the groups
        per file, optionally saving each group as "<name>.csv".

        Parameters
        ----------
        df : DataFrame
        factor : str
            Index level moved to the columns.
        feature : str
            Column level stacked into the rows.
        features : list[str], optional
            Subset of columns to keep (all if empty).
        path : str
            Output folder prefix; empty string disables saving.

        Returns
        -------
        (list of group names, list of group DataFrames)
        '''
        features = features or []
        name_list = []
        iterate_list = []
        if features:
            df = df.loc[:, features]
        df = df.stack(feature).unstack(factor)
        for name, group in df.groupby('Filename'):
            name_list.append(name)
            iterate_list.append(group)
            if path != '':
                group.to_csv(path + str(name) + '.csv')
        return name_list, iterate_list

    @staticmethod
    def factorIterate(df, factor='', features=None, path=''):
        '''
        Iterate the table grouped by one factor, optionally saving each
        group as "<name>.csv".

        Parameters
        ----------
        df : DataFrame
        factor : str
            Index level to group by.
        features : list[str], optional
            Subset of columns to keep (all if empty).
        path : str
            Output folder prefix; empty string disables saving.

        Returns
        -------
        (list of group names, list of group DataFrames)
        '''
        features = features or []
        name_list = []
        iterate_list = []
        if features:
            df = df.loc[:, features]
        for name, group in df.groupby(factor):
            name_list.append(name)
            iterate_list.append(group)
            if path != '':
                group.to_csv(path + str(name) + '.csv')
        return name_list, iterate_list

    @classmethod
    def groupBy(cls, df, mode='mean', level='', axis=1, path=''):
        '''
        Aggregate the table grouped along one index/column level.

        Parameters
        ----------
        df : DataFrame
        mode : str
            Aggregation name passed to .agg (e.g. 'mean').
        level : str
            Level to group by.
        axis : int
            0 = group rows, 1 = group columns.
        path : str
            Output prefix; "<path><mode>.csv" is written when non-empty.

        Returns
        -------
        Formatting wrapper around the aggregated table.
        '''
        # NOTE(review): groupby(axis=...) is deprecated in pandas >= 2.1;
        # kept for behavioural compatibility with the rest of the module.
        df = df.groupby(level=level, axis=axis).agg(mode)
        if path != '':
            df.to_csv(path + mode + '.csv')
        return cls(df)

    @staticmethod
    def dataFilter(df, factors=None, levels=None, features=None,
                   feature_mode=0, levels_mode=0, path='', dropindex=False):
        '''
        Filter the table by factor levels and by feature columns.

        Parameters
        ----------
        df : DataFrame
        factors : list[str], optional
            Index levels used for the level selection.
        levels : list[str], optional
            Level values to keep (matched as a partial index tuple).
        features : list[str], optional
            Feature columns to keep.
        feature_mode : {0, 1, 2}
            0/1: *features* is a list of labels; 2: [start, end] label range.
        levels_mode : int
            Kept for backward compatibility; every mode now performs the
            same single tuple-based selection (the old code filtered twice
            for mode 0 and not at all for mode 1).
        path : str
            File path; the result is saved as CSV when non-empty.
        dropindex : bool
            Drop the original index columns instead of restoring them.

        Returns
        -------
        DataFrame
        '''
        factors = factors or []
        levels = levels or []
        features = features or []
        orgin_index_names = list(df.index.names)
        if factors:
            df = df.reset_index()
            df = df.set_index(factors, drop=False)
        if levels:
            df = df.loc[tuple(levels), :]
        if factors:
            # Restore (or discard) the original index *before* the feature
            # selection so the index-name columns are still available.
            if dropindex:
                df = df.drop(columns=orgin_index_names, errors='ignore')
            else:
                df = df.set_index(orgin_index_names, drop=True)
        if features:
            if feature_mode == 2:
                df = df.loc[:, features[0]:features[1]]  # inclusive range
            else:
                df = df.loc[:, features]
        if path != '':
            df.to_csv(path)
        return df

class Graph():
    '''
    Data analysis and plotting helpers (box plots, cluster heatmaps,
    confusion matrices, correlation scatter plots).
    '''
    def __init__(self):
        pass
    
    @staticmethod
    def plotNumericBox(df, figsize=(), xlabel='', ylabel='', path='', 
                      fontsize=20, testmethod='Mann-Whitney', testitem='All'):
        '''
        Draw a box plot with one box per table column, annotated with
        pairwise significance tests.

        Parameters
        ----------
        df : DataFrame
            Data table; each column becomes one box.
        figsize : tuple(a, b)
            Figure size.
        xlabel : str
            X-axis label.
        ylabel : str
            Y-axis label.
        path : str
            Save path; if empty the figure is shown instead of saved.
        fontsize : int
            Font size for ticks and axis labels.
        testmethod : str
            Hypothesis-test name passed to statannotations:
                'Mann-Whitney','Mann-Whitney-gt','Mann-Whitney-ls',
                't-test_ind','t-test_welch','t-test_paired',
                'Wilcoxon','Wilcoxon-legacy',
                'Kruskal','Levene'.
        testitem : str
            Which column pairs to test: 'Near' (adjacent only) or 'All'.

        Returns
        -------
        None.

        '''
        df = df.astype(float)
        plt.figure(figsize=figsize)
        box = sns.boxplot(data=df, palette="Reds", showmeans=True, notch=False)
        # SimHei font so CJK labels render; keep the minus sign drawable.
        plt.rcParams['font.sans-serif']=['SimHei']
        plt.rcParams['axes.unicode_minus']=False
        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)
        plt.xticks(fontsize=fontsize)
        plt.yticks(fontsize=fontsize)
        index = list(df.columns)
        # NOTE(review): `pairs` is left unbound if testitem is neither
        # 'All' nor 'Near' — the Annotator call below would then raise.
        if testitem == 'All':
            pairs = list(itertools.combinations(index, 2))
        if testitem == 'Near':
            pairs = []
            for i in range(0, len(index)-1):
                pair = tuple(index[i:i+2])
                pairs.append(pair)
        annotator = Annotator(box, pairs, data=df)
        annotator.configure(test=testmethod, text_format='star', 
                            line_height=0.02,line_width=1,fontsize=fontsize)
        annotator.apply_and_annotate()
        if path == '':
            plt.show()
        else:
            plt.savefig(path, dpi=600)
            plt.clf()
    
    
    @staticmethod
    def plotHeatmap(table, logarithm=False, cor_transpose=False, 
                    vmax=None, vmin=None, path=''):
        '''
        Draw a clustered heatmap and a clustered correlation heatmap.

        Parameters
        ----------
        table : DataFrame
            Heatmap data table.
        logarithm : bool
            Apply log1p to the data first. The default is False.
        cor_transpose : bool
            Transpose before computing the correlation heatmap.
            The default is False.
        vmax : int
            Upper bound of the heatmap colour scale.
        vmin : int
            Lower bound of the heatmap colour scale.
        path : str
            Output folder path; empty string disables saving.

        Returns
        -------
        None.

        '''
        if logarithm == True:
            table = table.apply(np.log1p)
        plt.figure()
        heatmap = seaborn.clustermap(table, pivot_kws=None, 
                                     method='average', metric='euclidean', 
                                     z_score=None, standard_scale=None, 
                                     figsize=(10, 10), cbar_kws=None, 
                                     row_cluster=True, col_cluster=True, 
                                     row_linkage=None, col_linkage=None, 
                                     row_colors=None, col_colors=None, 
                                     mask=None, dendrogram_ratio=0.2, 
                                     colors_ratio=0.03, tree_kws=None, 
                                     cbar_pos=(0.02, 0.8, 0.05, 0.18),
                                     vmax=vmax, vmin=vmin)
        if path != '':
            heatmap.savefig(path+'heatmap.png', dpi=300)
        if cor_transpose == True:
            table = table.T
        plt.figure()
        table_corr = table.corr(method='pearson')
        correlation_heatmap = seaborn.clustermap(table_corr)
        if path != '':
            correlation_heatmap.savefig(path+'correlation heatmap.png', 
                                        dpi=300)
    
    @staticmethod
    def plotMatrix(file_path='', start='', end='', labels=[], matrix='',
                   show_percentage=True):
        '''
        Build and display a confusion matrix.

        Parameters
        ----------
        file_path : str
            Path of a text file containing the matrix; if empty, a
            hard-coded demo matrix is used instead.
        start : str
            Pattern marking the first matrix line in the file.
        end : str
            Pattern marking the last matrix line in the file.
        labels : list[str]
            Class labels for the matrix axes.
        matrix : str, e.g.'0.41,0.18,0.41; 0.25,0.63,0.12; 0.18,0,0.82'
            Manually supplied matrix (currently only used as doc example;
            the else-branch below uses its own hard-coded value).
        show_percentage : bool
            Normalise each row to percentages. The default is True.

        Returns
        -------
        None.

        '''
        if file_path != '':
            data_list = []
            percentage_list = []
            # First pass: locate the start/end line numbers of the matrix.
            # NOTE(review): `x`/`y` stay unbound if start/end never match,
            # and the patterns are non-raw strings ('\s*' triggers a
            # DeprecationWarning on modern Python) — confirm inputs.
            with open(file_path,'r') as f:
                for i, line in enumerate(f.readlines()):
                    m = re.findall('\s*'+ start, line)
                    n = re.findall('\s*' + end, line)
                    if m:
                        x = i
                    if n:
                        y = i
            # Second pass: pull the numbers out of the located lines.
            with open(file_path,'r') as f:  
                for j, lines in enumerate(f.readlines()):
                    if j in range(x, y+1):
                        data = re.findall('(?<= )\d+\.?\d*', lines)
                        if data:
                            data_list.append(data)
                matrix = np.array(data_list).astype(np.float64).T
            if show_percentage == True:
                # Row-normalise so each row sums to 1.
                for i in range(matrix.shape[0]):
                    label_sum = sum(matrix[i,:])
                    percentage = np.divide(matrix[i], label_sum)
                    percentage_list.append(percentage)
                matrix = np.array(percentage_list)    
        else:
            matrix = np.matrix('0.41,0.18,0.41; 0.25,0.63,0.12; 0.18,0,0.82')
        disp = ConfusionMatrixDisplay(confusion_matrix=matrix, 
                                      display_labels=labels)
        disp.plot(cmap='Blues')
        plt.show()
    
    @staticmethod
    def dataCorrelationFig(pathname, x, y, name, colordict, markerdict={}, 
                           xlabel='x', ylabel='y', tittle='tittle'):            # correlation analysis
        '''
        Scatter plot of y vs x with per-group markers/colours, a linear
        regression line, and a combined legend; saved to *pathname*.

        name: either a list of single-char group labels (colour only) or a
        list of label lists — name[0] selects markers via markerdict, the
        remaining lists are joined with '_' to select colours via colordict.
        '''

        datasize = len(x)
        slist = []
        clist = []
        if len(name[0]) == 1:
            # Single grouping: one marker, colours from colordict.
            slist = 'o'
            for n in name:
                clist.append(colordict[n])
        else:
            for n in name[0]:
                slist.append(markerdict[n])
            if len(name) == 2:
                for n in name[1]:
                    clist.append(colordict[n])
            if len(name) > 2:
                # Join the remaining label lists into compound colour keys.
                nameback = name[1:]
                for i in range(datasize):
                    c = ''
                    for n in nameback:
                        c = c + '_'+n[i]
                    clist.append(colordict[c[1:]])
        print(slist, clist)
        unique_markers = set(slist)
        fig, ax = plt.subplots()

        ax.cla()
        # One scatter call per marker shape (matplotlib cannot vary the
        # marker within a single call).
        for n in unique_markers:
            inds = [i for i, ele in enumerate(slist) if ele == n]
            xsub = [x[i] for i in inds]
            ysub = [y[i] for i in inds]
            csub = [clist[i] for i in inds]
            ax.scatter(xsub, ysub, marker=n, c=csub)
            print(xsub, ysub, csub)

        #para = np.poly1d(np.polyfit(x,y,1))
        #xp = np.linspace(min(x), max(x), 100)
        #plt.plot(xp, para(xp), '-k')
        # regression part
        x = np.asarray(x)
        y = np.asarray(y)
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        line = slope*x+intercept
        # NOTE(review): the label prints r_value but calls it $r^2$ —
        # confirm whether r_value**2 was intended.
        ax.plot(x, line, 'k',
                label='y={:.2f}x+{:.2f}\t$r^2$ = {:.2f}'.format(slope, intercept, r_value))
        # end
        # ax.legend(fontsize=9)
        legend_elements = []
        if len(name[0]) > 1:
            for k, v in markerdict.items():
                legend_elements.append(Line2D([0], [0], marker=v, color='k', label=k,
                                              markerfacecolor='w', markersize=5))
        for k, v in colordict.items():
            legend_elements.append(Patch(facecolor=v, edgecolor='w',
                                         label=k))

        legend_elements.append(Line2D([0], [0], color='k', lw=1,
                                      label='y={:.2f}x+{:.2f}\n$r^2$ = {:.2f}'.
                                      format(slope, intercept, r_value)))
        # Shrink the axes to leave room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

        ax.legend(handles=legend_elements,
                  loc='center left', bbox_to_anchor=(1, 0.5))
        plt.title(tittle)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.savefig(pathname, dpi=360)
        plt.show()

class Files():
    '''
    File filtering plus path and file-name extraction for a folder.
    '''

    def __init__(self, folder, ext, flag='this'):
        '''
        Collect the files under *folder* matching the patterns in *ext*.

        Parameters
        ----------
        folder : str
            Folder path (normalized on entry).
        ext : list[str]
            Glob patterns to match, e.g. ['*.xlsx'].
        flag : str
            'this' = top folder only, 'all' = recurse into sub-folders.

        Attributes
        ----------
        path : str               normalized folder path
        ext : list               per-file pattern tags (see __getfiles)
        count : int              number of matched files
        filesNoExt : list        file names without extension
        files : list             file names with extension
        filesWithPath : list     full paths
        filesWithPathNoExt : list  full paths without extension
        filesPath : list         the folder path repeated count times
        '''
        folder = os.path.normpath(folder)
        self.path = folder
        self.ext = ext
        self.filesNoExt, self.files, self.filesWithPath, self.count = \
            [], [], [], 0

        if flag == 'this':
            [self.filesNoExt, self.files, self.filesWithPath,
             self.count, self.ext] = Files.__getfiles(folder, ext)
        elif flag == 'all':
            [self.filesNoExt, self.files, self.filesWithPath,
             self.count, self.ext] = Files.__getfilesAll(folder, ext)

        self.filesPath = [folder] * self.count
        self.filesWithPathNoExt = [os.path.splitext(f)[0]
                                   for f in self.filesWithPath]

    @staticmethod
    def __getfiles(dir0, filter0):
        '''
        Collect files directly inside *dir0* matching the given patterns.

        Parameters
        ----------
        dir0 : str
            Folder path.
        filter0 : list[str]
            Glob patterns.

        Returns
        -------
        (names without extension, names with extension, full paths,
         count, pattern tag list)
        '''
        # NOTE(review): chdir is a process-wide side effect, but the glob
        # patterns below are relative, so it is kept for compatibility.
        os.chdir(dir0)
        filenamesStr = []
        filenamesStr_ext = []
        fileswithpath = []
        ext = []
        for t in filter0:
            for file in glob.glob(t):
                [fileNoExt, ext2] = os.path.splitext(file)
                filenamesStr.append(fileNoExt)
                fileswithpath.append(dir0 + '/' + file)
                filenamesStr_ext.append(file)
            e = len(glob.glob(t))
            # Repeat the pattern once per matched file.  splitting on
            # 'o_0' is a no-op unless the pattern contains that marker —
            # presumably a project-specific tag; confirm before changing.
            for i in t.split('o_0') * e:
                ext.append(i)
        count = len(filenamesStr)
        return filenamesStr, filenamesStr_ext, fileswithpath, count, ext

    @staticmethod
    def __getfilesAll(path, filter_):
        '''
        Collect files under *path* and all of its sub-directories matching
        the given patterns.

        Parameters
        ----------
        path : str
            Root folder path.
        filter_ : list[str]
            Glob patterns.

        Returns
        -------
        (names without extension, names with extension, full paths,
         count, pattern tag list)
        '''
        filenamesStr = []
        fileswithpath = []
        filenamesStr_ext = []
        ext = []
        # os.walk yields (current folder, its sub-folder names, its files)
        for path, subdirs, f in os.walk(path):
            for name in f:
                for t in filter_:
                    if fnmatch(name, t):
                        [fileNoExt, ext2] = os.path.splitext(name)
                        filenamesStr.append(fileNoExt)
                        fileswithpath.append(path + '/' + name)
                        filenamesStr_ext.append(name)
                    # NOTE(review): glob here runs against the process CWD,
                    # not against the walked folder — the resulting ext
                    # list looks unreliable; confirm it is actually used.
                    e = len(glob.glob(t))
                    for i in t.split('o_0') * e:
                        ext.append(i)
        count = len(filenamesStr)
        return filenamesStr, filenamesStr_ext, fileswithpath, count, ext

    def sortFilesName(self, reg=(0, 0)):
        '''Sort the collected file lists in human (natural) order.'''
        self.filesNoExt, self.filesSortedIndex = Files.sortListStringHuman(
                self.filesNoExt, reg=reg)
        self.files = [self.files[i] for i in self.filesSortedIndex]
        self.filesWithPath = [self.filesWithPath[i]
                              for i in self.filesSortedIndex]

    def splitFilesName(self, sp='__'):
        '''
        Split every extension-less file name on *sp* and stack the pieces
        column-wise: result[i][j] is piece i of file j.
        '''
        b = self.filesNoExt[0].split(sp)
        for file in self.filesNoExt[1:]:
            name = file.split(sp)
            b = np.column_stack((b, name))
        self.namesElements = b
        return b

    @staticmethod
    def sortListStringHuman(text, reg=(0, 0)):
        """Sort a string list in human (natural) order.

        Args:
            text: the input string list.
            reg: (start, stop) character slice of each string used as the
                sort key; (0, 0) means the whole string, stop == 0 means
                "to the end".

        Returns:
            (sorted list, permutation indices into the original list)

        """
        # `re` comes from the module-level import; the old local import
        # was redundant.
        def natural_keys(text):
            '''
            alist.sort(key=natural_keys) sorts in human order
            http://nedbatchelder.com/blog/200712/human_sorting.html
            (See Toothy's implementation in the comments)
            float regex comes from https://stackoverflow.com/a/12643073/190597
            '''
            def atof(text):
                try:
                    retval = float(text)
                except ValueError:
                    retval = text
                return retval

            return [atof(c) for c in re.split
                    (r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text)]

        if reg == (0, 0):
            textReg = text
        elif reg[1] == 0:
            textReg = [t[reg[0]:] for t in text]
        else:
            textReg = [t[reg[0]:reg[1]] for t in text]

        retIndex = sorted(range(len(textReg)),
                          key=lambda k: natural_keys(textReg[k]))
        retV = [text[i] for i in retIndex]
        return retV, retIndex

    @staticmethod
    def mkdirPath(pathstr):
        '''Create every missing folder along *pathstr*, printing progress.'''
        # `os` comes from the module-level import; the old local import
        # was redundant.
        path = os.path.normpath(pathstr)
        pathparts = path.split(os.sep)
        # Start at index 2: skips the drive/root components, which are
        # assumed to exist already (TODO confirm on non-Windows paths).
        for i in range(2, len(pathparts)):
            crtpath = '/'.join(pathparts[0:i + 1])
            print(crtpath, end='    ')
            if os.path.isdir(crtpath):
                print('this is a folder')
            else:
                print('this is not a folder: ', end=' ')
                os.mkdir(crtpath)
                print('created')

    @staticmethod
    def getfilePath():
        '''
        Return this module's own location.
        https://blog.csdn.net/NeverLate_gogogo/article/details/111867089

        Returns
        -------
        path_real : str
            Real (symlink-resolved) file path.
        dir_real : str
            Directory of path_real.
        path_abs : str
            Absolute file path.
        dir_abs : str
            Directory as given by __file__.

        '''
        path_real = os.path.realpath(__file__)
        dir_real = os.path.dirname(path_real)
        path_abs = os.path.abspath(__file__)
        dir_abs = os.path.dirname(__file__)

        return path_real, dir_real, path_abs, dir_abs


if __name__=="__main__":

    """ Path Statement """
    
    path_processing = 'D:/Data/2023-Herbicide/processing/'
    ext0 = ['*.xlsx']
    path_result = 'D:/Data/2023-Herbicide/result/'
    ext1 = ['*.csv']
    
    """ Read and Formatting Data """
    
    Files.mkdirPath(path_result)
    indicators = Formatting.readFile2Table(path_processing, ext0, 
                        subfolder=False, header=None, index_col=0, 
                        skiprows=[0], feature_name='Index',
                        dropindex=True)
    df_fn = indicators.DataFrame
    df = Formatting.multiIndexExtract2(indicators.DataFrame.T, 
                                  flags_list = [[0],[1],[3,5]], 
                                  names=['Plot', 'Concentration','Varieties'])
    
    #%% value series (for regression)
    def cut_df(df, n):
        '''
        将 pandas表格按行平均分为 n份

        Parameters
        ----------
        df : TYPE
            DESCRIPTION.
        n : TYPE
            DESCRIPTION.

        Returns
        -------
        df_tem_list : TYPE
            DESCRIPTION.

        '''
        import math
        df_tem_list = []
        df_num = len(df)
        every_epoch_num = math.floor((df_num/n))
        for index in range(n):
            if index < n-1:
                df_tem = df[every_epoch_num * index: every_epoch_num * (index + 1)]
            else:
                df_tem = df[every_epoch_num * index:]
            df_tem_list.append(df_tem)
        return df_tem_list
            
    # Build the per-indicator regression table:
    #  1) split each Filename group into two row-halves (cut_df),
    #  2) collapse every half to its column-wise mean,
    #  3) re-merge so each original column appears twice ('-1' / '-2' suffix).
    df_list_frame1 = []
    df_list_frame2 = []
    df_list_frame_new = []
    df_fn_frame = Formatting.factorIterate(df_fn, factor='Filename')
    for df_fn0 in df_fn_frame[1]:
        df_list = cut_df(df_fn0, 2)
        df_list_frame1.extend(df_list)
    for df_fn1 in df_list_frame1:
        # Label each half-table by the first index entry of that half.
        indicator = df_fn1.index[0]
        df_fn2 = df_fn1.mean(0)
        df_fn2 = pd.DataFrame(df_fn2, columns=[indicator])
        df_list_frame2.append(df_fn2)
    df_fn_merge = pd.concat(df_list_frame2, axis=1)
    # NOTE(review): .T shares its Index objects with the source frame, so this
    # line effectively names df_fn_merge.columns -- confirm that is intended.
    df_fn_merge.T.index.names = ['indicator']
    df_fn_frame2 = Formatting.factorIterate(df_fn_merge.T, factor='indicator')
    for df_filename in df_fn_frame2[1]:
        # Stack the two half-means of each indicator into one long column,
        # suffixing the original labels with '-1' / '-2'.
        index_origin = list(df_filename.columns)
        index_new1 = [i + '-1' for i in index_origin]
        index_new2 = [i + '-2' for i in index_origin]
        index_new = index_new1 + index_new2
        df_new = pd.concat([df_filename.iloc[0,:], df_filename.iloc[1,:]], axis=0)
        df_new.index = index_new
        df_list_frame_new.append(df_new)
    df_indicator = pd.concat(df_list_frame_new, axis=1)
    df_indicator.to_csv(path_result+'df_indicator.csv')


    
    #%% Group-wise value reordering
    
    # Sort each ('Area','Varieties') group column-wise in descending order of
    # absolute value; the 'MDA' columns are instead sorted ascending.
    frame=[]
    for name, group in df.groupby(['Area','Varieties']):
        try:
            df_MDA = group['MDA']
            # assumes group['MDA'] yields a DataFrame (duplicated 'MDA' column
            # labels); with a single column this is a Series and .shape[1]
            # below raises IndexError, which KeyError does NOT catch -- TODO confirm
            df_np_MDA = np.sort(np.array(df_MDA), axis=0)
            # searchsorted presumes the column labels are lexically sorted.
            a = group.columns.searchsorted('MDA')
            b = a + df_MDA.shape[1]
            # Negate, ascending-sort, abs == descending sort by magnitude.
            df_np = abs(np.sort(-np.array(group), axis=0))
            df_np[::,a:b] = df_np_MDA
        except KeyError:
            # No 'MDA' column in this group: plain descending sort everywhere.
            df_np = abs(np.sort(-np.array(group), axis=0))
        df = pd.DataFrame(df_np, index=group.index, columns=group.columns)
        frame.append(df)
    df = pd.concat(frame, axis=0).sort_index()

    # Replace row 0 of each ('Area','Varieties') group with the group's sorted
    # values scaled by 1.01 (i.e. lift it just past the extreme value, +1%).
    frame=[]
    for name, group in df.groupby(['Area','Varieties']):
        df_np0 = np.array(group)
        try:
            # 'MDA' and 'SP' are sorted ascending; every other column is
            # sorted descending by absolute value before scaling.
            df_MDA = group['MDA']
            df_np_MDA = np.sort(np.array(df_MDA), axis=0)*1.01
            a = group.columns.searchsorted('MDA')
            b = a + df_MDA.shape[1]
            df_SP = group['SP']
            df_np_SP = np.sort(np.array(df_SP), axis=0)*1.01
            c = group.columns.searchsorted('SP')
            d = c + df_SP.shape[1]
            df_np1 = abs(np.sort(-np.array(group), axis=0))*1.01
            df_np1[::,a:b] = df_np_MDA
            df_np1[::,c:d] = df_np_SP
        except KeyError:
            # NOTE(review): this fallback omits the *1.01 scaling applied in
            # the try-branch -- confirm the asymmetry is intended.
            df_np1 = abs(np.sort(-np.array(group), axis=0))
        # Only the first row is replaced; the rest of the group is untouched.
        df_np0[0,:] = df_np1[0]
        df = pd.DataFrame(df_np0, index=group.index, columns=group.columns)
        frame.append(df)
    df = pd.concat(frame, axis=0).sort_index()
    
    #%% Box Plot: Box plot of all data

    indicators.print_object()
    # Average replicate columns sharing a Filename, then iterate features
    # grouped by Concentration so each indicator gets its own box plot.
    indicators = Formatting.groupBy(df, mode='mean', level='Filename', axis=1, 
                                    path=path_result+'merged_')
    df = indicators.DataFrame
    df = Formatting.featureIterate(df, factor='Concentration', feature='Filename')
    for name, group in zip(df[0], df[1]):
        # Two outputs per indicator: raw replicates ('AB_split') and
        # variety-averaged ('AB_merge'); each gets a PNG plot plus a CSV.
        path_AB_split = path_result+'AB_split/'
        path_AB_merge = path_result+'AB_merge/'
        Files.mkdirPath(path_AB_split)
        Files.mkdirPath(path_AB_merge)
        box = Graph.plotNumericBox(group, xlabel='Concentration', ylabel=name,
                                    testmethod='t-test_ind', figsize=(9,10),
                                    path=path_AB_split+name+'.png')
        group.to_csv(path_AB_split+name+'.csv')
        
        # Collapse replicates: drop index levels 0 and 2, average within Varieties.
        group = group.reset_index(level=[0,2], drop=True).groupby(
                                            level='Varieties',axis=0).agg('mean')
        box = Graph.plotNumericBox(group, xlabel='Concentration', ylabel=name,
                                    testmethod='t-test_ind', figsize=(9,10),
                                    path=path_AB_merge+name+'.png')
        group.to_csv(path_AB_merge+name+'.csv')

    #%% Box Plot: Box plot by varieties
    
    # Partition samples into susceptible (M1/S2) and resistant (R1/R2) variety
    # groups; dataFilter also writes each subset straight to CSV.
    df_Susceptible = Formatting.dataFilter(df, factors=['Varieties'], 
                                         levels=['M1','S2'], 
                                        path = path_result + 'Susceptible.csv')
    df_Resistant = Formatting.dataFilter(df, factors=['Varieties'], 
                                          levels=['R1','R2'], 
                                        path = path_result + 'Resistant.csv')       # Filter Varieties.
    df_Merge = pd.concat([df_Resistant,df_Susceptible], axis=0).sort_index(axis=0)
    # Replicate-averaged version of the combined table (also saved to disk).
    df_Merge_mean = Formatting.groupBy(df_Merge, mode='mean', level='Filename',
                                       axis=1, path=path_result+'Merge_')
    
    df_frame = {'Susceptible' : df_Susceptible, 'Resistant' : df_Resistant }
    df_frame_frame = []
    for key, value in df_frame.items():
        # Average replicates per Filename, then iterate features by
        # Concentration, separately for each variety group.
        indicators = Formatting.groupBy(value, mode='mean', level='Filename', axis=1)
        df = indicators.DataFrame
        df = Formatting.featureIterate(df, factor='Concentration', feature='Filename')
        
        # # Box plot by varieties: Plotting varieties separately
        
        # path = path_result+key+'/'
        # Files.mkdirPath(path)
        # for name, group in zip(df[0], df[1]):
        #     path_AB_split = path+'AB_split/'
        #     path_AB_merge = path+'AB_merge/'
        #     Files.mkdirPath(path_AB_split)
        #     Files.mkdirPath(path_AB_merge)
        #     box = Graph.plotNumericBox(group, xlabel='Concentration', ylabel=name,
        #                                 testmethod='t-test_ind', figsize=(9,10),
        #                                 path=path_AB_split+name+'.png')
        #     group.to_csv(path_AB_split+name+'.csv')
            
        #     group = group.reset_index(level=[0,2], drop=True).groupby(
        #                                         level='Varieties',axis=0).agg('mean')
        #     box = Graph.plotNumericBox(group, xlabel='Concentration', ylabel=name,
        #                                 testmethod='t-test_ind', figsize=(9,10),
        #                                 path=path_AB_merge+name+'.png')
        #     group.to_csv(path_AB_merge+name+'.csv')
        
        # Box plot by varieties: Plotting the integrated varieties

        # Collect only the per-feature group tables; note that df[0] (the
        # feature names) is reused from the LAST loop iteration further below.
        df_frame_frame.append(df[1])
        Files.mkdirPath(path_result+'Merge/')
    # For each indicator, align the Susceptible and Resistant tables, tag them
    # with a 'Filename' index level, and emit a merged box plot plus CSVs for
    # plotting and letter annotation in "Origin".
    # NOTE: relies on df[0] surviving from the preceding loop; zip stops at
    # the shortest of the three sequences.
    for name, S, R in zip(df[0], df_frame_frame[0], df_frame_frame[1]):
        S = S.reset_index(drop=True)
        R = R.reset_index(drop=True)
        # Tag each transposed table with its group name as a new index level.
        S = Formatting.addIndex(S.T, index='Susceptible', class_name='Filename')
        R = Formatting.addIndex(R.T, index='Resistant', class_name='Filename')
        S = S.swaplevel('Filename', 'Concentration')
        R = R.swaplevel('Filename', 'Concentration')
        merge = pd.concat([S,R], axis=0).sort_index(axis=0).T
        
        # Plotting the integrated varieties: merged box
        merge_box = merge.copy()
        # Flatten the MultiIndex columns into 'level0_level1' strings for plotting.
        merge_box.columns = ['_'.join(col) for col in merge_box.columns.values]
        box = Graph.plotNumericBox(merge_box, xlabel='Concentration', ylabel=name,
                                        testmethod='t-test_ind', figsize=(15,10),
                                        path=path_result+'Merge/'+name+'.png')
        # index=False drops the row index (was the non-idiomatic int index=0;
        # pandas documents this parameter as a bool).
        merge.to_csv(path_result+'Merge/'+name+'.csv', index=False)
        
        # Plotting the integrated varieties: data for describe:
        mean = merge.mean(axis=0)
        std = merge.std(axis=0)
        merge_describe = pd.concat([mean,std], axis=1)
        merge_describe.columns = ['mean', 'std']
        merge_describe.to_csv(path_result+'Merge/'+name+'_describe.csv')

        # Plotting the integrated varieties: data for letter in "Origin"
        # Physiology:
        merge_row = merge.stack([1,0])
        merge_row = merge_row.reset_index(level=0, drop=True).sort_index()
        # Build 'A-B Concentration' style row labels expected by Origin.
        new_index = ['-'.join(col) for col in merge_row.index.values]
        new_index = ['{} {}'.format(a,'Concentration') for a in new_index]
        merge_row.index = new_index
        merge_row.to_csv(path_result+'Merge/'+name+'_row.csv')
        # # Growth:
        # merge_row = merge.stack([0,1])
        # merge_row = merge_row.reset_index(level=0, drop=True).sort_index()
        # merge_row.to_csv(path_result+'Merge/'+name+'_row.csv')
        


    
    
    
    
    
    
    
    
    