# -*- coding: utf-8 -*-
"""
Created on Fri Jan 14 09:33:37 2022

@author: xtp

"""

import numpy as np
import os
import matplotlib.pyplot as plt 
import pandas as pd
from files import Files
import mifs
import seaborn
import re
import scipy.io as scio
import itertools
import seaborn as sns
from statannotations.Annotator import Annotator
from sklearn.metrics import ConfusionMatrixDisplay

class Formatting():
    '''
    Fluorescence data reading, pre-processing and writing.

    Wraps a pandas DataFrame whose (multi-)index encodes the experimental
    factors (filename, variety, nitrogen, water, ROI, ...) and whose
    columns hold the measured fluorescence features.
    '''

    def __init__(self, dataframe):
        # Cache the table plus convenience views of its structure.
        self.DataFrame = dataframe
        self.Factors = list(dataframe.index.names)   # index level names
        self.Level = list(dataframe.index)           # index entries
        self.Features = list(dataframe.columns)      # feature (column) names
        self.Shape = dataframe.shape

    def print_object(self):
        '''Print every attribute of the instance, separated by blank lines.'''
        print('\n\n'.join(['%s:%s' % item for item in self.__dict__.items()]))

    @classmethod
    def readTXT2Table(cls, path, ext, addroindex=False, 
                        addNaNcolumn=False, dropindex=False):
        '''
        Read TXT files and stack them into one table indexed by filename.

        Parameters
        ----------
        path : str
            Directory holding the raw data files.
        ext : list[str]
            File name / extension patterns to select (e.g. ['*.TXT']).
        addroindex : bool
            Whether to add region-of-interest names as the column index.
        addNaNcolumn : bool
            Whether to pad a missing ROI column with NaN.
        dropindex : bool
            Whether to drop the original (ROI) index when adding filenames.

        Returns
        -------
        Formatting
            Instance wrapping the concatenated table.

        '''
        process = Files(path, ext, flag='all')
        df_frame = []
        # Loop variable renamed so it does not shadow the `path` parameter.
        for file_path, name in zip(process.filesWithPath, process.filesNoExt):
            df = pd.read_table(file_path, header=None, index_col=0,
                               sep='\t', skiprows=[0, 1, 2])
            df.index.name = 'Features'                                          # Set feature class name.
            if addroindex:
                df = Formatting._addROIndex(df, 
                            default=['Leaf1','Leaf2','Leaf3','Leaf4','Leaf5'],  # Set default columns 
                            exception=['Leaf2','Leaf3','Leaf4','Leaf5'])        # and exception.
            if addNaNcolumn:
                df = Formatting._addNaNcolumn(df, default_number=5, loc=0,      # Set ROI number, NaN position
                                                columns_name='Leaf1')           # and NaN column name.
            df = df.astype(float).T
            if addroindex:
                df.index.name = 'ROI'                                           # Set ROI class name.
            df = Formatting._addIndex(df, index=name, class_name='Filename',    # Set filename class name.
                                        drop=dropindex)
            df_frame.append(df)
        df_table = pd.concat(df_frame, axis=0, ignore_index=False)
        return cls(df_table)

    @classmethod
    def resetTableIndex(cls, df, dropindex=False):
        '''
        Rebuild the table index by decoding factor levels from the filename.

        NOTE: the slice positions, filter strings and replacement labels
        below are experiment-specific and hard-coded for this data set.

        Parameters
        ----------
        df : DataFrame
            Table indexed by filename.
        dropindex : bool
            False keeps the filename as the first index level,
            True drops it.

        Returns
        -------
        Formatting
            Instance wrapping the re-indexed table.

        '''
        # Assumes a string (or tuple) index; flattens any extra levels
        # back into columns before decoding the filename.
        if len(df.index[0]) != 1:
            df = df.reset_index(level=list(range(1, len(df.index[0]))),
                                drop=False)
        flags_list = [[0, 3], [3, 5], [5, 7]]                                   # Slice of the filename per factor.
        filter_list = [['20', '335', '958'],                                    # Substrings searched in each slice.
                       ['1', '2', '3'],
                       ['1', '2', '3', '4', '5', '6']]
        new_index_list = [['JD20', 'XY335', 'ZD958'],                           # Replacement level labels.
                          ['N0', 'N1', 'N2'],
                          ['W', 'W', 'W', 'D', 'D', 'D']]
        class_name_list = ['Varieties', 'Nitrogen', 'Water']
        for flags, filters, new_index, class_name in zip(
                flags_list, filter_list, new_index_list, class_name_list):
            df = Formatting._convertIndex(df, flags=flags, filters=filters,
                                   new_index=new_index, class_name=class_name)
        if not dropindex:
            df = df.reset_index()
            df_multindex = df.set_index(['Filename']+class_name_list+['ROI'])
        else:
            df_multindex = df.set_index(class_name_list+['ROI'])
        return cls(df_multindex)

    @staticmethod
    def _addROIndex(df, default=None, exception=None):
        '''
        Manually attach column (ROI) labels.

        Parameters
        ----------
        df : DataFrame
            Data table.
        default : list[str]
            Column labels used when the column count matches.
        exception : list[str]
            Fallback labels for files with a missing ROI column.

        Returns
        -------
        df : DataFrame
            Table with ROI column labels.

        '''
        # Avoid mutable default arguments.
        default = [] if default is None else default
        exception = [] if exception is None else exception
        df_arr = np.array(df)
        try:
            # Normal case: the file contains the full set of ROI columns.
            df = pd.DataFrame(df_arr, index=df.index, columns=default)
        except ValueError:
            # Column-count mismatch: fall back to the exception layout.
            df = pd.DataFrame(df_arr, index=df.index, columns=exception)
        return df

    @staticmethod
    def _addNaNcolumn(df, default_number, loc, columns_name=''):
        '''
        Insert a NaN column where an ROI is missing.

        Parameters
        ----------
        df : DataFrame
            Data table.
        default_number : int
            Expected number of ROI columns.
        loc : int
            Position at which to insert the NaN column.
        columns_name : str
            Name of the inserted column.

        Returns
        -------
        df : DataFrame
            Table padded to `default_number` columns.

        '''
        if len(df.columns) == default_number:
            return df
        df.insert(loc, columns_name, np.nan)
        return df

    @staticmethod
    def _addIndex(df, index='', class_name='', drop=False):
        '''
        Add one constant index value (e.g. the filename) to every row.

        Parameters
        ----------
        df : DataFrame
            Data table.
        index : str
            Value to assign to every row.
        class_name : str
            Name of the new index level.
        drop : bool
            True replaces the old index; False keeps it as a second level
            (the old index is assumed to be named 'ROI').

        Returns
        -------
        df : DataFrame
            Re-indexed table.

        '''
        df = df.reset_index()
        df[class_name] = [index] * df.shape[0]
        if drop:
            df = df.set_index(class_name)
        else:
            df = df.set_index([class_name, 'ROI'])
        return df

    @staticmethod
    def _convertIndex(df, flags=None, filters=None, new_index=None,
                      class_name=''):
        '''
        Classify rows by a slice of the existing index and store the
        result as a new column (Filename -> factor level).

        Parameters
        ----------
        df : DataFrame
            Table with a string index.
        flags : list[int, int]
            [start, stop) slice of the index string to inspect.
        filters : list[str]
            Substrings searched within the slice.
        new_index : list[str]
            Replacement label for each filter (same order).
        class_name : str
            Name of the new column.

        Returns
        -------
        df : DataFrame
            Table with the new factor column appended.

        '''
        flags = [] if flags is None else flags
        filters = [] if filters is None else filters
        new_index = [] if new_index is None else new_index
        index = list(df.index)
        for f, pattern in enumerate(filters):
            for i, index_str in enumerate(index):
                if pattern in index_str[flags[0]:flags[1]]:
                    index[i] = new_index[f]
        df[class_name] = index
        return df

    @staticmethod
    def _movingAverage(y, window_size):
        '''
        Moving average via numpy convolution (endpoints restored to
        suppress the convolution edge effect).

        Arguments:
            y: input array, np.array
            window_size: smoothing window size, int
        Returns:
            y_ma: smoothed y, np.array

        '''
        window = np.ones(int(window_size)) / float(window_size)
        y_ma = np.convolve(y, window, 'same')
        y_ma[0] = y[0]
        y_ma[-1] = y[-1]
        return y_ma

    @classmethod
    def curveSmooth(cls, df, window_size=3, path=''):
        '''
        Convolution smoothing of every kinetic curve (table row).

        Column labels are rescaled from microseconds to seconds
        (divided by 1e6). The input table is no longer modified in place.

        Parameters
        ----------
        df : DataFrame
            One curve per row.
        window_size : int
            Convolution window size.
        path : str
            Folder path; when given the result is written to CSV and
            nothing is returned.

        Returns
        -------
        Formatting
            Instance wrapping the smoothed table (only when path == '').

        '''
        # Work on a copy so the caller's DataFrame keeps its column labels.
        df = df.copy()
        df.columns = df.columns / 1000000
        curve_matrix = df.to_numpy()
        curve_avg_tab = [Formatting._movingAverage(y=curve_matrix[i, :],
                                                   window_size=window_size)
                         for i in range(curve_matrix.shape[0])]
        df_avg = pd.DataFrame(np.array(curve_avg_tab), index=df.index,
                              columns=df.columns)
        if path != '':
            df_avg.to_csv(path + 'kinetic smoothed.csv')
        else:
            return cls(df_avg)

    def featuresSelect(self, factor='', method='MRMR',
                             n_features='auto', path=''):
        '''
        Feature selection by mutual information (mifs).

        Parameters
        ----------
        factor : str
            Index level used as the class target.
        method : str
            Selection method passed to mifs (e.g. 'MRMR', 'JMI', 'JMIM').
        n_features : str/int
            Number of features to keep.
        path : str
            Folder path; when given, also saves the selected features.

        Returns
        -------
        df_Selected : DataFrame
            Columns reordered by the selector's ranking.

        '''
        df = self.DataFrame.dropna(axis=0)
        X = np.array(df)                     # [n_samples, n_features]
        df = df.reset_index(level=factor)
        df = df.set_index(factor, drop=True)
        y = Formatting._strList2numberList(list(df.index))  # [n_samples]
        # BUGFIX: forward the `method` argument instead of hard-coding MRMR.
        feat_selector = mifs.MutualInformationFeatureSelector(method=method,
                                                        n_features=n_features,
                                                        categorical=True,
                                                        verbose=1, k=5)
        feat_selector.fit(X, y)
        S = feat_selector.ranking_
        MIs = feat_selector.mi_
        feat_selector._print_results(S, MIs)
        df_Selected = df.iloc[:, S]
        if path != '':
            # BUGFIX: to_csv takes the target as its first positional
            # argument (path_or_buf); there is no `path=` keyword.
            df_Selected.to_csv(path + 'selected feature.csv')
        return df_Selected

    def featureIterate(self, factor='', features=None, path=''):
        '''
        Analyse the given features of one factor; iterate feature by
        feature and save one CSV per feature name.

        Parameters
        ----------
        factor : str
            Factor (index level) name.
        features : list[str]
            Feature names to extract.
        path : str
            Output folder path.

        Returns
        -------
        None.

        '''
        features = [] if features is None else features
        df = self.DataFrame.loc[:, features].stack('Features').unstack(factor)
        for name, group in df.groupby('Features'):
            group.to_csv(path + name + '.csv')

    def factorIterate(self, factor='', features=None, path=''):
        '''
        Analyse the given features of one factor; iterate level by
        level and save one CSV per level name.

        Parameters
        ----------
        factor : str
            Factor (index level) name.
        features : list[str]
            Feature names to extract.
        path : str
            Output folder path.

        Returns
        -------
        None.

        '''
        features = [] if features is None else features
        df = self.DataFrame.loc[:, features]
        for name, group in df.groupby(factor):
            group.to_csv(path + name + '.csv')

    def dataFilter(self, factors=None, levels=None,
                   features=None, feature_flag=False, path=''):
        '''
        Filter the table by factor levels and features.

        Parameters
        ----------
        factors : list[str]
            Index levels to keep as the index (others become columns).
        levels : list[str]
            Level names to select; matching index levels are moved to
            the front so `.loc` can select them positionally.
        features : list[str]
            Feature names; with feature_flag=True, [start, stop] of a
            column range instead.
        feature_flag : bool
            Whether `features` denotes a column range.
        path : str
            File path; when given the result is written to CSV and
            nothing is returned.

        Returns
        -------
        df : DataFrame
            Filtered table (only when path == '').

        '''
        factors = [] if factors is None else factors
        levels = [] if levels is None else levels
        features = [] if features is None else features
        df = self.DataFrame.sort_index()
        if factors:
            df = df.reset_index(level=factors)
            df = df.set_index(factors, drop=True)
        if levels:
            df_copy = df.reset_index()
            for i, f in enumerate(levels):
                for I in self.Factors:
                    if f in list(df_copy[I]):
                        position = self.Factors.index(I)
                        df = df.swaplevel(position, i)
        if features:
            if not feature_flag:
                df = df.loc[tuple(levels), features]
            else:
                df = df.loc[tuple(levels), features[0]:features[1]]
        if path != '':
            df.to_csv(path)
        else:
            return df

    @staticmethod
    def groupBy(df, mode='', path=''):
        '''
        Grouped statistics over all index levels.

        Parameters
        ----------
        df : DataFrame
            Filtered data table.
        mode : str
            One of 'mean', 'std', 'describe'; any other value leaves
            the table unchanged.
        path : str
            Folder path; when given the result is written to CSV and
            nothing is returned.

        Returns
        -------
        df : DataFrame
            Grouped statistics (only when path == '').

        '''
        group = df.groupby(df.index.names)
        if mode == 'mean':
            df = group.mean()
        if mode == 'std':
            df = group.std()
        if mode == 'describe':
            df = group.describe().stack()
        if path != '':
            df.to_csv(path + mode + '.csv')
        else:
            return df

    @staticmethod
    def _strList2numberList(str_list):
        '''
        Map equal strings to equal numbers, numbered from 1.0 in order
        of first appearance.

        Arguments:
            str_list: list[str]
        Returns:
            number_list: list[float]

        '''
        number_list = str_list.copy()
        filter_list = list(set(str_list))
        filter_list.sort(key=str_list.index)        # stable: first-seen order
        for i, label in enumerate(filter_list):
            for j, value in enumerate(number_list):
                if label == value:
                    number_list[j] = float(i + 1)
        return number_list

    @staticmethod
    def saveTable2mat(path, df):
        '''
        Extract table index, columns and values and save them as .mat.

        Arguments:
            path: full output file path, str
            df: data table, DataFrame
        Returns:
            values, numeric class vector, string index, column labels

        '''
        index = list(df.index)
        columns = list(df.columns)
        Index = Formatting._strList2numberList(index)
        Index = np.array(Index, dtype=float).reshape(-1, 1)
        scio.savemat(path, {'data': df.values, 'class': Index,
                            'class_str': index, 'variables_str': columns})
        return df.values, Index, index, columns
    
class Processing():
    '''
    Plotting helpers for formatted fluorescence tables.

    All methods are stateless; they are declared @staticmethod so they
    can be called both as Processing.f(...) and on an instance.
    '''

    def __init__(self):
        pass

    @staticmethod
    def plotCurves(df, path=''):
        '''
        Quick line plot of every curve (one column per curve after
        transposition). Shown interactively when path == '', otherwise
        saved at 600 dpi.

        '''
        df.T.plot(x=None, y=None, use_index=True, kind='line',
                  figsize=(5, 5), title=None, grid=None, legend=False,
                  xticks=None, yticks=None, xlim=(0, 500), ylim=(0, 1200),
                  fontsize=10, colormap=None)
        if path == '':
            plt.show()
        else:
            plt.savefig(path, dpi=600)
            plt.clf()

    @staticmethod
    def plotNumericBox(df, figsize=(), xlabel='', ylabel='', path='',
                      fontsize=20, testmethod='Mann-Whitney', testitem='Near'):
        '''
        Box plot with the table columns on the x axis, annotated with
        pairwise significance tests (statannotations).

        Parameters
        ----------
        df : DataFrame
            Data table.
        figsize : tuple(a, b)
            Figure size.
        xlabel : str
            x-axis label.
        ylabel : str
            y-axis label.
        path : str
            Save path; empty shows the figure instead.
        fontsize : int
            Tick and label font size.
        testmethod : str
            Hypothesis test:
                'Mann-Whitney','Mann-Whitney-gt','Mann-Whitney-ls',
                't-test_ind','t-test_welch','t-test_paired',
                'Wilcoxon','Wilcoxon-legacy',
                'Kruskal','Levene'.
        testitem : str
            Which pairs to test: 'Near' (adjacent columns) or 'All'.

        Returns
        -------
        None.

        '''
        df = df.astype(float)
        plt.figure(figsize=figsize)
        box = sns.boxplot(data=df, palette="Reds", showmeans=True, notch=False)
        # SimHei so CJK axis labels render; keep the minus sign readable.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)
        plt.xticks(fontsize=fontsize)
        plt.yticks(fontsize=fontsize)
        index = list(df.columns)
        if testitem == 'All':
            pairs = list(itertools.combinations(index, 2))
        if testitem == 'Near':
            # Adjacent column pairs only: (c0,c1), (c1,c2), ...
            pairs = [tuple(index[i:i + 2]) for i in range(len(index) - 1)]
        annotator = Annotator(box, pairs, data=df)
        annotator.configure(test=testmethod, text_format='star',
                            line_height=0.02, line_width=1, fontsize=fontsize)
        annotator.apply_and_annotate()
        if path == '':
            plt.show()
        else:
            plt.savefig(path, dpi=600)
            plt.clf()

    @staticmethod
    def plotHeatmap(table, logarithm=False, cor_transpose=False,
                    vmax=None, vmin=None, path=''):
        '''
        Clustered heatmap plus a correlation clustered heatmap.

        Parameters
        ----------
        table : DataFrame
            Heatmap data table.
        logarithm : bool
            Whether to log1p-transform the data. The default is False.
        cor_transpose : bool
            Whether to transpose before computing correlations.
            The default is False.
        vmax : int
            Colorbar maximum for the clustered heatmap.
        vmin : int
            Colorbar minimum for the clustered heatmap.
        path : str
            Output folder path; empty only displays the figures.

        Returns
        -------
        None.

        '''
        if logarithm:
            table = table.apply(np.log1p)
        plt.figure()
        heatmap = seaborn.clustermap(table, pivot_kws=None,
                                     method='average', metric='euclidean',
                                     z_score=None, standard_scale=None,
                                     figsize=(10, 10), cbar_kws=None,
                                     row_cluster=True, col_cluster=True,
                                     row_linkage=None, col_linkage=None,
                                     row_colors=None, col_colors=None,
                                     mask=None, dendrogram_ratio=0.2,
                                     colors_ratio=0.03, tree_kws=None,
                                     cbar_pos=(0.02, 0.8, 0.05, 0.18),
                                     vmax=vmax, vmin=vmin)
        if path != '':
            heatmap.savefig(path+'heatmap.png', dpi=300)
        if cor_transpose:
            table = table.T
        plt.figure()
        table_corr = table.corr(method='pearson')
        correlation_heatmap = seaborn.clustermap(table_corr)
        if path != '':
            correlation_heatmap.savefig(path+'correlation heatmap.png', dpi=300)

    @staticmethod
    def plotMatrix(file_path='', start='', end='', labels=None, matrix='',
                   show_percentage=True):
        '''
        Build and display a confusion matrix.

        Parameters
        ----------
        file_path : str
            Path of a text file containing the matrix; when empty, a
            hard-coded example matrix is plotted instead.
        start : str
            Pattern marking the first matrix line in the file.
        end : str
            Pattern marking the last matrix line in the file.
        labels : list[str]
            Class labels.
        matrix : str, e.g.'0.41,0.18,0.41; 0.25,0.63,0.12; 0.18,0,0.82'
            Manually entered matrix (unused; kept for compatibility).
        show_percentage : bool
            Normalise each row to percentages. The default is True.

        Raises
        ------
        ValueError
            If the start/end markers are not found in the file.

        Returns
        -------
        None.

        '''
        if file_path != '':
            data_list = []
            percentage_list = []
            start_row = end_row = None
            with open(file_path, 'r') as f:
                for i, line in enumerate(f.readlines()):
                    # Raw strings so '\s' is a regex class, not an escape.
                    if re.findall(r'\s*' + start, line):
                        start_row = i
                    if re.findall(r'\s*' + end, line):
                        end_row = i
            if start_row is None or end_row is None:
                # Previously this fell through to an UnboundLocalError.
                raise ValueError('start/end markers not found in '
                                 + file_path)
            with open(file_path, 'r') as f:
                for j, lines in enumerate(f.readlines()):
                    if start_row <= j <= end_row:
                        data = re.findall(r'(?<= )\d+\.?\d*', lines)
                        if data:
                            data_list.append(data)
            matrix = np.array(data_list).astype(np.float64).T
            if show_percentage:
                # Normalise each row by its total.
                for i in range(matrix.shape[0]):
                    label_sum = sum(matrix[i, :])
                    percentage_list.append(np.divide(matrix[i], label_sum))
                matrix = np.array(percentage_list)
        else:
            matrix = np.matrix('0.41,0.18,0.41; 0.25,0.63,0.12; 0.18,0,0.82')
        disp = ConfusionMatrixDisplay(confusion_matrix=matrix,
                                      display_labels=labels)
        disp.plot(cmap='Blues')
        plt.show()
           
def mkDir(path=None):
    '''
    Create a folder, including missing parents, if it does not exist.

    Arguments:
        path: full folder path, str

    '''
    # exist_ok=True is idempotent and removes the check-then-create
    # race of the previous os.path.exists + os.makedirs sequence.
    os.makedirs(path, exist_ok=True)
        
        
if __name__=="__main__":
    
    # Demo pipeline on local data: read raw TXT exports, rebuild the
    # factor index, smooth the kinetic curves and run feature selection.
    # Commented-out calls show optional analysis / plotting steps.
    path_processing = 'D:/Data/chlorophyllfluorescence/processing/'
    ext_txt = ['*.TXT']
    path_result = 'D:/Data/chlorophyllfluorescence/Result/'
    ext_csv = ['*.csv']

# =============================================================================
#                                   Formatting
# =============================================================================
    fluorescence = Formatting.readTXT2Table(path_processing, ext_txt,
                                            addroindex=True, addNaNcolumn=True)
    fluorescence = Formatting.resetTableIndex(fluorescence.DataFrame,
                                                dropindex=False)
    fluorescence.print_object()
    df = fluorescence.DataFrame

                                 ##  Kinetic  ##
    
    fluorescence = Formatting.curveSmooth(df, window_size=3)                    # Get smooth curve.
    
    # df = fluorescence.dataFilter(factors=['ROI'])                               # Filter different features.
    
    # Formatting.groupBy(df, mode='mean')                                         # Get table mean.

    fluorescence.featuresSelect(factor='ROI')                                   # Feature selected.

                                 ##  Numeric  ##
    
    # fluorescence.featureIterate('ROI', ['Fo','qL_Lss','qN_Lss','QY_max'],
    #                             path_result)                                    # Get leaves' feature.
    
    # fluorescence.factorIterate('ROI', ['Fo','qL_Lss','qN_Lss','QY_max'],
    #                             path_result)                                    # Get leaf's features.
    
    
    # df = fluorescence.dataFilter(levels=['JD20','W'], features=['Fo'])          # Filter different levels.
    
    # df = fluorescence.dataFilter(factors=['Varieties','Nitrogen','ROI'],
    #                              features=['Fo','Fq_D3'], feature_flag=True)    # Filter different features.
    
    # Formatting.groupBy(df, mode='mean', path=path_result + 'mean.csv')          # Get table mean.




    




# =============================================================================
#                                   Processing
# =============================================================================
    # result = Files(path_result, ext_csv, flag ='this')                          # Set ext.
    # for path, name in zip(result.filesWithPath, result.filesNoExt):
    #     df = pd.read_table(path, sep=',', header=0, index_col=[0,1,2,3,4])      # Set table parameters.
        
                                 ##  Kinetic  ##
        
        # Processing.plotCurves(df)                                               # Plot curve
        
                                 ##  Numeric  ##
        
        # box = Processing.plotNumericBox(df, xlabel='叶片位置 Leaf position',
        #                     ylabel='荧光参数值 Fluorescence parameter value',
        #                     figsize=(7,8))                                      # Plot boxplot.
        
        # Processing.plotHeatmap(df, logarithm=True, cor_transpose=True,          
        #                        vmax=None, vmin=None, path=path_result)          # Plot heatmaps.
            
                                 ##   Other   ##
        
        # Processing.plotMatrix(file_path=path, start='Confusion Table ',
        #                       end='<html><font color',
        #                       labels=['JD20','XY335','ZD958'], 
        #                       show_percentage=True)                             # Plot Confusion Matrix.

    
    
    
    
    
    
    
    
    
    
    
    
    
     