# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 16:45:36 2019

@author: wangzhendong
"""

"""
eda辅助类，能够处理数据
"""
from scipy.spatial.distance import pdist, squareform
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
class Eda:
    """EDA (exploratory data analysis) helper.

    Utilities for NA inspection/filling, categorical encoding (factorize,
    one-hot, likelihood encoding) and correlation analysis on pandas
    DataFrames.
    """

    # Dtype-name prefixes treated as "numeric" throughout this class.
    # Deliberately excludes bool, matching the original string checks.
    _NUMERIC_PREFIXES = ('float', 'int', 'double')

    def __isNumeric(self, series):
        """Return True when *series* has a numeric (float/int) dtype."""
        return str(series.dtype).startswith(self._NUMERIC_PREFIXES)

    def colWithNNa(self, dataframe):
        """Return a list of the column names that contain no NA values."""
        return dataframe.dropna(axis=1).columns.tolist()

    def dfWithoutNa(self, dataframe):
        """Return a DataFrame containing only the columns without NA values."""
        return dataframe.dropna(axis=1)

    def colWithNa(self, dataframe):
        """Return a list of the column names that contain at least one NA.

        @param dataframe: a pandas DataFrame
        @return: list of column names that contain NA values
        """
        return dataframe.drop(columns=dataframe.dropna(axis=1).columns).columns.tolist()

    def dfWithNa(self, dataframe):
        """Return a DataFrame containing only the columns that have NA values."""
        return dataframe.drop(columns=dataframe.dropna(axis=1).columns)

    def __convert(self, x, source, dest):
        """Map *x* from source[i] to dest[i]; unknown values pass through."""
        for i in range(len(source)):
            if x == source[i]:
                return dest[i]
        return x

    def seConvNum(self, series, source, dest):
        """Convert factor values in *series* to numeric values.

        Typically used when the categories carry an order: source[i] is
        replaced by dest[i]; values not present in *source* are left as-is.
        """
        return series.apply(lambda x: self.__convert(x, source, dest))

    def __fillNAWithOtherG(self, x, groupDataframe, current, other):
        """Row helper: fill a missing *current* value with the group mean
        looked up by the row's *other* value."""
        if pd.isnull(x[current]):
            x.at[current] = float(groupDataframe.loc[x[other], current])
        return x

    def fillNAWithOtherGroupMean(self, dataframe, current, other):
        """Fill NAs in column *current* with the mean of *current* grouped
        by column *other*.

        @param current: name of the column to fill
        @param other: name of the grouping column
        @return: a new DataFrame with the NAs filled
        """
        otherGroupMean = dataframe[[current, other]].groupby(other).mean()
        return dataframe.apply(self.__fillNAWithOtherG, axis=1,
                               args=(otherGroupMean, current, other))

    def __fillNAWithOther(self, x, current, other):
        """Row helper: copy *other* into *current* when current is NA and
        other is not."""
        if pd.isnull(x[current]) and pd.notnull(x[other]):
            x.loc[current] = x[other]
        return x

    def fillNAWithOther(self, dataframe, current, other):
        """Fill NAs in column *current* with the value of column *other*."""
        # BUG FIX: the original referenced self.__fillNAWithOther__ (trailing
        # underscores) — a nonexistent attribute, raising AttributeError.
        return dataframe.apply(self.__fillNAWithOther, axis=1,
                               args=(current, other))

    def fillNAWithCommon(self, dataframe, currentList, indexList):
        """For each column in *currentList*, set the rows in *indexList* to
        that column's most frequent value (modifies *dataframe* in place)."""
        for current in currentList:
            # value_counts ignores NA; idxmax picks the most common value.
            # (The original counted via a hard-coded 'Id' column; this
            # removes that requirement.)  Hoisted out of the index loop —
            # the mode does not depend on the row being filled.
            val = dataframe[current].value_counts().idxmax()
            for index in indexList:
                dataframe.at[index, current] = val

    def numericSet(self, dataframe):
        """Return a new DataFrame containing only the numeric columns."""
        numericNames = [col for col in dataframe.columns
                        if self.__isNumeric(dataframe[col])]
        return dataframe[numericNames]

    def allSet(self, train, test, labelName):
        """Concatenate train and test into one full DataFrame.

        The test rows get NaN in the label column *labelName*.
        NOTE: mutates *test* by adding the label column.
        """
        test[labelName] = pd.Series(np.nan, index=test.index)
        # DataFrame.append was removed in pandas 2.0; concat is equivalent.
        return pd.concat([train, test], ignore_index=True)

    def nameOfNASort(self, dataframe):
        """Return per-column NA counts, restricted to columns with at least
        one NA, sorted descending."""
        s = dataframe.isna().sum().sort_values(ascending=False)
        return s.where(s > 0).dropna()

    def fillWithOtherByIdx(self, dataframe, idx, current, other):
        """At the rows given by *idx*, copy column *other* into column
        *current* (modifies *dataframe* in place)."""
        rows = idx.values.tolist()
        dataframe.loc[rows, [current]] = dataframe.loc[rows, [other]][other].values.tolist()

    def idxOfGreatThan0(self, series):
        """Return the index labels where *series* > 0."""
        return series.where(series > 0).dropna().index

    def idxOfNa(self, dataframe, col, idx):
        """Within the rows given by *idx*, return the index labels where
        column *col* is NA."""
        rows = idx.values.tolist()
        nonaidx = dataframe.loc[rows, [col]].dropna().index
        return dataframe.loc[rows, [col]].drop(nonaidx.values.tolist()).index

    def ofNa(self, dataframe, col):
        """Return the index labels where column *col* is NA."""
        s = dataframe[col].isna()
        return s.where(s == True).dropna().index

    def corr(self, dataframe, label):
        """Return the absolute correlation matrix of *dataframe*, with rows
        sorted by the correlation against column *label* (descending)."""
        corrDataframe = dataframe.corr().abs()
        return corrDataframe.sort_values(by=label, ascending=False)

    def factorize(self, dataframe):
        """Integer-encode (pd.factorize) every non-numeric column; numeric
        columns are copied unchanged."""
        result = dataframe.copy()
        for col in dataframe.columns:
            if not self.__isNumeric(dataframe[col]):
                labels, _ = pd.factorize(dataframe[col])
                # BUG FIX: attach the original index; a bare pd.Series(labels)
                # misaligns (NaN-fills) when the frame's index is not the
                # default RangeIndex.
                result[col] = pd.Series(labels, index=dataframe.index)
        return result

    def onehot(self, dataframe):
        """One-hot encode every non-numeric column via pd.get_dummies; the
        original column is dropped and the dummy columns appended."""
        result = dataframe.copy()
        for col in dataframe.columns:
            if not self.__isNumeric(dataframe[col]):
                onehotDf = pd.get_dummies(dataframe[col], prefix=col)
                result = result.drop([col], axis=1)
                result = pd.concat([result, onehotDf], axis=1)
        return result

    def onehotSeries(self, series):
        """One-hot encode a single Series (dummy columns prefixed with its name)."""
        return pd.get_dummies(series, prefix=series.name)

    def likelyhoodEncoding(self, dataframe, col, target):
        """Likelihood (impact) encoding of categorical column *col*.

        A categorical value is replaced by the mean of *target* for that
        category.  To avoid leaking a row's own target into its encoding,
        two nested KFold loops are used: the outer KFold splits the data;
        for each outer out-of-fold part, the in-fold part is split again by
        an inner KFold whose per-fold category means are collected and then
        averaged to produce the encoding.  Categories unseen in the folds
        fall back to the in-fold (inner) or global (outer) target mean.

        NOTE: shuffle=True without a random_state makes the result
        non-deterministic between calls.

        @param dataframe: data containing both the feature and the target
        @param col: name of the categorical feature to encode
        @param target: name of the target column
        @return: a Series (named *col*) with the encoded values
        """
        n_folds = 10
        n_inner_folds = 5
        # Global target mean: fallback for categories unseen in the folds.
        oof_default_mean = dataframe[target].mean()
        kf = KFold(n_splits=n_folds, shuffle=True)
        encoded_parts = []
        for infold, oof in kf.split(dataframe[col]):
            inner_oof_mean_cv = pd.DataFrame()
            inner_split = 0
            # Fallback for the inner loop: mean of the outer in-fold part.
            inner_oof_default_mean = dataframe.iloc[infold][target].mean()
            inner_kf = KFold(n_splits=n_inner_folds, shuffle=True)
            for inner_infold, inner_oof in inner_kf.split(dataframe.iloc[infold]):
                # Per-category target means on the inner in-fold part.
                oof_mean = dataframe.iloc[inner_infold].groupby(col)[target].mean()
                # Collect one column of category means per inner fold.
                # (rsuffix must be a string; the original passed an int.)
                inner_oof_mean_cv = inner_oof_mean_cv.join(
                    pd.DataFrame(oof_mean), rsuffix=str(inner_split), how='outer')
                inner_oof_mean_cv.fillna(inner_oof_default_mean, inplace=True)
                inner_split += 1
            # Encode the outer out-of-fold rows with the average of the inner
            # per-fold means; unseen categories fall back to the global mean.
            # (.ix was removed from pandas; .loc is the supported accessor.
            # The original also built likelihood_encoded_cv / oof_mean_cv
            # here, but never read them — dead code, dropped.)
            encoded_parts.append(dataframe.iloc[oof].apply(
                lambda x: np.mean(inner_oof_mean_cv.loc[x[col]].values)
                if x[col] in inner_oof_mean_cv.index
                else oof_default_mean,
                axis=1))
        likelyencoded = pd.concat(encoded_parts)
        likelyencoded.name = col
        return likelyencoded

    def likelyhoodEncodingFrame(self, dataframe, target):
        """Likelihood-encode every non-numeric column of *dataframe*; numeric
        columns are copied unchanged."""
        result = dataframe.copy()
        for col in dataframe.columns:
            if not self.__isNumeric(dataframe[col]):
                encoded = self.likelyhoodEncoding(dataframe, col, target)
                encoded.name = col
                result = result.drop([col], axis=1)
                result = pd.concat([result, encoded], axis=1)
        return result

    def __dcov(self, uArray, vArray):
        """Compute the distance correlation between two sample arrays.

        Example: the distance correlation of [1,2,3,4,5] and [1,2,9,4,4]
        is approximately 0.762676242417.
        """
        X = np.atleast_1d(uArray)
        Y = np.atleast_1d(vArray)
        # Promote 1-D inputs to column vectors so pdist sees one feature.
        if np.prod(X.shape) == len(X):
            X = X[:, None]
        if np.prod(Y.shape) == len(Y):
            Y = Y[:, None]
        X = np.atleast_2d(X)
        Y = np.atleast_2d(Y)
        n = X.shape[0]
        if Y.shape[0] != n:
            raise ValueError('Number of samples must match')
        a = squareform(pdist(X))
        b = squareform(pdist(Y))
        # Double-center the pairwise-distance matrices.
        A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
        B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
        dcov2_xy = (A * B).sum() / float(n * n)
        dcov2_xx = (A * A).sum() / float(n * n)
        dcov2_yy = (B * B).sum() / float(n * n)
        return np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))

    def disCorr(self, dataframe):
        """Return the pairwise distance-correlation matrix of *dataframe*'s
        columns (columns are assumed numeric)."""
        result = pd.DataFrame(index=dataframe.columns, columns=dataframe.columns)
        for col in result.columns:
            for coll in result.columns:
                dcorr = self.__dcov(dataframe[col].values, dataframe[coll].values)
                # BUG FIX: DataFrame.set_value was removed (pandas 1.0);
                # .at is the supported scalar setter.
                result.at[col, coll] = dcorr
        return result