from sklearn.model_selection import train_test_split as tt_split
from logging import getLogger
from typing import Union, Tuple
from typing import List, Sequence, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_categorical_dtype, is_string_dtype, is_object_dtype
from pathlib import Path
from sklearn.metrics import roc_curve, auc
logger = getLogger(__name__)
# Matches one or more consecutive CJK unified ideographs; used (via re.compile)
# to detect Chinese text in header rows and data rows, e.g. in `get_headers`.
ZH_CN_REGEX = r"([\u4e00-\u9fff]+)"

from collections import namedtuple
from typing import Optional

ModuleMeta = namedtuple('ModuleMeta', ['flag_name', 'en_name', 'cn_name', 'description'])

# Static lookup table: prefix -> (flag_name, en_name, cn_name, description).
# NOTE(review): entries from 'gl' downward appear to have en_name/cn_name in
# swapped positions (Chinese text sits in the en_name slot); the values are kept
# exactly as in the original if-chain to preserve behavior -- confirm before
# relying on those two fields for the later products.
_MODULE_META_TABLE = {
    'badinfo': ('flag_badinfo', 'badinfo', '自然人识别', '识别自然人信息'),
    'ex': ('flag_execution', 'ex', '法院被执行人—个人版', '查询个人的法院失信被执行人、被执行人的执行案件信息'),
    'sl': ('flag_specialList_c', 'sl', '特殊名单验证',
           '用户本人、联系人、与用户有亲密关系的人（一度关系、二度关系-百融关系库定义）是否疑似命中中风险、一般风险、资信不佳、拒绝、高风险等百融特殊名单、以识别个体是否有虚假申请、欺诈等风险。'),
    'bankfourpro': ('flag_bankfourpro', 'bankfourpro', '银行卡四要素验证', '验证用户银行卡号、姓名、身份证号、手机号与银行预留信息是否一致'),
    'idtwo': ('flag_idtwo_z', 'idtwo_z', '身份证二要素验证', '核查身份证姓名是否一致'),
    'idtwo_z': ('flag_idtwo_z', 'idtwo_z', '身份证二要素验证', '核查身份证姓名是否一致'),
    'telCheck': ('flag_telCheck', 'telCheck', '手机三要素-移动联通电信', '验证移动、联通、电信手机号与绑定身份证号和姓名是否一致。'),
    'bankthree': ('flag_bankthree', 'bankthree', '银行卡三要素验证', '验证用户银行卡号、姓名、身份证号与银行预留信息是否一致。'),
    'cv': ('flag_companyver', 'cv', '单位验证', '单位名称和单位地址一致性验证。'),
    'ka': ('flag_keyattribution', 'ka', '身份证号手机号归属地', '？'),
    'als': ('flag_applyloanstr', 'als', '借贷意向验证（ApplyLoanStr_同评估报告）',
            '用户近7/15天、1/3/6个月在百融的虚拟信贷联盟(银行、非银、非银细分类型)中的多次信贷申请情况。'),
    'alf': ('flag_ApplyFeature', 'alf', '借贷意向衍生特征', '根据用户过往申请记录生成反应借贷意向的衍生特征。'),
    'tl': ('flag_totalloan', 'tl', '借贷行为验证', '用户在百融的虚拟信贷联盟中的借贷行为情况。'),
    'tle': ('flag_totalloan', 'tl', '借贷行为验证', '用户在百融的虚拟信贷联盟中的借贷行为情况。'),
    'ir': ('flag_inforelation', 'ir', '实名信息验证', '通过验证客户申请信息之间的关联关系、来判断客户的风险。'),
    'frg': ('flag_fraudrelation_g', 'frg', '团伙欺诈排查（通用版）', '团伙欺诈排查通用版是基于自有海量数据，通过算法挖掘用户的团伙欺诈行为。'),
    'drs': ('flag_debtrepaystress', 'drs', '偿债压力指数', '用户本人当前偿债压力指数的情况。'),
    'alu': ('flag_applyloanusury', 'alu', '高风险借贷意向验证', '用户近 7/15 天、1/3/6/12 个月在超利贷机构中的多次申请情况。'),
    'cons': ('flag_consumption_c', 'cons', '商品消费指数', '商品消费产品查询用户商品消费行为、是对商品消费次数、金额和类目等维度的统计评估（自然月）。'),
    'cf': ('flag_ConsumptionFeature', 'cf', '商品消费衍生特征', '根据用户过往商品消费行为生成反应商品消费的衍生特征。'),
    'location': ('flag_location', 'location', '地址信息验证', '用户详细地址信息与百融地址信息库的一致性核查。'),
    'stab': ('flag_stability_c', 'stab', '稳定性指数', '用户查询信息与百融行为库中的信息是否匹配、来检验信息的关联性和一致性。'),
    'media': ('flag_media_c', 'media', '媒体阅览指数', '媒体阅览评估产品查询用户媒体阅览行为、是对阅览天数、类别等维度的统计评估（自然月）。'),
    'ns': ('flag_netshopping', 'ns', '消费指数', '查询个人的网购消费信息。'),
    'pc': ('flag_personalcre', 'pc', '个人资质-基础版', '查询用户消费、收入、资产、职业等信息，对用户消费等级、消费偏好、收入稳定性、职业等信息进行评估。'),
    'pcp': ('flag_personalcrepro', 'pcp', '个人资质-专业版',
            '查询用户消费、收入、资产、职业等信息，对用户消费等级、消费偏好、收入稳定性、职业等信息进行评估，对银行卡消费情况的统计评估，展示1/3/6/12个月（自然月）支付行为情况（建议用户填其常用卡）。'),
    'gl': ('flag_graylistexpand', '灰名单衍生', 'gl', '根据用户过往的申请行为判断用户的团伙欺诈情况。'),
    'sd': ('flag_score', '联合贷标签', 'sd', '根据联合贷标签判断用户的分层。'),
    'mma': ('flag_multiplemodela', '入模产品A', 'mma', '入模产品A'),
    'mmb': ('flag_multiplemodelb', '入模产品B', 'mmb', '入模产品B'),
    'x': ('flag_X', '贷中衍生', 'x', '贷中衍生'),
    'pd': ('flag_populationderivation', '人口衍生', 'pd', '人口衍生'),
    'ql': ('flag_quantilelevel', '分位数', 'ql', '分位数'),
    'qls': ('flag_quantilelevelstr', '分位数综合版', 'qls', '分位数综合版'),
    'rc': ('flag_riskchar', '借贷风险衍生特征', 'rc', '借贷风险衍生特征'),
    'ae': ('flag_applyevaluate', '借贷意向验证3.0', 'ae', '借贷意向验证3.0'),
    'cc': ('flag_chaincheckevaluate_gm', '机构链条', 'cc', '机构链条'),
    'rpp': ('flag_riskpreferpre', '风险偏好预测', 'rpp', '风险偏好预测'),
    'pef': ('flag_paypowerevafor', '用户购买能力预测', 'pef', '用户购买能力预测'),
    'pp': ('flag_profilepopulation', '人口属性', 'pp', '人口属性'),
    'cof': ('flag_cashonfsleep', '假睡眠识别', 'cof', '假睡眠识别'),
    'afm': ('flag_applyfeaturemix', '联合贷申请评估', 'afm', '联合贷申请评估'),
    'il': ('flag_institutionloyal', '机构忠诚度', 'il', '机构忠诚度'),
    'aae': ('flag_applyapprovalevaluate', '申请核批验证', 'aae', '申请核批验证'),
    'alfp': ('flag_applyfeaturepro', '借贷意向衍生特征高级版', 'alfp', '借贷意向衍生特征高级版'),
    'aln': ('flag_applyloannewsub', '申请意向细分版', 'aln', '申请意向细分版'),
    'atmaep': ('flag_applytrendmixaepro', '申请趋势预测高级版A', 'atmaep', '申请趋势预测高级版A'),
    # BUGFIX: flag name previously carried a leading space (' flag_applytrendmixalfpro').
    'atmalfp': ('flag_applytrendmixalfpro', '申请趋势预测高级版B', 'atmalfp', '申请趋势预测高级版B'),
    'atmalsp': ('flag_applytrendmixalspro', '申请趋势预测高级版C', 'atmalsp', '申请趋势预测高级版C'),
    'atmqlp': ('flag_applytrendmixqlpro', '申请趋势预测高级版D', 'atmqlp', '申请趋势预测高级版D'),
    'atmtlp': ('flag_applytrendmixtlpro', '申请趋势预测高级版E', 'atmtlp', '申请趋势预测高级版E'),
    'aps': ('flag_applypreferstable', '申请偏好稳定性验证', 'aps', '申请偏好稳定性验证'),
}


def get_module_meta(prefix: str) -> Optional[ModuleMeta]:
    """
    Get module meta. (return None if module not found)

    :param prefix: module prefix, e.g. 'badinfo', 'ex', 'sl'
    :return: specific ModuleMeta, or None for an unknown prefix
    """
    # O(1) dict lookup replaces the former 45-branch if-chain; values unchanged
    # except for the fixed leading-space typo in the 'atmalfp' flag name.
    fields = _MODULE_META_TABLE.get(prefix)
    return ModuleMeta(*fields) if fields is not None else None


# NOTE(review): a second `__all__` assignment appears later in this file and
# overrides this one at import time.
__all__ = ('get_module_meta', )


def get_file_encoding(file: str) -> str:
    """
    Detect the encoding of a file from its first bytes.

    Only the first 100 bytes are sampled, so the guess can be wrong when the
    leading bytes are not representative of the rest of the file (e.g. an
    ASCII header in front of multi-byte text) -- NOTE(review): consider a
    larger sample if misdetections show up.

    :param file: absolute path of the file
    :return: detected encoding name (e.g. 'ascii'); chardet may return None
             when it cannot make a guess

    :example:

    >>> get_file_encoding('some_ascii_file.py')  # doctest: +SKIP
    'ascii'
    """
    import chardet  # local import: chardet is only needed here
    with open(file, 'rb') as fh:
        result = chardet.detect(fh.read(100))
    return result['encoding']


def get_file_encoding2(file: str) -> str:
    """
    Detect the encoding of a file with chardet's incremental detector.

    Feeds the file line by line and stops as soon as the detector is
    confident, so it can examine more than a fixed-size prefix (unlike
    :func:`get_file_encoding`) without necessarily reading the whole file.

    :param file: absolute path of the file
    :return: detected encoding name; may be None if undetermined

    :example:

    >>> get_file_encoding2('some_ascii_file.py')  # doctest: +SKIP
    'ascii'
    """
    from chardet.universaldetector import UniversalDetector

    detector = UniversalDetector()
    with open(file, 'rb') as fh:
        # Iterate the file object directly instead of `fh.readlines()`, which
        # loaded the entire file into memory before detection could stop early.
        for line in fh:
            detector.feed(line)
            if detector.done:
                break
    detector.close()
    return detector.result['encoding']

def get_file_size(file_name: str) -> float:
    """
    Get system file size.

    :param file_name: file name
    :return: size in megabytes (bytes / 1024**2)
    :raises FileNotFoundError: if the file does not exist
    """
    # Use pathlib (imported at the top of this file) instead of `os`, which is
    # only imported much further down in this module -- calling this function
    # before that import executed would raise NameError.
    return Path(file_name).stat().st_size / 1024 ** 2

def get_file_suffix(path: str) -> Tuple[str, str]:
    """
    Split a path into its bare file name and its extension.

    :param path: full file path
    :return: (file name without extension, extension including the leading dot)

    :example:

    >>> file_name, ext = get_file_suffix(r'C:/Users/liukai/PycharmProjects/hdfk/hdfk/utils/sys_util.py')
    >>> file_name
    'sys_util'
    >>> ext
    '.py'
    """
    file_path = Path(path)
    return file_path.stem, file_path.suffix


def train_test_split(X: Union[pd.DataFrame, pd.Series], y: Union[pd.DataFrame, pd.Series], test_size: float = None,
                     train_size: float = None, shuffle: bool = True, stratify: np.ndarray = None, *,
                     random_state: int = None) -> Tuple:
    """
    Thin wrapper around sklearn's ``train_test_split`` that also logs the
    shapes of the input data and of each resulting split.

    :param X: whole data
    :param y: whole label
    :param test_size: proportion of the dataset to include in the test split
    :param train_size: proportion of the dataset to include in the train split
    :param shuffle: whether to shuffle before splitting; if False, stratify must be None
    :param stratify: if not None, split in a stratified fashion using these class labels
    :param random_state: seed for the random number generator
    :return: tuple (X_train, X_test, y_train, y_test)
    """
    splits = tt_split(X, y, test_size=test_size, train_size=train_size, shuffle=shuffle,
                      stratify=stratify, random_state=random_state)
    X_train, X_test, y_train, y_test = splits
    logger.info(
        "Splitting original shape X={}, y={} into X_train={}, X_test={}, y_train={}, y_test={}..."
            .format(X.shape, y.shape, X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    )
    return X_train, X_test, y_train, y_test

def del_nan(df: pd.DataFrame, nan_ratio_threshold: float = 0.95, inplace: bool = False) -> Union[None, pd.DataFrame]:
    """
    Delete the columns with too much nan values.

    :param df: data object
    :param nan_ratio_threshold: nan ratio threshold; columns strictly above it are dropped
    :param inplace: inplace change or not
    :return: None if inplace, else a filtered copy with reasonable nan values
    """
    assert isinstance(df, pd.DataFrame), "Input should be a pandas dataframe!"
    nan_series = df.isna().mean()
    # `.items()`: `Series.iteritems()` was removed in pandas 2.0.
    for feature, ratio in nan_series.items():
        passed = ratio <= nan_ratio_threshold
        logger.info(
            "{} Feature {} {} with nan ratio = {:.3f}..."
            .format('√' if passed else '×', repr(feature), 'passed' if passed else 'denied', ratio)
        )
    if inplace:
        # BUGFIX: previously `(nan_series > threshold).index` was used, which is
        # the index of the *whole* boolean series -- i.e. every column -- so
        # inplace=True dropped all columns. Select only the offending ones.
        drop_features = nan_series.index[nan_series > nan_ratio_threshold]
        df.drop(columns=drop_features, inplace=True)
        logger.info('删除缺失率在{}以上的变量, 还剩余{}个候选变量'.format(nan_ratio_threshold, len(df.columns)))
        return None
    else:
        res = df.loc[:, nan_series.values <= nan_ratio_threshold].copy()
        logger.info('删除缺失率在{}以上的变量, 还剩余{}个候选变量'.format(nan_ratio_threshold, len(res.columns)))
        return res

def del_mode(df: pd.DataFrame, mode_ratio_threshold: float = 0.95, inplace=False) -> Union[None, pd.DataFrame]:
    """
    Delete the columns with too much mode values. (mode does not contain nan)

    Columns whose name contains 'flag_' are always kept.

    :param df: data object
    :param mode_ratio_threshold: mode ratio threshold
    :param inplace: inplace change or not
    :return: None if inplace, else a filtered copy with reasonable mode values
    """
    assert isinstance(df, pd.DataFrame), "Input should be a pandas dataframe!"

    def col_mode_ratio(col: pd.Series) -> float:
        # Share of the most frequent non-nan value; an all-nan column counts as 1.
        summary = col.value_counts()
        ratio = summary.iloc[0] / summary.sum() if len(summary) > 0 else 1.  # denominator excludes nan
        passed = ratio <= mode_ratio_threshold
        logger.info(
            "{} Feature {} {} with mode ratio = {:.3f}..."
            .format('√' if passed else '×', repr(col.name), 'passed' if passed else 'denied', ratio)
        )
        return ratio

    if inplace:
        # BUGFIX: materialize into a list -- dropping/indexing with a raw
        # generator is not reliably supported by pandas.
        drop_cols = [f for f in df.columns if col_mode_ratio(df[f]) > mode_ratio_threshold and 'flag_' not in f]
        df.drop(columns=drop_cols, inplace=True)
        logger.info('删除同值比例超过{}的变量后，还剩余{}个候选变量'.format(mode_ratio_threshold, len(df.columns)))
        return None
    else:
        keep_cols = [f for f in df.columns if col_mode_ratio(df[f]) <= mode_ratio_threshold or 'flag_' in f]
        res = df[keep_cols].copy()
        logger.info('删除同值比例超过{}的变量后，还剩余{}个候选变量'.format(mode_ratio_threshold, len(res.columns)))
        return res

def del_cat(df: pd.DataFrame, cat_threshold: int = 10, inplace: bool = False) -> Union[None, pd.DataFrame]:
    """
    Delete the categorical columns with too many categories.

    Columns must already be nan-free (see the `slim` helpers): an assertion
    fires otherwise.

    :param df: data object
    :param cat_threshold: category count threshold
    :param inplace: inplace change or not
    :return: None if inplace, else a filtered copy with reasonable category counts
    """

    def col_nunique(col: pd.Series) -> int:
        # make sure is slimed
        assert col.notna().values.all(), f"Column {col.name} should be filled with nan replacements: i.e. -99 or 'blank'!"
        cat_num = col.nunique(dropna=True)
        passed = cat_num <= cat_threshold
        logger.info(
            "{} Feature {} {} with {} unique values, exclude `np.nan`..."
            .format('√' if passed else '×', repr(col.name), 'passed' if passed else 'denied', cat_num)
        )
        return cat_num

    # isinstance check on the dtype: `is_categorical_dtype` is deprecated in pandas 2.x.
    cat_features = [f for f in df.columns if isinstance(df[f].dtype, pd.CategoricalDtype)]
    drop_cols = [f for f in cat_features if col_nunique(df[f]) > cat_threshold]
    if inplace:
        df.drop(columns=drop_cols, inplace=True)
        logger.info('删除类别型个数超过{}的变量后，还剩余{}个候选变量'.format(cat_threshold, len(df.columns)))
        return None
    else:
        # drop() already returns a new frame; the former extra `.copy()` was redundant.
        res = df.drop(columns=drop_cols)
        logger.info('删除类别型个数超过{}的变量后，还剩余{}个候选变量'.format(cat_threshold, len(res.columns)))
        return res



def get_dummied(X: pd.DataFrame) -> pd.DataFrame:
    """
    Get dummied data: one-hot encode the categorical features, keep numeric ones.

    :param X: data object
    :return: numeric features joined with the one-hot encoded categorical features

    :example:

    >>> import pandas as pd
    >>> X = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=list('CD'))
    >>> get_dummied(X)
       A  B
    C  1  2
    D  3  4
    """
    # Must run before any categorical re-encoding of the frame.
    num_features, cat_features = positions_of(X, kind='num'), positions_of(X, kind='cat')
    X_num, X_cat = X[num_features], X[cat_features]
    if len(cat_features) == 0:
        logger.warning("No category feature in X, return original dataframe...")
        return X_num
    else:
        # One-hot encode categorical variables into dummy columns.
        logger.info("Found {} category features in X, applying dummy transform...".format(len(cat_features)))
        X_cat_dummied = pd.get_dummies(X_cat)
        res = X_num.join(X_cat_dummied)
        logger.info("After dummy transform, there are {} features...".format(len(res.columns)))
        return res

def subset(ls1, ls2) -> bool:
    """
    Decide whether every element of ``ls1`` occurs in ``ls2``.

    :param ls1: first sized iterable (candidate subset)
    :param ls2: second sized iterable (candidate superset)
    :return: subset check result

    :example:
    >>> subset([1, 2, 3], [3, 2, 1])
    True
    """
    if not len(ls1):
        # The empty collection is a subset of anything.
        return True
    if len(ls2) < len(ls1):
        # Too few candidates to contain every element (cheap early exit).
        return False
    return all(item in ls2 for item in ls1)

def get_hit_indices(X_raw: pd.DataFrame, final_flags: List[str]) -> np.ndarray:
    """
    Return the index labels of the samples that hit at least one flag.

    :param X_raw: raw data containing all flag features
    :param final_flags: flag feature names
    :return: index labels of the hit samples

    :example:

    >>> import pandas as pd
    >>> X = pd.DataFrame([[1, 0], [0, 1]], columns=list('AB'), index=list('CD'))
    >>> get_hit_indices(X, ['A', 'B'])
    array(['C', 'D'], dtype=object)
    """
    assert subset(final_flags, X_raw.columns), "X_raw should contain all chosen feature of X!"
    flag_df = X_raw[final_flags]
    # A sample "hits" when the sum over its flag columns is at least one.
    is_hit = flag_df.values.sum(axis=1) >= 1
    hits, total, ratio = np.sum(is_hit), len(is_hit), np.average(is_hit)
    logger.info("Current data hit result: hits={}, total={}, ratio={}...".format(hits, total, ratio))
    return flag_df.index.values[is_hit]


# NOTE(review): duplicate import/logger block -- all of these names are already
# bound at the top of the file; this file looks like several modules
# concatenated together. Kept as-is to preserve behavior.
from logging import getLogger
from typing import Union, Tuple

import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype, is_string_dtype, is_categorical_dtype, is_object_dtype


logger = getLogger(__name__)

def positions_of(df: pd.DataFrame, *, kind: str = 'nan') -> List[str]:
    """
    Get features of a specific kind.

    :param df: pandas data object
    :param kind: feature kind ('cat' or 'num' or 'nan')
    :return: feature list with specific type
    :raises ValueError: for an unknown ``kind``

    :example:
    >>> import pandas as pd
    >>> d = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=list('CD'))
    >>> positions_of(d, kind='nan')
    []
    """
    assert isinstance(df, pd.DataFrame), "Input should be a pandas dataframe!"
    df_features = df.columns
    # numpy dtype kind codes: i=int, u=uint, f=float, c=complex / b=bool, O=object, S=bytes, U=str.
    num_kinds = ('i', 'u', 'f', 'c')
    cat_kinds = ('b', 'O', 'S', 'U')
    if kind == 'nan':
        res = [f for f in df_features if df[f].isna().values.any()]
    elif kind == 'num':
        res = [f for f in df_features if df[f].dtype.kind in num_kinds]
    elif kind == 'cat':
        res = [f for f in df_features if df[f].dtype.kind in cat_kinds]
    else:
        raise ValueError(f"Unknown kind {kind} encountered, use 'cat', 'num' or 'nan' instead!")
    logger.info("Found {} {} features...".format(len(res), kind))
    return res


# DEPRECATED: float32 will lose precision
def slim_col_new(x: pd.Series, nan_replacements: Tuple[int, str] = (-99, 'blank'), inplace=False) -> pd.Series:
    num_nan, cat_nan = nan_replacements
    origin_type = x.dtype.name
    x_has_nan = x.isna().values.any()
    if is_numeric_dtype(x):
        if x_has_nan:
            if inplace:
                x.fillna(value=num_nan, inplace=inplace)
            else:
                x = x.fillna(value=num_nan, inplace=inplace)
        as_int = x.fillna(0).astype(np.int64)
        is_int = np.allclose(x, as_int)
        # is_int = True if -0.01 < sum(x - as_int) < 0.01 else False
        mn, mx = x.min(), x.max()
        if is_int:
            if mn >= 0:
                if mx < 255:
                    x = x.astype(np.uint8)
                elif mx < 65535:
                    x = x.astype(np.uint16)
                elif mx < 4294967295:
                    x = x.astype(np.uint32)
                else:
                    x = x.astype(np.uint64)
            else:
                if -128 < mn and mx < 127:
                    x = x.astype(np.int8)
                elif -32768 < mn and mx < 32767:
                    x = x.astype(np.int16)
                elif -2147483648 < mn and mx < 2147483647:
                    x = x.astype(np.int32)
                elif -9223372036854775808 < mn and mx < 9223372036854775807:
                    x = x.astype(np.int64)
        else:
            if np.finfo(np.float16).min < mn and mx < np.finfo(np.float16).max:
                x = x.astype(np.float16)
            elif np.finfo(np.float32).min < mn and mx < np.finfo(np.float32).max:
                x = x.astype(np.float32)
            elif np.finfo(np.float64).min < mn and mx < np.finfo(np.float64).max:
                x = x.astype(np.float64)
    # 若为分类型, 则填充为'blank'并将列类型设置为category
    elif is_string_dtype(x) or is_categorical_dtype(x) or is_object_dtype(x):
        if x_has_nan:
            if inplace:
                x.fillna(value=cat_nan, inplace=inplace)
            else:
                x = x.fillna(value=cat_nan, inplace=inplace)
        x = x.astype('category')
    logger.info(
        "Column {} data type changes from `{}` to `{}`..."
            .format(repr(x.name), origin_type, x.dtype.name)
    )
    return x


def slim_col(x: pd.Series, nan_replacements: Tuple[int, str] = (-99, 'blank'), inplace: bool = False) -> pd.Series:
    """
    Type fit function to slim a specific column.

    Numeric columns are nan-filled and, when integral, down-cast to the
    narrowest integer dtype that can hold their values (non-integral floats
    keep their dtype). String/object/categorical columns are nan-filled and
    converted to `category` dtype.

    :param x: data column
    :param nan_replacements: column nan replacements, including numeric nan and category nan values, i.e. -99 or 'blank'
    :param inplace: inplace change or not (default is False)
    :return: slimed column
    :raises OverflowError: if an integral column does not fit in int64
    """
    num_nan, cat_nan = nan_replacements
    origin_type = x.dtype.name
    x_has_nan = x.isna().values.any()
    if is_numeric_dtype(x):
        if x_has_nan:
            if inplace:
                x.fillna(value=num_nan, inplace=inplace)
            else:
                x = x.fillna(value=num_nan, inplace=inplace)
        # Column counts as integral when all values survive an int64 round-trip
        # (within np.allclose tolerance).
        as_int = x.fillna(0).astype(np.int64)
        is_int = np.allclose(x, as_int)
        mn, mx = x.min(), x.max()
        if is_int:
            if mn >= 0:
                # NOTE(review): bounds compare with `<` against the type's own
                # max (e.g. a max of exactly 255 lands in uint16, not uint8) --
                # safe, just slightly wasteful.
                if mx < 255:
                    x = x.astype(np.uint8)
                elif mx < 65535:
                    x = x.astype(np.uint16)
                elif mx < 4294967295:
                    x = x.astype(np.uint32)
                else:
                    x = x.astype(np.uint64)
            else:
                if -128 < mn and mx < 127:
                    x = x.astype(np.int8)
                elif -32768 < mn and mx < 32767:
                    x = x.astype(np.int16)
                elif -2147483648 < mn and mx < 2147483647:
                    x = x.astype(np.int32)
                elif -9223372036854775808 < mn and mx < 9223372036854775807:
                    x = x.astype(np.int64)
                else:
                    raise OverflowError("Integer overflow encountered!")
    # Categorical-ish column: fill nan with the categorical filler ('blank')
    # and convert the column to `category` dtype.
    elif is_string_dtype(x) or is_categorical_dtype(x) or is_object_dtype(x):
        if x_has_nan:
            if inplace:
                x.fillna(value=cat_nan, inplace=inplace)
            else:
                x = x.fillna(value=cat_nan, inplace=inplace)
        x = x.astype('category')

    logger.info(
        "Column {} data type changes from `{}` to `{}`..."
        .format(repr(x.name), origin_type, x.dtype.name)
    )
    return x


def slim_assign(df: Union[pd.DataFrame, pd.Series], nan_replacements: Tuple[int, str] = (-99, 'blank'),
                inplace: bool = False) -> Union[pd.DataFrame, pd.Series]:
    """
    Slim a Series, or every column of a DataFrame, via :func:`slim_col`,
    re-attaching the slimmed columns with ``DataFrame.assign``.

    :param df: pandas data object, can be DataFrame or Series
    :param nan_replacements: numeric / categorical nan fillers, e.g. (-99, 'blank')
    :param inplace: inplace change or not (default is False)
    :return: slimed data
    :raises TypeError: if ``df`` is neither a DataFrame nor a Series
    """
    if isinstance(df, pd.Series):
        return slim_col(df, nan_replacements=nan_replacements, inplace=inplace)
    if isinstance(df, pd.DataFrame):
        # `assign` only accepts string keywords, so non-string column names
        # cannot be handled here.
        assert all(isinstance(f, str) for f in df.columns), "`slim_assign` must use string as keywords, check columns!"
        slimmed = {}
        for feature in df.columns:
            slimmed[feature] = slim_col(df[feature], nan_replacements=nan_replacements, inplace=inplace)
        return df.assign(**slimmed)
    raise TypeError(f"Unsupported type({type(df)}) for slim operation!")


def slim_df(df: Union[pd.DataFrame, pd.Series], nan_replacements: Tuple[int, str] = (-99, 'blank'),
            inplace: bool = False) -> Union[pd.DataFrame, pd.Series]:
    """
    Slim a Series, or every column of a DataFrame, via :func:`slim_col`,
    rebuilding the frame from the slimmed columns.

    :param df: pandas data object, can be DataFrame or Series
    :param nan_replacements: numeric / categorical nan fillers, e.g. (-99, 'blank')
    :param inplace: inplace change or not (default is False)
    :return: slimed data
    :raises TypeError: if ``df`` is neither a DataFrame nor a Series
    """
    if isinstance(df, pd.Series):
        return slim_col(df, nan_replacements=nan_replacements, inplace=inplace)
    if isinstance(df, pd.DataFrame):
        slimmed_cols = {
            name: slim_col(df[name], nan_replacements=nan_replacements, inplace=inplace)
            for name in df.columns
        }
        return pd.DataFrame(slimmed_cols, index=df.index)
    raise TypeError(f"Unsupported type({type(df)}) for slim operation!")


# Fill missing values + shrink dtypes ("data slimming").
def slim(df: Union[pd.DataFrame, pd.Series], nan_replacements: Tuple[int, str] = (-99, 'blank'),
         inplace: bool = False) -> Union[pd.DataFrame, pd.Series]:
    """
    Slim a Series, or every column of a DataFrame, via :func:`slim_col`.

    :param df: pandas data object, can be DataFrame or Series
    :param nan_replacements: numeric / categorical nan fillers, e.g. (-99, 'blank')
    :param inplace: inplace change or not (default is False)
    :return: slimed data
    :raises TypeError: if ``df`` is neither a DataFrame nor a Series
    """
    if isinstance(df, pd.Series):
        return slim_col(df, nan_replacements=nan_replacements, inplace=inplace)
    if isinstance(df, pd.DataFrame):
        columns = {}
        for name in df.columns:
            columns[name] = slim_col(df[name], nan_replacements=nan_replacements, inplace=inplace)
        return pd.DataFrame(columns, index=df.index)
    raise TypeError(f"Unsupported type({type(df)}) for slim operation!")


# BUGFIX: this second `__all__` assignment previously *overwrote* the earlier
# `__all__ = ('get_module_meta', )`, silently dropping that name from
# star-imports; the export lists are merged here instead.
__all__ = ('get_module_meta', 'slim', 'slim_assign', 'slim_df', 'slim_col')

import ntpath
import os
import re
from logging import getLogger
from typing import List, Union, Tuple

import numpy as np
import pandas as pd


logger = getLogger(__name__)

# Columns that carry identifiers, free text, contact info or bookkeeping fields
# and are useless as model features; `read_from` skips them by default.
NONSENSE_COLS = ['apply_addr', 'home_addr', 'per_addr', 'biz_addr', 'oth_addr', 'city', 'cityCode', 'vehicle_id', 'car_code', 'driver_number', 'type_vehicle_id', 'biz_name', 'reg_num', 'org_num', 'biz_workfor', 'KeyNo', 'biz_regnum', 'biz_Legalperson', 'event', 'af_swift_number', 'apply_type', 'device_type', 'device_id', 'house_type', 'CommName', 'area', 'room_type', 'floor_building', 'builted_time', 'floor', 'totalfloor', 'toward', 'special_factors', 'hall', 'toilet', 'house_number', 'position', 'renovation', 'id', 'cell', 'mail', 'name', 'bank_card1', 'user_date', 'user_time', 'sl_user_date', 'group', 'tel', 'time_range', 'beginTime', 'endTime', 'custApiCode', 'month', 'page', 'range', 'apply_date', 'other_var1', 'other_var2', 'other_var3', 'other_var4', 'other_var5', 'city_id', 'taskid', 'IP', 'longitude', 'latitude', 'no', 'days', 'remainder', 'link_name1', 'link_id1', 'link_cell1', 'link_name2', 'link_id2', 'link_cell2', 'link_name3', 'link_id3', 'link_cell3', 'gid', 'ExtData_CAR_PRICE', 'ExtData_MARRY_STATUS', 'ExtData_HOUSE_PROPERTY', 'ExtData_SEX', 'ExtData_HUKOU', 'ExtData_age', 'ExtData_loan_reject_num', 'ExtData_rh_max_card_bal_per_m12', 'ExtData_rh_max_loan_bal', 'ExtData_rh_tot_card_amt', 'id_photo', 'daily_photo', 'other_var6', 'other_var7', 'other_var8', 'other_var9', 'other_var10', 'other_var11', 'other_var12', 'other_var13', 'other_var14', 'other_var15', 'other_var16', 'other_var17', 'other_var18', 'other_var19', 'other_var20', 'other_var21', 'other_var22', 'other_var23', 'other_var24', 'other_var25', 'other_var26', 'other_var27', 'other_var28', 'other_var29', 'other_var30', 'start_date', 'bank_photo', 'passDate', 'cus_group', 'vin', 'carName', 'trimId', 'mileage', 'buyCarDate', 'cityId', 'provinceId', 'colorId', 'address', 'idCard', 'bank_id', 'addrType', 'coordinate', 'coordinateType', 'loan_type', 'name2', 'qq', 'passport_number', 'tel_home', 'tel_biz', 'id_type', 'bank_card2', 'linkman_cell','cus_username','swift_number','code']
# Feature names derived from the applicant's "three elements" (id number, cell
# number, name): province/city/operator/gender/age attributions plus their
# one-hot ("_blank"/value-suffixed) dummy columns.
THREE_ELEMENT_DERIVE = ['id_province', 'id_city_level', 'cell_province', 'cell_city_level', 'cell_comp', 'gender',
                        'apply_age', 'id_region', 'cell_region', 'is_id_capital', 'is_cell_capital', 'person',
                        'level_cell_city', 'level_id_city', 'level_id_cell', 'is_province', 'is_city', 'province_city',
                        'id_city_level_blank', 'id_city_level_一线', 'id_city_level_三线', 'id_city_level_二线', 'id_city_level_五线', 'id_city_level_其他', 'id_city_level_四线',
 'cell_city_level_blank', 'cell_city_level_一线', 'cell_city_level_三线', 'cell_city_level_二线', 'cell_city_level_五线', 'cell_city_level_其他', 'cell_city_level_四线',
 'cell_comp_blank', 'cell_comp_电信', 'cell_comp_移动', 'cell_comp_联通', 'gender_blank','gender_女', 'gender_男',
 'id_region_blank', 'id_region_东北地区', 'id_region_华东地区', 'id_region_华中地区', 'id_region_华北地区', 'id_region_华南地区', 'id_region_西北地区', 'id_region_西南地区',
 'cell_region_blank', 'cell_region_东北地区', 'cell_region_华东地区', 'cell_region_华中地区', 'cell_region_华北地区', 'cell_region_华南地区', 'cell_region_西北地区', 'cell_region_西南地区',
 'person_blank','person_中年女性', 'person_中年男性', 'person_青年女性', 'person_青年男性',
 'level_id_cell_blank', 'level_id_cell_向上迁移', 'level_id_cell_向下迁移', 'level_id_cell_平行迁移',
 'is_province_blank','is_province_一致', 'is_province_不一致', 'is_city_blank','is_city_一致', 'is_city_不一致', 'province_city_blank','province_city_城市与省份均迁移', 'province_city_城市迁移', 'province_city_未迁移', 'province_city_省份迁移']

def get_headers(data: str, with_name: bool = True) -> Union[int, Tuple[int, list]]:
    """
    Detect whether a dataframe should have one or two headers.

    A file is considered double-headered when its first row is not entirely
    Chinese, every column parses as object, and the first data row contains
    Chinese text -- i.e. an English header sitting on top of a Chinese one.

    :param data: input data path, should be csv or excel
    :param with_name: if with name is True, return header names also
    :return: 1 if one header else 2 (optionally with the first-row header names)
    :raises ValueError: for file types other than csv/xlsx/xls
    """
    _, file_type = get_file_suffix(data)
    read_kwargs = dict(header=[0], nrows=3)
    if file_type == '.csv':
        read_func = pd.read_csv
        # BUGFIX: only read_csv takes an `encoding` argument; passing it to
        # read_excel raises TypeError on modern pandas.
        read_kwargs['encoding'] = get_file_encoding(data)
    elif file_type in ('.xlsx', '.xls'):
        read_func = pd.read_excel
    else:
        raise ValueError("Input data should be a csv/xlsx file!")

    with open(data, 'rb') as f:
        tmp_df = read_func(f, **read_kwargs)

    header_names = tmp_df.columns.values.tolist()
    zh_cn_regex = re.compile(ZH_CN_REGEX)

    # First row entirely Chinese -> a single (Chinese) header.
    # (str() guards against non-string column names / cell values, which would
    # otherwise crash `re.search`.)
    if all(zh_cn_regex.search(str(f)) for f in tmp_df.columns):
        headers = 1
    # First row not all Chinese, all columns read as object, and the first data
    # row contains Chinese -> the file carries an extra Chinese header row.
    elif np.all(tmp_df.dtypes == 'object') and any(zh_cn_regex.search(str(v)) for v in tmp_df.iloc[0, :].values):
        headers = 2
    # Otherwise: a plain single header.
    else:
        headers = 1

    if with_name:
        return headers, header_names
    return headers



def read_from(data: str, nonsense_cols: List[str] = NONSENSE_COLS, skip_cols: List[str] = None,
              verbose: bool = True) -> pd.DataFrame:
    """
    Read tabular data intelligently: detect encoding and header layout, and
    drop known-useless columns before loading.

    :param data: path to input csv/xlsx/xls file
    :param nonsense_cols: columns to always skip (defaults to NONSENSE_COLS)
    :param skip_cols: extra columns to skip, supplement to nonsense_cols
    :param verbose: log extra info (file size, skipped-column warning)
    :return: pandas dataframe object
    :raises FileNotFoundError: if `data` does not exist
    :raises ValueError: if `data` is not a csv/xlsx/xls file

    :example:
    >>> _ = read_from(r'C:/Users/liukai/PycharmProjects/hdfk/data/input/建模数据匹配结果.csv')
    Time elapse of function call `read_from`: 13.197 s.
    """
    if not os.path.exists(data):
        raise FileNotFoundError("Invalid file input!")
    # make the file readable/writable for downstream consumers
    os.chmod(data, 0o777)
    data_name, file_type = get_file_suffix(data)
    file_size = get_file_size(data)
    if verbose:
        logger.info("Input file({}) has {:.3f} M.".format(data_name, file_size))

    # pick the reader; only csv needs (and accepts) an explicit encoding —
    # pd.read_excel rejects an `encoding` keyword since pandas 1.2
    read_options = {}
    if file_type == '.csv':
        read_func = pd.read_csv
        read_options['encoding'] = get_file_encoding(data)
    elif file_type in ('.xlsx', '.xls'):
        read_func = pd.read_excel
    else:
        raise ValueError("Input data should be a csv/xlsx file!")

    # with two header rows keep the first and skip the second (Chinese) one
    headers, header_names = get_headers(data)
    skiprows = [1] if headers == 2 else None

    # use the customer-number column as index when present (en or cn name)
    if 'cus_num' in header_names:
        index_col = 'cus_num'
    elif '客户数据编号' in header_names:
        index_col = '客户数据编号'
    else:
        index_col = None

    nonsense_cols = list(nonsense_cols) if nonsense_cols is not None else []
    if verbose and nonsense_cols:
        logger.warning("Nonsense columns found, result will skip all these columns.")

    skip_cols = list(skip_cols) if skip_cols is not None else []
    # set membership keeps the column filter O(1) per column
    dropped = set(nonsense_cols) | set(skip_cols)
    use_cols = [col for col in header_names if col not in dropped]
    read_options.update({
        'header': 0,
        'skiprows': skiprows,
        'index_col': index_col,
        'usecols': use_cols,
    })

    # large files (>= 50 MB) are read in chunks to bound peak memory
    if file_size < 50:
        with open(data, 'rb') as f:
            res = read_func(f, **read_options)
    else:
        read_options['chunksize'] = 10000
        with open(data, 'rb') as f:
            res = pd.concat(read_func(f, **read_options))
    return res

def get_flag(feature: str) -> str:
    """
    Resolve the flag column name for a single feature.

    A name already starting with 'flag_' is returned unchanged; otherwise the
    prefix before the first underscore is resolved through get_module_meta.

    :param feature: feature name
    :return: flag of this feature
    :raises ValueError: if the feature prefix maps to no known module
    :example:

    >>> get_flag('cons_m3_RYBH_pay')
    'flag_consumption_c'
    >>> get_flag('flag_consumption')
    'flag_consumption'
    """
    if feature.startswith('flag_'):
        return feature
    prefix, _, _ = feature.partition('_')
    meta = get_module_meta(prefix)
    if meta is None:
        raise ValueError(f"Unknown feature {repr(feature)} with no flag module!")
    return meta.flag_name
def get_flags(features: Sequence[str]) -> List[str]:
    """
    Get features' flags, deduplicated with first-seen order preserved.

    :param features: list of feature names
    :return: flags of these features

    :example:

    >>> get_flags(['cons_m3_RYBH_pay', 'flag_consumption'])
    ['flag_consumption_c', 'flag_consumption']
    """
    # dict.fromkeys deduplicates while keeping first-seen order, so the result
    # (and the doctest above) is deterministic — list(set(...)) would shuffle
    # the order from run to run under hash randomization
    flags = list(dict.fromkeys(
        get_flag(f) for f in features if f not in THREE_ELEMENT_DERIVE
    ))
    logger.info("Current features' corresponding flags are {}".format(flags))
    return flags

def get_sorted_feature(origin_features: Union[pd.Index, np.ndarray, list], importances: np.ndarray,
                       threshold: Union[int, float] = None, operator: str = '>') -> List[str]:
    """
    Sort features by importance (descending); if a threshold is provided, keep
    only the features whose importance satisfies `importance <operator> threshold`.

    :param origin_features: original features, extracted from dataframe
    :param importances: feature importance, obtained by model or rules
    :param threshold: flexible number to decide whether to accept a feature;
        None keeps every feature
    :param operator: comparison against threshold, one of <=, <, =, !=, >=, >
        (default is '>')
    :return: sorted feature names, also satisfying the threshold
    :raises ValueError: if `operator` is not a supported comparison

    :example:

    >>> import numpy as np
    >>> features = ['f1', 'f2', 'f3', 'f4']
    >>> importances = np.array([0.4, 1.2, -0.2, 1.0])
    >>> get_sorted_feature(features, importances, threshold=0)
    ['f2', 'f4', 'f1']
    """
    import operator as _op
    operator_map = {
        '<=': _op.le,
        '<': _op.lt,
        '=': _op.eq,
        '!=': _op.ne,
        '>=': _op.ge,
        '>': _op.gt
    }
    # fail early with a clear message instead of a bare KeyError below
    try:
        compare = operator_map[operator]
    except KeyError:
        raise ValueError(
            f"Unsupported operator {operator!r}; expected one of {sorted(operator_map)}"
        ) from None

    if threshold is None:
        feature_importance = dict(zip(origin_features, importances))
    else:
        feature_importance = {f: imp for f, imp in zip(origin_features, importances)
                              if compare(imp, threshold)}
    # sort names by their importance, highest first (sorted() is stable, so
    # ties keep input order)
    res = sorted(feature_importance, key=feature_importance.__getitem__, reverse=True)
    logger.info("Number of valid feature(importance {} {}) is {}".format(operator, threshold, len(res)))
    return res