import pandas
def missing_rate(df: pandas.core.frame.DataFrame) -> dict:
    """
    Calculate Missing Rate
    -----
    This function is used to calculate the missing rate of the given features.
    
    Params
    -----
    df: pandas.core.frame.DataFrame
        A DataFrame object contains with data.

    Returns
    -----
    missing_rate_dict: dict{str: float}
        The dictionary of the missing rate of each feature. {feature_name:missing_rate}
    
    Examples
    -----
    missing_rate_dict = missing_rate(df[var_list])
    var_list = [var for var in var_list if missing_rate_dict[var] <= missing_threshold]
    """
    # isna().mean() computes (#NaN / #rows) per column in one vectorized pass,
    # equivalent to the per-column sum()/N loop without Python-level iteration.
    # (Return annotation fixed: `dict()` evaluated to an empty dict instance.)
    return df.isna().mean().to_dict()

import pandas
def identical_rate(df: pandas.core.frame.DataFrame) -> dict:
    """
    Calculate Identical Rate
    -----
    This function is used to calculate the identical rate of the given features.
    
    Params
    -----
    df: pandas.core.frame.DataFrame
        A DataFrame object contains with data.

    Returns
    -----
    identical_rate_dict: dict{str: float}
        The dictionary of the identical rate of each feature. {feature_name:identical_rate}
    
    Examples
    -----
    identical_rate_dict = identical_rate(df[var_list])
    var_list = [var for var in var_list if identical_rate_dict[var] <= identical_threshold]
    """
    # (Return annotation fixed: `dict()` evaluated to an empty dict instance.)
    N = df.shape[0]
    # value_counts() excludes NaN, so the rate is
    # (#rows holding the most frequent non-null value) / (total rows).
    # NOTE(review): an all-NaN column yields NaN here (max of empty counts),
    # which fails any `rate <= threshold` test downstream — confirm intended.
    identical_rate_dict = {var: df[var].value_counts().max() / N for var in df.columns}
    
    return identical_rate_dict

import numpy
import pandas
def WOE(
    x: list or pandas.core.series.Series or numpy.ndarray, 
    y: list or pandas.core.series.Series or numpy.ndarray
) -> float:
    """
    Calculate Weight of Evidence
    -----
    This function is used to calculate the weight of evidence of the given feature x with given y label.
    
    Params
    -----
    x: list or pandas.core.series.Series or numpy.ndarray
        An orderable object contains with x data.
    y: list or pandas.core.series.Series or numpy.ndarray
        An orderable object contains with y label (assumed 0/1, 1 = bad).

    Returns
    -----
    woe: pandas.core.series.Series
        The the weight of evidence of the feature, indexed by category of x.
    data_bad_ratio: pandas.core.series.Series
        The bad ratio: bad count in each category / total bad count.
    data_good_ratio: pandas.core.series.Series
        The good ratio: good count in each category / total good count.
    
    Examples
    -----
    woe, bad_ratio, good_ratio = WOE([1, 1, 2, 2], [0, 1, 0, 0])
    """
    # validation
    assert len(x) == len(y), "The given x and y have different length"
    # Sets are unordered and cannot be aligned element-wise with y, so they are
    # rejected.  (The original asserts were inverted — `type(x) == set` — and
    # therefore failed for every *valid* input type.)
    assert not isinstance(x, set), "The given x is an instance of set type, which is unorderable."
    assert not isinstance(y, set), "The given y is an instance of set type, which is unorderable."
    
    # numpy.asarray accepts list / Series / ndarray alike and drops any pandas
    # index, guaranteeing positional alignment between x and y (plain lists
    # previously crashed on `y.sum()` despite being documented as supported).
    df = pandas.DataFrame({'x': numpy.asarray(x), 'y': numpy.asarray(y)})
    bad_sum = df.y.sum()
    good_sum = len(df) - bad_sum
    
    # calculate woe
    grouped = df.groupby('x')
    data = pandas.DataFrame({'total': grouped.x.count(), 'bad': grouped.y.sum()})
    data['good'] = data.total - data.bad
    # replace zero counts with 0.9 to avoid log(0) / division by zero
    data = data.replace(0, 0.9)
    data.bad = data.bad / bad_sum
    data.good = data.good / good_sum
    woe = numpy.log(data.bad / data.good)
    
    return woe, data.bad, data.good

import numpy
import pandas
def IV(
    x: list or pandas.core.series.Series or numpy.ndarray, 
    y: list or pandas.core.series.Series or numpy.ndarray,
    return_woe: bool = False
) -> float:
    """
    Calculate Information Value
    -----
    This function is used to calculate the information value of the given feature x with given y label.
    
    Params
    -----
    x: list or pandas.core.series.Series or numpy.ndarray
        An orderable object contains with x data.
    y: list or pandas.core.series.Series or numpy.ndarray
        An orderable object contains with y label.
    return_woe: bool
        Whether to return the detailed weight of evidence values of each category in the feature.

    Returns
    -----
    information_value: float
        The the information value of the feature.
    woe: pandas.core.series.Series
        The weight of evidence value of each category (only when return_woe is True).
    
    Examples
    -----
    iv = IV(df['feature'], df['label'])
    """
    # validation
    assert len(x) == len(y), "The given x and y have different length"
    # Reject unorderable sets (the original asserts were inverted and failed
    # for every valid input type).
    assert not isinstance(x, set), "The given x is an instance of set type, which is unorderable."
    assert not isinstance(y, set), "The given y is an instance of set type, which is unorderable."
    
    # calculate woe & iv: IV = sum over categories of (bad_ratio - good_ratio) * WOE
    woe, bad_ratio, good_ratio = WOE(x, y)
    iv = ((bad_ratio - good_ratio) * woe).sum()
    
    if return_woe:
        return iv, woe
    else:
        return iv
    
import pandas
from tqdm import tqdm
def information_value(df: pandas.core.frame.DataFrame, y_label: str, if_pre_binning: bool = False, max_numerical_bins: int = 20, max_categorical_bins: int = 100) -> dict():
    """
    Calculate Information Value
    -----
    Compute the information value of every feature column against the y label.
    
    Params
    -----
    df: pandas.core.frame.DataFrame
        A DataFrame object contains with data.
    y_label: str
        The name of the y label.
    if_pre_binning: bool
        Whether to apply pre-binning method. Default is False.
    max_numerical_bins: int
        The maximum number of bins on numerical values. Default is 20.
    max_categorical_bins: int
        The maximum number of bins on categorical values. Default is 100.

    Returns
    -----
    information_value_dict: dict{str: float}
        The dictionary of the information value of each feature. {feature_name:information_value}
    
    Examples
    -----
    information_value_dict = information_value(df[var_list + [y_label]], y_label)
    var_list = [var for var in var_list if information_value_dict[var] >= information_value_threshold]
    """
    if not if_pre_binning:
        # raw IV on every column except the label
        return {col: IV(df[col], df[y_label]) for col in tqdm(df.columns) if col != y_label}

    feature_names = df.columns.tolist()
    feature_names.remove(y_label)
    iv_by_feature = {}
    for feature in tqdm(feature_names):
        values = df[feature]

        # Numerical features are discretized with equal-frequency pre-binning;
        # categorical (object) features are passed through unchanged for now.
        if values.dtype in (float, int):
            bin_edges = pre_binning(values, max_numerical_bins)
            values = pandas.cut(values, bins=bin_edges)

        iv_by_feature[feature] = IV(values, df[y_label])

    return iv_by_feature

import pandas
import time
def variable_filter(df: pandas.core.frame.DataFrame, y: str, x: str or list = None, time_column: str = None, if_pre_binning: bool = False,
                    iv_threshold: float = 0.02, missing_threshold: float = 0.95, identical_threshold: float = 0.95,
                    correlation_threshold: float = 0.7,
                    timing_sequencial_psi_threshold: float = 1e-4, max_numerical_bins: int = 20, max_categorical_bins: int = 100,
                    remove_vars: list = None, preserve_vars: list = None) -> pandas.core.frame.DataFrame:
    """
    Variable Filter
    -----
    This function is used to filter variables based on specific conditions, including iv, missing rate, identical rate etc. 
    
    Params
    -----
    df: pandas.core.frame.DataFrame
        A DataFrame object contains with data. Including x (feature/predictor) and y (label/response) variables.
    y: str
        The name of y variable.
    x: str or list
        The name(s) of x variables. Default is None. If x is None, then all variables except y are considered as x variables.
    time_column: str
        The column name of the time strings. Default is None.
    if_pre_binning: bool
        Whether to apply pre-binning method. Default is False.
    iv_threshold: float
        The threshold of information value that each variable should have. Default 0.02
    missing_threshold: float
        The threshold of missing rate that each variable should have. Default 0.95
    identical_threshold: float
        The threshold of identical rate that each variable should have, Default 0.95
    correlation_threshold: float
        The threshold of the correlation value between each feature pair. Default 0.7
    timing_sequencial_psi_threshold: float
        The threshold of the minimum psi. Default is 0.0001.
    max_numerical_bins: int
        The maximum binning number of the numerical data. Default is 20.
    max_categorical_bins: int
        The maximum binning number of the categorical data. Default is 100.
    remove_vars: list
        The list of variables' names that to be removed compulsory. Default None (no compulsory removals).
    preserve_vars: list
        The list of variables' names that to be preserved compulsory. Default None (no compulsory preserves).
        
    Examples
    -----
    filtered_data = scorecardpy.variable_filter(dataframe, y='flagy')
    """
    start_time = time.time()
    print('[INFO] filtering variables ...')
    
    # Copy caller-supplied lists and replace the None defaults.  The original
    # used mutable `[]` defaults and appended time_column to them, poisoning
    # every later call sharing the default object.
    remove_vars = list(remove_vars) if remove_vars else []
    preserve_vars = list(preserve_vars) if preserve_vars else []
    
    # Get variables set
    var_set = set(df.columns.tolist())
    
    # validation
    assert y in var_set, "The given y " + y + " is not a valid column in the given DataFrame"
    if isinstance(x, str):
        x = [x]
    if x is not None:
        # validate for both the str and the list form (the original called
        # list.difference, which does not exist, and only for str input)
        assert not set(x).difference(var_set), "The given x contains invalid variable(s) of the given DataFrame."
    assert y not in remove_vars, "The given y is a variable in remove list."
    assert y not in preserve_vars, "The given y is a variable in preserve list."
    
    # remove variables (the time column is never a candidate feature)
    if time_column:
        remove_vars.append(time_column)
    var_set.remove(y)
    var_set = var_set.difference(set(remove_vars))
    
    # select x
    if x:
        var_set = set(x).intersection(var_set)
    
    # missing rate (list() — modern pandas rejects set indexers)
    if missing_threshold > 0:
        missing_rate_dict = missing_rate(df[list(var_set)])
        var_set = {var for var in var_set if missing_rate_dict[var] <= missing_threshold}
    
    # identical rate
    if identical_threshold > 0:
        identical_rate_dict = identical_rate(df[list(var_set)])
        var_set = {var for var in var_set if identical_rate_dict[var] <= identical_threshold}
    
    # information value
    iv_dict = None
    if iv_threshold > 0:
        iv_dict = information_value(df[list(var_set) + [y]], y, if_pre_binning=if_pre_binning)
        var_set = {var for var in var_set if iv_dict[var] >= iv_threshold}
    
    # correlation value
    if correlation_threshold > 0:
        # compute iv if the iv filter above was skipped for any reason
        if iv_dict is None:
            iv_dict = information_value(df[list(var_set) + [y]], y, if_pre_binning=if_pre_binning)
        iv_dict = {feature: iv for feature, iv in iv_dict.items() if feature in var_set}
        remove_var_set = correlation_filter(df[list(var_set)], iv_dict, correlation_threshold)
        var_set = var_set - remove_var_set
        
    # timing sequencial psi
    if time_column and timing_sequencial_psi_threshold > 0:
        assert time_column in df, "The provided time column is not a valid column name in the provided dataframe."
        # Filter the *current* candidate set and keep var_set a set — the
        # original rebound it to a list built from unrelated x features, which
        # then crashed on .union()/.add() below.
        psi_dict = timing_sequencial_psi(df, time_column, y, list(var_set), dict_only=True)
        var_set = {var for var in var_set if psi_dict[var] <= timing_sequencial_psi_threshold}
    
    # preserved variables
    var_set = var_set.union(set(preserve_vars).intersection(set(df.columns.tolist())))
    
    print('Variable filtered on {} rows and {} columns in {} \n{} variables are removed'.format(df.shape[0], df.shape[1], time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)), df.shape[1]-len(var_set)-1))
    
    var_set.add(y)
    return df[list(var_set)]



#=========================================================================================================================


import pandas
from tqdm import tqdm

def correlation_filter(X: pandas.core.frame.DataFrame, iv_dict: dict, correlation_threshold: float = 0.9):
    """
    The correlation filter to eliminate the feature pairs with correlation value larger than the given threshold.
    ---
    1. start with the feature with smallest IV. 
    2. Calculate the correlation with any others, remove it if there is any correlation value larger than the threshold.
    3. Repeat 1 & 2 to apply the procedure on all features.
    
    Parameters:
    ----
    X: pandas.core.frame.DataFrame
        A DataFrame object contains with feature/predictor variables.
    iv_dict: dict
        The dictionary of the information value to each feature.
    correlation_threshold: float
        The threshold of the correlation value between each feature pair. Default 0.9
        
    Returns:
    ----
    remove_var_set: set
        The removed variable set after the filtering procedure.
    
    Example:
    ----
    # if not calcualted iv
    if iv_threshold == 0:
        iv_dict = information_value(df[list(var_set) + [y]], y, if_pre_binning=if_pre_binning)
    remove_var_set = correlation_filter(df[list(var_set)], iv_dict, correlation_threshold)
    var_set = var_set - remove_var_set
    """
    # Compare as sets: the original compared set differences against `{}`
    # (an empty *dict*), which is never equal to a set, so the assert fired
    # even for perfectly matched inputs.
    assert set(X.columns) == set(iv_dict), "The provided iv dictionary and provided features are not matched."
    
    # numerical features only (rebinding iv_dict keeps the caller's dict intact)
    iv_dict = {feature: iv for feature, iv in iv_dict.items() if X[feature].dtype in (float, int)}
    X = X[list(iv_dict)]
    
    remove_var_set = set()
    pbar = tqdm(total=len(iv_dict))
    pbar.set_description("Correlation Filtering")
    # Greedy elimination: repeatedly take the lowest-IV remaining feature and
    # drop it if it is strongly correlated with any other remaining feature.
    while len(iv_dict) >= 2:
        pbar.update(1)
        min_feature_name = min(iv_dict, key=iv_dict.get)
        iv_dict.pop(min_feature_name)
        min_feature = X[min_feature_name]
        for feature in iv_dict:
            if abs(min_feature.corr(X[feature])) > correlation_threshold:
                remove_var_set.add(min_feature_name)
                break
    pbar.close()  # release the progress bar handle
    
    return remove_var_set


#=========================================================================================================================

import numpy
import pandas

def pre_binning(data: pandas.core.series.Series, max_numerical_bins: int = 20, max_categorical_bins: int = 100):
    """
    Apply binning method to the given feature with given maximal bins.

    Numerical features are split into at most ``max_numerical_bins``
    equal-frequency bins and the sorted bin edges are returned as a list.
    Categorical (object) features are not supported yet and yield None,
    as does any other dtype.
    """
    # numerical value: equal-frequency edges via qcut
    if data.dtype in (float, int):
        _, edges = pandas.qcut(data, max_numerical_bins, precision=0, labels=None, retbins=True, duplicates='drop')
        return sorted(edges)

    # categorical value (Todo) and any other dtype
    return None
    

import numpy
import pandas

from tqdm import tqdm

def calculate_psi(data1: pandas.core.series.Series, data2: pandas.core.series.Series, max_numerical_bins: int = 20, max_categorical_bins: int = 100):
    """
    This function is used to calculate the psi of two given distributions. Binning with equal quantile.
    -----
    PSI = (a-b)*log(a/b)

    Parameters:
        data1: pandas.core.series.Series
            the first distribution
        data2: pandas.core.series.Series
            the second distribution
        max_numerical_bins: int
            The maximum binning number of the numerical data. Default is 20.
        max_categorical_bins: int
            The maximum binning number of the categorical data. Default is 100.

    Return:
        psi: float
            The psi value of the two distributions.
    """
    assert data1.dtype == data2.dtype, "The two provided series have different dtypes, which is invalid for psi calculation."

    # categorical value
    if data1.dtype == object and data2.dtype == object:
        # The comparison was inverted in the original (`>`): it *required*
        # more categories than the limit instead of rejecting that case.
        assert len(set(data1.unique()).union(set(data2.unique()))) <= max_categorical_bins, "The provided data is categorical value and has more than the maximal bins number limitation. Please apply preprocessing methods to reduce the category number."
 
    # numerical value: bin both series with edges derived from data1
    elif data1.dtype in (float, int) and data2.dtype in (float, int):
        var_bin = pre_binning(data1, max_numerical_bins)
        data1 = pandas.cut(data1, bins=var_bin)
        data2 = pandas.cut(data2, bins=var_bin)

    else:
        # raising a bare string is itself a TypeError in Python 3
        raise TypeError("The provided data has invalid dtype.")

    data1_temp = data1.value_counts(normalize=True)
    data2_temp = data2.value_counts(normalize=True)
    # outer merge keeps bins present in only one split; zero shares are floored
    # at 1e-9 to keep the log finite
    count_df = pandas.merge(data1_temp, data2_temp, left_index=True, right_index=True, how='outer').fillna(0).replace(0, 1e-9)
    psi = ((count_df.iloc[:,1] - count_df.iloc[:,0]) * numpy.log(count_df.iloc[:,1] / count_df.iloc[:,0])).sum()

    return psi

def timing_sequencial_psi(data: pandas.core.frame.DataFrame, time_feature: str, y_label: str = None, X_features: list = None, threshold: float = 0.0001, dict_only: bool = False, max_numerical_bins: int = 20, max_categorical_bins: int = 100):
    """
    This method is designed to calculate the psi over the timing sequencial split.
    ---
    Parameters:
        data: pandas.core.frame.DataFrame
            The provided dataset.
        time_feature: str
            The feature string of the time feature.
        y_label: str
            The name of y label. Default is None.
        X_features: list
            The names of under filtering features. Default is None (all columns except time/y).
        threshold: float
            The threshold of the minimum psi. Default is 0.0001.
        dict_only: bool
            If only returns the dictionary. Default False. If True, the threshold is masked.
        max_numerical_bins: int
            The maximum binning number of the numerical data. Default is 20.
        max_categorical_bins: int
            The maximum binning number of the categorical data. Default is 100.
        
    Returns:
        data: pandas.core.frame.DataFrame
            The dataset after filtering out the low psi features.
        psi_dict: dict{str : float}
            The dictionary of the psi value to each provided feature.
            
    Examples:
    ---
    data_total = pd.read_csv("./data_als_derive.csv",sep=',',encoding='utf-8')
    data_out, psi_dict = timing_sequencial_psi(data_total, 'user_date', 'flagy', ['tl_cell_t5_nbank_num'], threshold=0.0001)
    """
    assert time_feature in data.columns, "The provided time feature name is not included in the provided dataset."
    # Only validate X_features / y_label when actually provided — the original
    # asserts ran unconditionally and crashed on the documented None defaults.
    if X_features is not None:
        assert len(X_features) and not set(X_features).difference(data.columns), "The provided X feature names has invalid feature name."
        assert y_label not in X_features, "The provided y label exists in the X features."
    else:
        X_features = data.columns.tolist()
        X_features.remove(time_feature)
        if y_label: X_features.remove(y_label)
    
    date = pandas.to_datetime(data[time_feature])
    
    # split at the midpoint of the observed time range (mean of the min/max
    # nanosecond epochs, truncated to seconds)
    middle = pandas.to_datetime('%.0f' % ((date.min().value + date.max().value) / 2 / 1e9), unit='s')
    split = pandas.to_datetime(date) > middle
    
    psi_dict = {feature:calculate_psi(data[feature].iloc[~split.values], data[feature].iloc[split.values], max_numerical_bins=max_numerical_bins, max_categorical_bins=max_categorical_bins) for feature in tqdm(X_features)}
    
    if dict_only:
        return psi_dict
    
    # drop features whose distribution shifted more than the threshold
    drop_features = [feature for feature, psi in psi_dict.items() if psi > threshold]
    data = data.drop(columns=drop_features)
    
    return data, psi_dict


#=========================================================================================================================





import json
import numpy
def transform_category_to_numeric(df: pandas.core.frame.DataFrame, y_label: str, mapping_dict_path: str = None):
    """
    Transform the categorical values to the numerical values.
    -----
    Categorical (object-dtype) columns are mapped to integers.  Columns covered
    by the optional mapping dictionary use that mapping; every other object
    column is ranked by per-category bad rate (ascending) and each category is
    assigned its 1-based rank as the numerical value.

    Params
    -----
    df: pandas.core.frame.DataFrame
        The DataFrame of the whole dataset, including the X and Y.
        NOTE: the object columns of df are overwritten in place.
    y_label: str
        The label string of y.
    mapping_dict_path: str
        The file path of the mapping dictionary file. The dicitonary is saved as json.

    Returns
    -----
    X_transformed: pandas.core.frame.DataFrame
        The DataFrame that is applied the transformation.
    transform_rule: dict{var_name:{cat:value}}
        The transform method (rule) of the newly transformed variables
        (None when the dataset has no categorical variables).

    Examples
    -----
    dt_s = pd.read_csv('data.csv')
    y_label = "flagy"
    X_transformed, transform_rule = transform_category_to_numeric(dt_s, y_label, "./类别映射关系.json")
    """
    assert y_label in df.columns, "The name of provided y label is not in the DataFrame."
    object_variables = set(df.dtypes[df.dtypes == "object"].index)
    if not object_variables:
        print("There is no categorical variables in this dataset.")
        return df, None

    # load the user-supplied mapping, if any
    mapping_dict = {}
    if mapping_dict_path:
        with open(mapping_dict_path, 'r') as file_reader:
            mapping_dict = json.loads(file_reader.read())

    mapped = object_variables.intersection(mapping_dict.keys())
    unmapped = object_variables - mapped
    print('There are ' + str(len(unmapped)) + ' categorical variables to be transformed into numerical.')

    # Rank each category by its bad rate (ascending) and use the rank as the
    # numerical value for columns not covered by the mapping dictionary.
    new_mapping_dict = {}
    for var in unmapped:
        bad_rate = df.groupby(var)[y_label].apply(lambda s: (s == 1).sum() / s.shape[0]).sort_values()
        new_mapping_dict[var] = {category: rank for rank, category in enumerate(bad_rate.index, start=1)}

    # apply both mappings; NaN entries are left untouched
    for var in mapped:
        df[var] = df[var].apply(lambda v, rule=mapping_dict[var]: rule[v] if pandas.notna(v) else v)
    for var in unmapped:
        df[var] = df[var].apply(lambda v, rule=new_mapping_dict[var]: rule[v] if pandas.notna(v) else v)

    assert (df.dtypes == 'object').sum() == 0, "Not all categorical values are transformed to numerical."
    return df, new_mapping_dict



#=========================================================================================================================



