import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, normalize


def number_statistic(data, axis=0):
    """
    Compute summary statistics for continuous numeric data.

    :param data: DataFrame, Series, list or ndarray of numeric values
    :param axis: axis along which the statistics are computed (default 0)
    :return: tuple (sum, mean, max, min, std, median); std is numpy's
             population standard deviation (ddof=0)
    :raises TypeError: if data is not one of the supported types
    """
    if not isinstance(data, (list, pd.Series, pd.DataFrame, np.ndarray)):
        raise TypeError('invalid data type(s)')
    return (np.sum(data, axis=axis), np.mean(data, axis=axis),
            np.max(data, axis=axis), np.min(data, axis=axis),
            np.std(data, axis=axis), np.median(data, axis=axis))


def number_mode(data):
    """
    Return the mode (most frequent value) of the data.

    :param data: DataFrame, Series or list
    :return: the first mode (pandas sorts modes ascending, so ties pick
             the smallest); for a DataFrame, the first row of the
             per-column modes; np.nan when the input is empty
    :raises TypeError: if data is not one of the supported types
    """
    if isinstance(data, list):
        data = pd.Series(data)
    if not isinstance(data, (pd.DataFrame, pd.Series)):
        raise TypeError('invalid data type(s)')
    value = data.mode()
    return np.nan if value.empty else value.values[0]


def one_hot(data, columns, prefixs, drop=False):
    """
    One-hot encode the given columns of a DataFrame.

    :param data: source DataFrame
    :param columns: columns to one-hot encode
    :param prefixs: prefix for the generated dummy columns, one per
                    entry in `columns`
    :param drop: whether to drop the original columns afterwards
    :return: DataFrame with the dummy columns appended
    :raises ValueError: if columns and prefixs differ in length
    """
    if len(columns) != len(prefixs):
        raise ValueError('the length of args unequal')
    for col, prefix in zip(columns, prefixs):
        dummies = pd.get_dummies(data[col], prefix=prefix)
        if drop:
            data = data.drop(columns=[col])
        data = pd.concat([data, dummies], axis=1)
    return data


def fillna_value(data, columns, modes):
    """
    Fill missing values; columns and modes must be lists of equal length.

    :param data: source DataFrame (returned with filled columns)
    :param columns: columns to fill; names absent from data are skipped
    :param modes: fill mode per column: 'mean', 'median', 'mode', or a
                  literal value to fill with
    :return: the DataFrame with missing values filled
    :raises ValueError: if columns and modes differ in length
    """

    def fill_value(x, value):
        # Also replace blank strings and the literal string 'null',
        # which fillna does not treat as missing.
        if x is None or len(str(x).strip()) == 0 or str(x).strip() == 'null':
            return value
        return x

    if len(columns) != len(modes):
        raise ValueError('the length of args unequal')

    for c, m in zip(columns, modes):
        if c not in data.columns:
            continue
        if m == 'mean':
            value = data[c].mean()  # Series.mean skips NaN
        elif m == 'median':
            # Series.median skips NaN; np.median would itself return NaN
            # whenever the column contains any missing value, making the
            # fill a no-op.
            value = data[c].median()
        elif m == 'mode':
            value = number_mode(data[c])
        else:
            value = m
        # Assign back instead of fillna(inplace=True) on a column
        # selection, which is fragile under copy-on-write semantics.
        data[c] = data[c].fillna(value)
        data[c] = data[c].apply(fill_value, args=(value,))

    return data


def standard_scale(x):
    """
    Standardize data to zero mean and unit variance.

    :param x: input data (list, numpy array or Series); must be 2-D as
              required by sklearn's StandardScaler
    :return: the standardized values
    """
    return StandardScaler().fit_transform(x)


def num_normalize(x):
    """
    Scale samples to unit norm.

    :param x: input data (list, numpy array or Series); must be 2-D as
              required by sklearn's normalize
    :return: the normalized values
    """
    result = normalize(x)
    return result


def expand_month_feature(data, columns, current_month=0):
    """
    Expand features with 3/6/12-month sum, mean, max, min, std and median.

    :param data: DataFrame
    :param columns: month-column names, 12 per feature; use nested lists
                    for several features, eg [['01m_b', ...], ['01m_c', ...]].
                    Characters 2-3 of the first '_'-segment encode the month.
    :param current_month: current month (1-12) at inference time;
                          0 means training, using the last 3/6/12 columns
    :return: (expanded DataFrame, list of the new column names)
    """
    tags = ['sum', 'mean', 'max', 'min', 'std', 'median']
    c_names = []
    for column in columns:
        month_data = np.zeros(shape=(len(data), 12))
        column_name = 'None'
        for c in column:
            month = int(c.split('_')[0][1:3])
            column_name = '_'.join(c.split('_')[1:])
            month_data[:, month - 1] = data[c]
        if not current_month:
            result_3 = number_statistic(month_data[:, -3:], axis=1)
            result_6 = number_statistic(month_data[:, -6:], axis=1)
            result_12 = number_statistic(month_data, axis=1)
        else:
            # Tile to 36 columns so windows ending at the current month can
            # wrap into the previous year without negative indices; the
            # middle tile holds the current year, so the current month
            # sits at index current_month - 1 + 12.  The window therefore
            # ends (exclusive) at current_month + 12 — the original
            # slices lacked this offset and overshot by one month.
            month_data = np.tile(month_data, 3)
            end = current_month + 12
            result_3 = number_statistic(month_data[:, end - 3:end], axis=1)
            result_6 = number_statistic(month_data[:, end - 6:end], axis=1)
            result_12 = number_statistic(month_data[:, end - 12:end], axis=1)

        for month, result in zip([3, 6, 12], [result_3, result_6, result_12]):
            for r, t in zip(result, tags):
                c_name = f'{month}m_{column_name}_{t}'
                c_names.append(c_name)
                data[c_name] = r

    return data, c_names


def expand_month_mean_feature(data, columns, current_month=0):
    """
    Expand features with 3/6/12-month means.

    :param data: DataFrame
    :param columns: month-column names, 12 per feature; use nested lists
                    for several features, eg [['01m_b', ...], ['01m_c', ...]].
                    Characters 2-3 of the first '_'-segment encode the month.
    :param current_month: current month (1-12) at inference time;
                          0 means training, using the last 3/6/12 columns
    :return: (expanded DataFrame, list of the new column names)
    """
    c_names = []
    for column in columns:
        month_data = np.zeros(shape=(len(data), 12))
        column_name = 'None'
        for c in column:
            month = int(c.split('_')[0][1:3])
            column_name = '_'.join(c.split('_')[1:])
            month_data[:, month - 1] = data[c]
        if not current_month:
            result_3 = np.mean(month_data[:, -3:], axis=1)
            result_6 = np.mean(month_data[:, -6:], axis=1)
            result_12 = np.mean(month_data, axis=1)
        else:
            # Tile to 36 columns so windows ending at the current month can
            # wrap into the previous year without negative indices; the
            # middle tile holds the current year, so the current month
            # sits at index current_month - 1 + 12.  The window therefore
            # ends (exclusive) at current_month + 12 — the original
            # slices lacked this offset and overshot by one month.
            month_data = np.tile(month_data, 3)
            end = current_month + 12
            result_3 = np.mean(month_data[:, end - 3:end], axis=1)
            result_6 = np.mean(month_data[:, end - 6:end], axis=1)
            result_12 = np.mean(month_data[:, end - 12:end], axis=1)

        for month, result in zip([3, 6, 12], [result_3, result_6, result_12]):
            c_name = f'{month}m_{column_name}_mean'
            c_names.append(c_name)
            data.loc[:, c_name] = result

    return data, c_names


def expand_agg_feature(data, by, func=None):
    """
    Aggregate features grouped by a key.

    :param data: DataFrame
    :param by: column name(s) to group by
    :param func: custom aggregation function(s); defaults to
                 sum/mean/max/min/std/median
    :return: aggregated DataFrame with (column, stat) names flattened to
             '<col>_<stat>' and the group key restored as a column
    :raises TypeError: if data is not a DataFrame
    """
    if not isinstance(data, pd.DataFrame):
        raise TypeError('the data must be DataFrame')

    if func:
        data = data.groupby(by).agg(func)
    else:
        # String names instead of np.sum/np.mean/...: pandas maps those
        # callables to these same built-in aggregations anyway (std uses
        # ddof=1) and has deprecated passing the numpy functions.
        data = data.groupby(by).agg(['sum', 'mean', 'max', 'min', 'std', 'median'])
    # Only flatten when agg produced a (column, stat) MultiIndex; a
    # custom func may yield plain column names, which the original
    # code mangled character-by-character.
    if isinstance(data.columns, pd.MultiIndex):
        data.columns = [f'{col}_{stat}' for col, stat in data.columns]
    data.reset_index(inplace=True)
    return data


def expand_feature(data, discrete_columns, continuous_columns):
    """
    Summarize columns into a single-row DataFrame of statistics.

    :param data: DataFrame
    :param discrete_columns: list of discrete column names (mode is taken)
    :param continuous_columns: list of continuous column names
                               (sum/mean/max/min/std/median are taken)
    :return: (one-row stats DataFrame, discrete stat names,
              continuous stat names)
    :raises TypeError: if data is not a DataFrame
    """
    if not isinstance(data, pd.DataFrame):
        raise TypeError('the data must be DataFrame')
    # Collect everything in a dict and build the frame once at the end:
    # assigning a scalar to a column of an *empty* DataFrame keeps the
    # frame empty, so the discrete modes were silently lost (they came
    # back as NaN once the continuous Series assignments added a row).
    row = {}
    discrete_names = []
    continuous_names = []
    for c in discrete_columns:
        name = f'{c}_mode'
        discrete_names.append(name)
        row[name] = data[c].mode().iloc[0]
    for c in continuous_columns:
        names = [f'{c}_sum', f'{c}_mean', f'{c}_max', f'{c}_min',
                 f'{c}_std', f'{c}_median']
        continuous_names.extend(names)
        for name, value in zip(names, number_statistic(data[c])):
            row[name] = value

    return pd.DataFrame([row]), discrete_names, continuous_names


def transCategory2ID(cat_df):
    """
    Map categorical values to integer ids, column by column.

    :param cat_df: DataFrame of categorical columns (values are
                   stringified first)
    :return: (DataFrame with values replaced by ids,
              dict of {column_name: {value: id}})
    """
    cat_dict = {}
    cat_df = cat_df.applymap(str)
    for column in cat_df.columns:
        # Sort the vocabulary so the value->id assignment is
        # deterministic across runs; raw set iteration order depends on
        # the string hash seed and changes between processes.
        vocab = sorted(set(cat_df[column]))
        word2id = {word: idx for idx, word in enumerate(vocab)}
        cat_dict[column] = word2id
        cat_df[column] = cat_df[column].map(word2id)
    return cat_df, cat_dict


if __name__ == '__main__':
    # Demo: twelve month columns ('AnnA' -> month nn) for one feature.
    df = pd.DataFrame({'A01A': ['1', '2', '3', '4', '5', '6', '7', '8'],
                       'A02A': ['1', '2', '1', '2', '1', '2', '1', '1'],
                       'A03A': np.random.randn(8),
                       'A04A': np.random.randn(8),
                       'A05A': ['1', '2', '3', '4', '5', '6', '7', '8'],
                       'A06A': ['1', '2', '1', '2', '1', '2', '1', '1'],
                       'A07A': np.random.randn(8),
                       'A08A': np.random.randn(8),
                       'A09A': np.random.randn(8),
                       'A10A': ['1', '2', '3', '4', '5', '6', '7', '8'],
                       'A11A': ['1', '2', '1', '2', '1', '2', '1', '1'],
                       'A12A': np.random.randn(8)})
    print(df)
    # expand_month_mean_feature expects a list of lists: one inner list
    # holding the 12 month columns of a single feature.  Passing a flat
    # list makes the function iterate the column-name strings
    # character by character and fail.
    month_columns = [['A01A', 'A02A', 'A03A', 'A04A', 'A05A', 'A06A',
                      'A07A', 'A08A', 'A09A', 'A10A', 'A11A', 'A12A']]
    expanded, new_columns = expand_month_mean_feature(df, month_columns)
    print(expanded)
    print(new_columns)
