import hashlib
import time

import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder


def fit_split_points_bucket(data, max_interval):
    """Derive the sorted left edges of equal-width buckets over *data*.

    Splits the value range of *data* into at most *max_interval*
    equal-width bins via ``pd.cut`` and returns the left boundary of
    every bin actually observed in *data*, ascending, as a numpy array.
    Note: pd.cut widens the range slightly, so the first edge lies just
    below ``data.min()``.
    """
    binned = pd.cut(data, max_interval)
    left_edges = [interval.left for interval in binned.unique()]
    return np.array(sorted(left_edges))


def get_interval(points):
    """Build left-closed intervals from ordered split points.

    Each consecutive pair ``(points[n], points[n+1])`` becomes a
    ``[left, right)`` interval; the final point opens an unbounded
    ``[last, inf)`` interval so every value >= points[0] is covered.
    Returns an empty list for empty *points* (matching the original
    loop's behavior).
    """
    if len(points) == 0:
        return []
    # Pairwise zip replaces the original index-based loop, which also
    # carried an unused loop variable.
    intervals = [
        pd.Interval(left=lo, right=hi, closed='left')
        for lo, hi in zip(points[:-1], points[1:])
    ]
    # Catch-all bucket for values at or above the last split point.
    intervals.append(
        pd.Interval(left=points[-1], right=np.inf, closed='left'))
    return intervals


def transform(data, points):
    """Map each scalar in *data* to the left-closed bucket containing it.

    Parameters
    ----------
    data : pd.Series of scalars; every value must be >= min(points),
        enforced by the precondition assert below.
    points : ordered split points (e.g. from fit_split_points_bucket).

    Returns a one-column DataFrame whose cells are ``pd.Interval``
    objects — ``Series.apply`` expands a Series-valued function into a
    DataFrame, preserving the original return shape.
    """
    # Precondition: get_interval only covers [min(points), inf).
    assert data.min() >= min(points)
    intervals = get_interval(points)

    def _locate(value):
        # Fixed: the original annotated this argument as pd.Series, but
        # Series.apply passes scalar cell values. The trailing
        # [last, inf) interval guarantees a match for any in-range value.
        matched = next((iv for iv in intervals if value in iv), None)
        return pd.Series([matched])

    return data.apply(_locate)


def ordinal(series):
    """Ordinal-encode the values of *series*.

    Returns a pair ``(codes, mapping)``: the flat array of encoded
    values and a dict mapping each original category to its ordinal
    code (category order as determined by OrdinalEncoder).
    """
    encoder = OrdinalEncoder()
    column = series.values.reshape(-1, 1)
    codes = encoder.fit_transform(column).reshape(1, -1)[0]
    mapping = {
        category: code
        for code, category in enumerate(encoder.categories_[0])
    }
    return codes, mapping


class BinningDetail(object):
    """Per-feature binning statistics (WOE / IV) over a labeled dataset.

    ``pd_binning`` holds an ``id`` column plus already-bucketized
    feature columns; ``pd_target`` holds ``id`` and a binary label
    column ``y``. Feature values may optionally be masked with one-time
    MD5 tokens via :meth:`encrypted_feature` before :meth:`run` and
    unmasked afterwards via :meth:`decrypted_feature` /
    :meth:`decrypted_result`.
    """

    def __init__(self, pd_binning: pd.DataFrame, pd_target=None):
        # Per-feature forward map: raw value -> hash token.
        self.en_mapping = {}
        # Per-feature reverse map: hash token -> raw value.
        self.de_mapping = {}

        # Filled by run(): feature name -> dict of per-bucket statistics.
        self.result = {}

        # Bug fix: the documented default pd_target=None previously
        # crashed on None.copy(); guard it.
        self.pd_target = None if pd_target is None else pd_target.copy()
        self.pd_binning = pd_binning.copy()

        # Every non-id column is treated as a feature.
        self.feature_names = [x for x in self.pd_binning.columns if x != 'id']

    @staticmethod
    def md5(msg):
        """Return the hex MD5 digest of a unicode string."""
        return hashlib.md5(msg.encode('utf-8')).hexdigest()

    def encrypted_feature(self):
        """Replace every feature value with a one-time hash token.

        The token mixes the current timestamp into the digest, so the
        mapping is deliberately not reproducible across calls; the
        inverse is kept in ``de_mapping`` for later decryption.
        """
        for col in self.feature_names:
            single_feature_en = {}
            single_feature_de = {}
            for v in self.pd_binning[col].unique():
                v_hash = self.md5(f'{time.time()}_{v}')
                single_feature_en[v] = v_hash
                single_feature_de[v_hash] = v

            self.pd_binning[col] = self.pd_binning[col].map(single_feature_en)
            self.en_mapping[col] = single_feature_en
            self.de_mapping[col] = single_feature_de

    def decrypted_feature(self):
        """Restore the original feature values in ``pd_binning`` using
        the stored reverse maps. Requires encrypted_feature() first."""
        for col in self.feature_names:
            self.pd_binning[col] = self.pd_binning[col].map(
                self.de_mapping[col])

    def decrypted_result(self, result_pd: pd.DataFrame, col):
        """Decrypt the 'label' column of a result frame in place."""
        result_pd['label'] = result_pd['label'].map(self.de_mapping[col])

    def run(self):
        """Compute per-bucket statistics for every feature.

        Inner-joins the binning table with the target on ``id`` and,
        for each distinct feature value (bucket), records totals,
        positive/negative counts and rates, WOE = ln(pos_rate /
        neg_rate) and the bucket's IV contribution
        (pos_rate - neg_rate) * WOE. Results land in ``self.result``.

        NOTE(review): a bucket with zero positives or negatives yields
        inf/NaN WOE (division by zero feeding np.log) — unchanged from
        the original behavior.
        """
        pd_data = self.pd_binning.join(
            self.pd_target.set_index('id'),
            on='id',
            rsuffix='table',
            how='inner')

        positive_number_all = pd_data.query('y == 1').shape[0]
        negative_number_all = pd_data.query('y == 0').shape[0]

        for f_col in self.feature_names:

            binning_result = {
                'label': [],
                'woe': [],
                'number_total': [],
                'number_positive': [],
                'number_negative': [],
                'rate_total': [],
                'rate_positive': [],
                'rate_negative': [],
                'percentage_positive': [],
                'iv': []
            }

            for v in self.pd_binning[f_col].unique():
                binning_result['label'].append(v)
                # Values are hash strings after encrypted_feature(),
                # otherwise numeric ordinal codes: quote accordingly.
                if isinstance(v, str):
                    binning_pd = pd_data.query(f"{f_col} == '{v}'")
                else:
                    binning_pd = pd_data.query(f'{f_col} == {v}')

                total = binning_pd.shape[0]

                positive_number = binning_pd.query('y == 1').shape[0]
                negative_number = binning_pd.query('y == 0').shape[0]

                # Share of all positives / negatives falling in this bucket.
                pos_cal = positive_number / positive_number_all
                neg_cal = negative_number / negative_number_all

                woe = np.log(pos_cal / neg_cal)
                iv = (pos_cal - neg_cal) * woe

                binning_result['woe'].append(woe)

                binning_result['number_total'].append(total)
                binning_result['number_positive'].append(positive_number)
                binning_result['number_negative'].append(negative_number)

                binning_result['rate_total'].append(total /
                                                    self.pd_binning.shape[0])
                binning_result['rate_positive'].append(pos_cal)
                binning_result['rate_negative'].append(neg_cal)

                binning_result['percentage_positive'].append(positive_number /
                                                             total)
                binning_result['iv'].append(iv)

            self.result[f_col] = binning_result

    def export(self):
        """Print each feature's result table, its total IV, and every
        row as a dict (decrypting labels where a mapping exists)."""
        for f_col in self.feature_names:
            df = pd.DataFrame(self.result[f_col])

            if f_col in self.de_mapping:
                self.decrypted_result(df, f_col)

            print(df)

            print(df.to_dict('index'))

            print(df['iv'].sum())

            for k, v in df.to_dict('index').items():
                v.update({'index': k})
                print(v)


if __name__ == '__main__':
    # Demo: load a feature CSV, attach a random binary target, bucketize
    # two columns, then compute (encrypted) WOE/IV binning statistics.
    path_csv = r'D:\project\ark\datasets\feature\apollo_data_data_set_8f65f496f49d4fc28dbbdc7c2b832c14.csv'
    pd_data_raw = pd.read_csv(path_csv)

    # Synthetic 0/1 label for demonstration purposes.
    pd_data_raw['y'] = np.random.randint(0, 2, size=pd_data_raw.shape[0])

    print(pd_data_raw)

    pd_data_bucket = pd_data_raw.copy()

    # Bucketize x0 into 2 equal-width bins and ordinal-encode the buckets.
    split_points = fit_split_points_bucket(pd_data_raw['x0'], 2)
    bucketed = transform(pd_data_raw['x0'], split_points)
    pd_data_bucket['x0'], relation = ordinal(bucketed)

    # Bucketize x1 into 4 equal-width bins likewise.
    split_points = fit_split_points_bucket(pd_data_raw['x1'], 4)
    bucketed = transform(pd_data_raw['x1'], split_points)
    pd_data_bucket['x1'], _ = ordinal(bucketed)
    print('bucket binning: \n', pd_data_bucket)

    bd = BinningDetail(pd_data_bucket[['id', 'x0', 'x1']],
                       pd_data_bucket[['id', 'y']])

    # Mask feature values before computing statistics.
    bd.encrypted_feature()

    print(bd.pd_binning)

    bd.run()

    print(bd.result)
