import math
import warnings

import numpy as np
import pandas as pd
from pandas.core.common import SettingWithCopyWarning
from sklearn.preprocessing import OrdinalEncoder

# Silence chained-assignment warnings triggered by patterns like
# `df.loc[i]['chi2'] = ...` further down in this script.
# NOTE(review): pandas >= 1.5 exposes this warning as
# pandas.errors.SettingWithCopyWarning; the pandas.core.common import
# above fails on modern pandas — confirm the installed version.
warnings.simplefilter('ignore', SettingWithCopyWarning)

# Toy dataset: one categorical column, one numeric column to bin,
# one binary column, and a binary target 'y'.
pd_data_raw = pd.DataFrame({
    'col1': ['good', 'good', 'bad'],
    'col2': [4, 5, 6],
    'col3': [0, 1, 0],
    'y': [0, 1, 1],
})

# One independent working copy per binning strategy, so each demo
# below can overwrite 'col2' without touching the raw data.
pd_data_bucket, pd_data_quantile, pd_data_chi2, pd_data_custom = (
    pd_data_raw.copy() for _ in range(4))

def fit_split_points_bucket(data, max_interval):
    """Equal-width binning: split points for `max_interval` buckets.

    Returns the sorted left edges of the intervals pd.cut assigns to
    `data`.  pd.cut widens the range slightly, so the first edge sits
    just below data.min() and every value falls into some interval.

    NOTE(review): edges come from the intervals that actually occur in
    `data` (`.unique()`), so a bucket containing no samples contributes
    no edge — confirm that is intended for sparse data.

    Parameters
    ----------
    data : numeric pd.Series to bin.
    max_interval : number of equal-width buckets.

    Returns
    -------
    np.ndarray of sorted left edges.
    """
    binned = pd.cut(data, max_interval)
    return np.array(sorted(iv.left for iv in binned.unique()))


def fit_split_points_quantile(data, max_interval):
    """Equal-frequency binning: quantile split points for `data`.

    Returns the sorted left edges of the intervals pd.qcut assigns.
    `duplicates='drop'` collapses repeated quantile edges, so fewer
    than `max_interval` edges may come back for low-cardinality data.

    Parameters
    ----------
    data : numeric pd.Series to bin.
    max_interval : target number of equal-frequency buckets.

    Returns
    -------
    np.ndarray of sorted left edges.
    """
    binned = pd.qcut(data, max_interval, duplicates='drop')
    return np.array(sorted(iv.left for iv in binned.unique()))


def get_interval(points):
    """Turn sorted split points into adjacent left-closed intervals.

    Each point p_i becomes the left edge of [p_i, p_{i+1}); the last
    point opens an unbounded interval [p_last, inf).  Empty input
    yields an empty list.

    Parameters
    ----------
    points : sorted sequence (list or np.ndarray) of split points.

    Returns
    -------
    list of pd.Interval, closed on the left.
    """
    # pair each point with its successor; the final point pairs with inf
    uppers = list(points[1:]) + [np.inf]
    return [pd.Interval(left=lo, right=hi, closed='left')
            for lo, hi in zip(points, uppers)]


def transform(data, points):
    """Map each value of `data` to the half-open interval it falls in.

    `points` are the sorted left edges; the last interval is unbounded
    on the right, so every value >= min(points) matches one interval.
    Returns the result of Series.apply, where each element is wrapped
    in a one-element Series holding its pd.Interval.
    """
    assert data.min() >= min(points)
    bins = get_interval(points)

    def _locate(value):
        # first interval containing the value, or None if none matches
        match = next((iv for iv in bins if value in iv), None)
        return pd.Series([match])

    return data.apply(_locate)


def ordinal(series):
    """Ordinal-encode `series` into integer codes.

    Parameters
    ----------
    series : pandas object whose `.values` reshape to an (n, 1) column
        (a Series, or the single-column frame produced by `transform`).

    Returns
    -------
    res : 1-D numpy array of float codes, aligned with `series`.
    trans_dict : mapping from original category to its integer code
        (OrdinalEncoder sorts categories, so position == code).
    """
    oe = OrdinalEncoder()
    codes = oe.fit_transform(series.values.reshape(-1, 1)).ravel()
    trans_dict = {cat: code for code, cat in enumerate(oe.categories_[0])}
    return codes, trans_dict


def create_contingency_table(data, feature_column, target_val_headers: list,
                             target_column, target_val):
    """Build the initial contingency table for chi-merge binning.

    One row per distinct value of `feature_column`, one count column
    per target class, plus a 'chi2' column initialised to 0.

    Parameters
    ----------
    data : DataFrame holding both the feature and target columns.
    feature_column : name of the feature being binned.
    target_val_headers : output column names, one per target class.
    target_column : name of the target column.
    target_val : target class values, aligned with target_val_headers.

    Returns
    -------
    DataFrame with columns [feature_column, *target_val_headers, 'chi2'].
    """
    distinct_values = sorted(set(data[feature_column]))

    col_names = [feature_column, *target_val_headers, 'chi2']
    contingency = pd.DataFrame(columns=col_names)

    for value in distinct_values:
        counts = (data.loc[data[feature_column] == value, target_column]
                  .value_counts().to_dict())
        # one count per target class (0 when the class is absent),
        # then a chi2 placeholder of 0
        row = [value]
        row.extend(counts.get(cls, 0) for cls in target_val)
        row.append(0)
        contingency.loc[len(contingency)] = row

    return contingency


def calc_chi2(array):
    """Pearson chi-square statistic of a contingency table.

    Expected counts are row_total * col_total / grand_total; cells
    whose expected count is 0 contribute nothing (matching the
    original cell-by-cell loop).  Vectorized with NumPy instead of a
    double Python loop.

    Parameters
    ----------
    array : 2-D array-like of observed counts.

    Returns
    -------
    float chi2 statistic; 0.0 for an all-zero table (the original loop
    produced NaN via 0/0 in that degenerate case).
    """
    observed = np.asarray(array, dtype=float)
    total = observed.sum()
    if total == 0.0:
        return 0.0
    expected = np.outer(observed.sum(axis=1), observed.sum(axis=0)) / total
    nonzero = expected != 0.0
    diff = observed[nonzero] - expected[nonzero]
    return float((diff ** 2 / expected[nonzero]).sum())


def update_chi2_column(contingency_table, target_val_headers):
    """Fill 'chi2' for each row (except the last) with the chi-square
    statistic of that row vs. the next row over the class-count columns.

    Mutates `contingency_table` in place and returns it.  Assumes a
    0..n-1 RangeIndex, since the neighbour is addressed as index + 1.

    Fix: the original wrote via chained assignment
    (`contingency_table.loc[index]['chi2'] = c2`), which pandas may
    apply to a temporary copy instead of the table — the very warning
    suppressed at the top of this file.  `.loc[row, col]` writes
    through reliably.
    """
    last_index = contingency_table.shape[0] - 1
    for index in range(last_index):
        observed = np.array([
            [contingency_table.loc[index, h] for h in target_val_headers],
            [contingency_table.loc[index + 1, h] for h in target_val_headers],
        ])
        contingency_table.loc[index, 'chi2'] = calc_chi2(observed)
    return contingency_table


def merge_rows(df, feature_column, target_val_headers: list):
    """Merge each row whose predecessor holds the minimal chi2 into
    that predecessor, summing the class-count columns.

    The last row's chi2 is excluded from the minimum search (it is
    never filled by update_chi2_column).  All chi2 values in the
    returned table are reset to 0 for the next merge pass.  Assumes a
    0..n-1 RangeIndex.

    Fix: the original accumulated counts via chained assignment
    (`updated_df.loc[i][col] += ...`), which pandas may apply to a
    temporary copy; `.loc[row, col]` writes through reliably.
    """
    # smallest chi2 among the candidate (row, next-row) pairs;
    # equivalent to the original sorted(set(...))[0]
    min_chi2 = df['chi2'][:-1].min()

    col_names = [feature_column, *target_val_headers, 'chi2']
    updated_df = pd.DataFrame(columns=col_names)

    for index in df.index:
        if index != 0 and df.loc[index - 1, 'chi2'] == min_chi2:
            # fold this row's class counts into the last kept row
            keep = len(updated_df) - 1
            for header in target_val_headers:
                updated_df.loc[keep, header] += df.loc[index, header]
        else:
            updated_df.loc[len(updated_df)] = df.loc[index]

    # chi2 values are stale after merging; reset for the next pass
    updated_df['chi2'] = 0.

    return updated_df


def fit_split_points_chi(pd_data,
                         feature_column,
                         max_interval,
                         target_column='y'):
    """Chi-merge binning: repeatedly merge the adjacent interval pair
    with the smallest chi-square statistic until at most
    `max_interval` intervals remain.

    Returns the surviving split points (feature values acting as left
    edges) as a numpy array.
    """
    ordered = pd_data.sort_values(
        by=[feature_column], ascending=True).reset_index()

    classes = pd_data[target_column].unique()
    headers = [f'y_{c}' for c in classes]

    table = create_contingency_table(
        ordered, feature_column, headers, target_column, classes)

    # each pass scores adjacent rows, then merges the lowest-chi2 pair
    while table.shape[0] > max_interval:
        scored = update_chi2_column(table, headers)
        table = merge_rows(scored, feature_column, headers)

    return table[feature_column].values


# ---------------------------------------------------------------------------
# Demo: apply each binning strategy to column 'col2' and print the result.
# ---------------------------------------------------------------------------
print('raw data: \n', pd_data_raw)

# 1) Equal-width (bucket) binning into 2 intervals.
points = fit_split_points_bucket(pd_data_raw['col2'], 2)
pd_data_trans_1 = transform(pd_data_raw['col2'], points)
pd_data_bucket['col2'], _ = ordinal(pd_data_trans_1)

print('bucket binning: \n', pd_data_bucket)

# 2) Equal-frequency (quantile) binning into 2 intervals.
points = fit_split_points_quantile(pd_data_raw['col2'], 2)

pd_data_trans_1 = transform(pd_data_raw['col2'], points)
pd_data_quantile['col2'], _ = ordinal(pd_data_trans_1)

print('quantile binning: \n', pd_data_quantile)

# 3) Chi-merge binning supervised by target column 'y'; the surviving
#    split points are printed before transforming.
points = fit_split_points_chi(pd_data_chi2, 'col2', 2, 'y')
print(points)
pd_data_trans_1 = transform(pd_data_raw['col2'], points)
pd_data_chi2['col2'], _ = ordinal(pd_data_trans_1)

print('chi2 binning: \n', pd_data_chi2)

# 4) Custom binning from a comma-separated list of split points.
#    -10 sits below col2's minimum so transform's assertion holds.
custom_expr = '-10,4,5,6'
points = sorted([float(i) for i in custom_expr.split(',')])
pd_data_trans_1 = transform(pd_data_custom['col2'], points)
pd_data_custom['col2'], _ = ordinal(pd_data_trans_1)

print('custom binning: \n', pd_data_custom)
