import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from python_ai.common.xcommon import sep

# Print full DataFrames: no row/column truncation, wide cells, no line wrapping.
pd.set_option('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', 1000, 'display.expand_frame_repr', False)
# Use the SimHei font so CJK text in plots renders, and keep the minus
# sign displayable when a non-ASCII font is active.
plt.rcParams['font.sans-serif'] = ['Simhei']
plt.rcParams['axes.unicode_minus'] = False


class NaiveBayesImplWithSmoothAgain(object):
    """Naive Bayes classifier for discrete features with additive smoothing.

    Expects a DataFrame whose LAST column is the class label and whose
    remaining columns are categorical features (Li Hang, "Statistical
    Learning Methods", example 4.2).
    """

    def __init__(self, lam=1):
        # lam is the additive-smoothing strength: lam=1 is classic
        # Laplace smoothing, lam=0 is plain maximum likelihood.
        self.lam_ = lam

    def fit(self, df):
        """Estimate smoothed priors and per-class conditionals from ``df``.

        Stores on self:
          y_p_series_ -- smoothed class priors, indexed by label
          P_          -- dict keyed by (feature, value, label); the key
                         (feature, None, label) holds the smoothed fallback
                         probability for values unseen with that label
          x_labels_   -- feature column names
          y_labels_   -- class labels
        Also prints the learned tables (debug output kept from original).
        """
        P = {}

        n_rows = len(df)
        y_counts = df.iloc[:, -1].value_counts()
        y_labels = y_counts.index
        # Smoothed priors: (count + lam) / (N + lam * K), K = number of classes.
        y_p_series = (y_counts + self.lam_) / (n_rows + self.lam_ * len(y_labels))

        x_labels = df.columns[:-1]
        # S_j must be the number of distinct values of feature j over the
        # WHOLE dataset (textbook formula), not just within one class
        # subset -- otherwise denominators differ between classes and the
        # conditionals no longer sum to 1.
        n_values = {x: df[x].nunique() for x in x_labels}

        for y_v in y_labels:
            df_y = df[df.iloc[:, -1] == y_v]
            n_y = len(df_y)

            for x in x_labels:
                x_counts = df_y[x].value_counts()
                denom = n_y + self.lam_ * n_values[x]
                # Fallback entry for feature values never seen with y_v.
                P[(x, None, y_v)] = self.lam_ / denom
                for x_v, cnt in x_counts.items():
                    P[(x, x_v, y_v)] = (cnt + self.lam_) / denom

        self.y_p_series_ = y_p_series
        self.P_ = P
        self.x_labels_ = x_labels
        self.y_labels_ = y_labels
        print(self.y_p_series_)
        print(self.P_)

    def get_key(self, k):
        """Return P[(feature, value, label)], falling back to the smoothed
        unseen-value entry for that (feature, label) pair."""
        x, _x_v, y_v = k
        try:
            return self.P_[k]
        except KeyError:
            return self.P_[(x, None, y_v)]

    def predict(self, X):
        """Classify one sample ``X`` (feature values in column order).

        Returns ``(best_label, result)`` where ``best_label`` keeps the
        original label's type and ``result`` is an array of
        [label, score] rows sorted by score descending.
        """
        scored = []
        for y_v in self.y_labels_:
            p = self.y_p_series_[y_v]
            for x, x_v in zip(self.x_labels_, X):
                p *= self.get_key((x, x_v, y_v))
            scored.append([y_v, p])
        # Sort on the float scores BEFORE converting to numpy:
        # np.array() would upcast mixed label/score rows to strings, and
        # argsort on numeric strings is lexicographic, not numeric.
        scored.sort(key=lambda row: row[1], reverse=True)
        result = np.array(scored)
        return scored[0][0], result


if '__main__' == __name__:
    # Training data: Li Hang example 4.1 -- two discrete features
    # (X1, X2) and a binary label {1, -1} in the last column.
    df = pd.read_csv('../follow_teacher/bayes_lihang.txt', header=0)
    print(len(df))
    print(df[:5])

    # lam=1 gives classic Laplace smoothing.
    model = NaiveBayesImplWithSmoothAgain(lam=1)
    model.fit(df)


    def test_bayes(X):
        # Predict one sample and print "<input> => <result>".
        result = model.predict(X)
        print(f'{X} => {result}')


    # Includes values the training set never saw (200, 'N') to exercise
    # the smoothed unseen-value fallback.
    Xs = [[2, 'S'], [2, 'N'], [200, 'N'], [2, 'L']]
    for X in Xs:
        test_bayes(X)

    # failed experiment
    #
    # from sklearn.naive_bayes import GaussianNB
    # from sklearn.preprocessing import LabelEncoder
    # model = GaussianNB(var_smoothing=1.0)
    # enc = LabelEncoder()
    # df['X2'] = enc.fit_transform(df['X2'])
    # model.fit(df.iloc[:, :-1], df.iloc[:, -1])
    #
    #
    # def test_bayes(X):
    #     try:
    #         X[1] = enc.transform(X[1])
    #     except ValueError:
    #         X[1] = -1
    #     result = model.predict([X])
    #     print(f'{X} => {result}')
    #
    #
    # for X in Xs:
    #     test_bayes(X)
