import numpy as np

np.random.seed(0)


class NaiveBayesClassifier:
    """Naive Bayes classifier for discrete (categorical) features.

    Labels must be the integers 0, 1, ..., K-1. Laplace smoothing is applied
    to both the class priors and the per-feature conditional probabilities,
    so a feature value never seen for a class no longer zeroes out the whole
    posterior product (the original code computed __Aj for exactly this
    purpose but never used it).
    """

    def __init__(self):
        # Attributes prefixed with __ are name-mangled ("private" to the class).
        self.__n = 0                   # total number of training samples
        self.__Nk = list()             # sample count per label
        self.__label2samples = dict()  # label -> list of feature vectors
        self.__K = 0                   # number of distinct labels
        self.__P_Ck = list()           # smoothed prior probability per label
        self.__Aj = list()             # per-feature number of possible values

    def testPrint(self):
        """Print a human-readable summary of the fitted model (debug helper)."""
        # The printed text is part of the program's observable output; kept verbatim.
        t = f"""
        样本总数：{self.__n}
        每个标签对应的样本数(即不同类别样本的数量)：{self.__Nk}
        标签的种数：{self.__K}
        每个标签的出现频率：{self.__P_Ck}
        共有{len(self.__Aj)}个特征
        每个特征的所有可能取值的个数：{self.__Aj}
        """
        print(t)

    def fit(self, X, Y):
        """Estimate class priors and group training samples by label.

        :param X: numpy array of shape (n, m); n samples, m discrete features
        :param Y: array of shape (n,) holding integer labels in [0, K-1]
        :return: None
        """
        # Reset per-fit state so calling fit() twice does not accumulate data.
        self.__label2samples = dict()
        self.__Aj = list()

        self.__n = X.shape[0]
        self.__K = int(max(Y)) + 1  # labels are 0, 1, ..., __K - 1
        self.__Nk = [0] * self.__K
        self.__P_Ck = [0] * self.__K

        # Count samples per label and group feature vectors by label.
        for i in range(self.__n):
            self.__Nk[Y[i]] += 1
            self.__label2samples.setdefault(Y[i], list()).append(X[i])

        # Number of possible values per feature. The +1 reserves one extra
        # slot for a value unseen in training; it feeds the smoothing
        # denominator in __predictOne.
        for j in range(X.shape[1]):
            self.__Aj.append(len(set(X[:, j])) + 1)

        # Class priors with Laplace smoothing: (Nk + 1) / (n + K).
        nAddK = float(self.__n + self.__K)
        for Ck in range(self.__K):
            self.__P_Ck[Ck] = (self.__Nk[Ck] + 1) / nAddK

    def predict(self, X):
        """Predict a label for each sample.

        :param X: array of shape (n, m); n samples, m discrete features
        :return: list of n predicted integer labels
        """
        return [self.__predictOne(X0) for X0 in X]

    def __predictOne(self, X0):
        """Return the most probable label for a single feature vector.

        :param X0: 1-D array [X0_1, ..., X0_m] of discrete feature values
        :return: predicted integer label (argmax of the smoothed posterior)
        """
        m = X0.shape[0]
        pMax = -1.0  # best (unnormalized) posterior seen so far
        label = -1   # label achieving pMax

        for Ck in range(self.__K):
            # A numpy array allows fast column slicing; a label absent from
            # the training data simply contributes zero matches everywhere.
            samples_Ck = np.array(self.__label2samples.get(Ck, []))
            p = self.__P_Ck[Ck]
            for j in range(m):
                if samples_Ck.size:
                    matches = np.count_nonzero(samples_Ck[:, j] == X0[j])
                else:
                    matches = 0
                # Laplace-smoothed conditional P(X0[j] | Ck). The original
                # code used matches / Nk[Ck], so one unseen value zeroed the
                # whole product and __Aj was dead state.
                p *= (matches + 1) / (self.__Nk[Ck] + self.__Aj[j])
            if p > pMax:
                pMax = p
                label = Ck
        return label

    def score(self, X_samples, Y_labels):
        """Return classification accuracy on the given labelled samples."""
        Y_pred = self.predict(X_samples)
        # Compare as numpy arrays so '==' yields an element-wise bool array
        # (a plain list '==' comparison would give a single bool).
        return float((np.array(Y_pred) == np.array(Y_labels)).sum() / len(Y_labels))


if __name__ == '__main__':
    import time

    from datasProcess import X_train, y_train, X_test, y_test

    # Time the training phase.
    t0 = time.time()
    model = NaiveBayesClassifier()
    model.fit(X_train, y_train)
    t1 = time.time()
    print(f"The fit() process took about {t1 - t0} seconds")

    model.testPrint()

    # Time the two evaluation passes (train + test accuracy).
    t0 = time.time()
    print('trainAcc:', model.score(X_train, y_train))
    print('testAcc:', model.score(X_test, y_test))
    t1 = time.time()
    print(f"The two score() process took about {t1 - t0} seconds")
