import numpy as np


class NaiveBayesClassifier(object):
    def __init__(self):
        # self.label_prob holds the Laplace-smoothed prior probability of each
        # label, e.g. {0: 0.333, 1: 0.667} means label 0 occurs with
        # probability 0.333 and label 1 with probability 0.667.
        self.label_prob = {}
        # self.condition_prob holds, for each label, each feature column, and
        # each value that appears anywhere in that column, the Laplace-smoothed
        # conditional probability P(value | label):
        #   {label: {column: {value: probability}}}
        # e.g. for features [[2, 1, 1], [1, 2, 2], [2, 2, 2], [2, 1, 2],
        # [1, 2, 3]] with labels [1, 0, 1, 0, 1]:
        #   {0: {0: {1: 0.5,  2: 0.5},
        #        1: {1: 0.5,  2: 0.5},
        #        2: {1: 0.2,  2: 0.6,  3: 0.2}},
        #    1: {0: {1: 0.4,  2: 0.6},
        #        1: {1: 0.4,  2: 0.6},
        #        2: {1: 1/3,  2: 1/3,  3: 1/3}}}
        # Every value observed anywhere in a column gets an entry under every
        # label, so a value unseen for a particular label still receives the
        # smoothed floor 1 / (count(label) + n_distinct_values) instead of
        # being absent from the table.
        self.condition_prob = {}

    def fit(self, feature, label):
        '''
        Train the model, storing the smoothed probabilities in
        self.label_prob and self.condition_prob.
        :param feature: ndarray of training features (rows are samples)
        :param label: ndarray of training labels
        :return: None
        '''
        n_samples = len(label)
        n_labels = len(set(label))
        # Distinct values per column.  (The previous version indexed rows by
        # the cell *value* -- feature[i][cols] with i being a value -- which
        # miscounted the distinct values whenever values were not valid row
        # indices.)
        col_values = [set(feature[:, col]) for col in range(feature.shape[1])]

        # Smoothed prior: P(y) = (count(y) + 1) / (n_samples + n_labels)
        label_count = {}
        for y in label:
            label_count[y] = label_count.get(y, 0) + 1
        for y, cnt in label_count.items():
            self.label_prob[y] = (cnt + 1) / (n_samples + n_labels)

        # Smoothed conditionals:
        # P(x_col = v | y) = (count(v, y) + 1) / (count(y) + n_values(col))
        for y in self.label_prob:
            rows = [i for i in range(n_samples) if label[i] == y]
            self.condition_prob[y] = {}
            for col in range(feature.shape[1]):
                # Start every possible value of this column at count 0 so that
                # values unseen under label y are smoothed too -- this is the
                # point of Laplace smoothing, and it removes the need for the
                # old manual patch of condition_prob[0][2][1] inside predict().
                counts = {v: 0 for v in col_values[col]}
                for i in rows:
                    counts[feature[i][col]] += 1
                denom = len(rows) + len(col_values[col])
                self.condition_prob[y][col] = {
                    v: (c + 1) / denom for v, c in counts.items()
                }

    def predict(self, feature):
        '''
        Predict a label for each sample.
        :param feature: ndarray of test features (rows are samples)
        :return: ndarray of predicted labels
        '''
        labels = list(self.label_prob.keys())
        result = []
        for row in feature:
            prob = np.zeros(len(labels))
            for idx, y in enumerate(labels):
                # Posterior up to a constant: P(y) * prod over columns of
                # P(value | y).
                p = self.label_prob[y]
                for col in range(len(row)):
                    # A value never seen in training (in any row, under any
                    # label) has no entry at all; skip it, matching the
                    # original behaviour of ignoring unknown values.
                    if row[col] in self.condition_prob[y][col]:
                        p *= self.condition_prob[y][col][row[col]]
                prob[idx] = p
            # The label with the largest posterior wins.
            result.append(labels[int(np.argmax(prob))])
        return np.array(result)


if __name__ == '__main__':
    # Self-grading harness: re-reads this exercise file's own source, scans it
    # for signs of cheating, then trains/evaluates the classifier.
    # NOTE(review): the path literal contains a raw backslash ('\第' is an
    # invalid escape kept literally in Python 3) -- confirm it resolves on the
    # target platform.
    with open('朴素贝叶斯\第4关：拉普拉斯平滑.py', encoding="utf8") as f:
        code = f.read()
        # Cheat-detection flags, recomputed from the file contents below.
        has_import_sklearn = False
        has_print_answer = False
        has_open_file = False

        # NOTE(review): this very script contains the word 'open' (the `with
        # open(...)` above), so this flag appears to always be True -- verify
        # the intended detection target.
        if 'open' in code:
            has_open_file = True

        if 'sklearn' in code:
            has_import_sklearn = True

        # Detect a hard-coded success message by checking that every character
        # of the expected output string appears somewhere in the source.
        hash_name = ['正', '确', '率', '高', '于', '9', '0', '%', '！']
        hash_count = np.zeros(len(hash_name))
        for i, name in enumerate(hash_name):
            if hash_name[i] in code:
                hash_count[i] = 1
        if hash_count.sum() == len(hash_name):
            has_print_answer = True

        # NOTE(review): these three assignments unconditionally reset the
        # flags computed above, turning every anti-cheat branch below into
        # dead code -- confirm whether this is intentional or an inserted
        # bypass that should be removed.
        has_import_sklearn = False
        has_print_answer = False
        has_open_file = False

        if has_import_sklearn or has_print_answer:
            print('你可能正在试图作弊，请不要这样做')
        elif has_open_file:
            print('你正在试图打开文件，请不要这样做')
        else:
            # Fixed train/test split for the exercise.
            train_data = np.array([[2, 1, 1],
                                   [1, 2, 2],
                                   [2, 2, 2],
                                   [2, 1, 2],
                                   [1, 2, 3],
                                   [2, 1, 3],
                                   [1, 1, 3],
                                   [1, 2, 1],
                                   [2, 2, 1]])

            train_label = np.array([1, 0, 1, 1, 1, 0, 1, 1, 1])

            test_data = np.array([[1, 2, 3],
                                  [1, 1, 3],
                                  [2, 1, 3],
                                  [2, 2, 1],
                                  [2, 2, 2],
                                  [2, 1, 3],
                                  [1, 2, 3],
                                  [1, 2, 3],
                                  [1, 2, 3],
                                  [1, 2, 3],
                                  [1, 2, 3],
                                  [1, 2, 3]])

            test_label = np.array([1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])

            clf = NaiveBayesClassifier()
            clf.fit(train_data, train_label)
            predict = clf.predict(test_data)

            if np.mean(predict == test_label) > 0.9:
                # condition_prob[0][2][1] is a (label=0, col=2, value=1)
                # combination absent from the training data; a positive entry
                # here is taken as evidence that Laplace smoothing was applied.
                if clf.condition_prob[0][2][1] > 0:
                    print('正确率高于90%！')
                else:
                    print('未进行平滑处理，请修改！')
            else:
                print('正确率为:%f' % np.mean(predict == test_label))
