import numpy as np
import pandas as pd
from operator import mul
from functools import reduce


# 构造NB分类器
def Train(X_train, Y_train, feature, label):

    # 先求先验概率

    class_num = len(label)  # 分类数目
    prior_probability = np.zeros(class_num)  # 初始化先验概率
    label_count = np.zeros(class_num)  # 存放各类型的数目
    for i in range(len(Y_train)):
        for j in range(class_num):
            if Y_train[i] == label[j]:
                label_count[j] += 1
                continue
    for i in range(class_num):
        prior_probability[i] = label_count[i] / len(Y_train)  # 求得先验概率

    '''
    然后求条件概率
    conditional_probability是一个三维列表，第一维是类别分类，第二维和第三维是特征分类
    '''

    feature_len = len(feature)  # 特征长度
    maxlen_feature_cell = np.max([len(x) for x in feature])  # 特征值的最大取值范围长度，用于下面的条件概率初始化

    conditional_probability = np.zeros((class_num, feature_len, maxlen_feature_cell))  # 初始化条件概率

    # 分为两个类别 先计数
    for i in range(class_num):
        # 对特征按行遍历
        for j in range(feature_len):
            # 遍历数据集，并依次做判断
            for k in range(len(Y_train)):
                if Y_train[k] == label[i]:  # 相同类别
                    for m in range(len(feature[j])):
                        if X_train[k][j] == feature[j][m]:
                            conditional_probability[i][j][m] += 1

    for i in range(class_num):
        for j in range(feature_len):
            for m in range(len(feature[j])):
                # print('标签：'+feature[j][m] +' 类别：'+ label[i]+'  条件概率:'+str(conditional_probability[i][j][m]) + '/'+str(label_count[i]))
                conditional_probability[i][j][m] = (conditional_probability[i][j][m]+1) / (label_count[i]+feature_len)  # 求得i类j行第m个特征的条件概率 加入拉普拉斯平滑

    return prior_probability, conditional_probability


# 给定数据进行分类
# Classify a single sample with the trained model.
def Predict(testset, prior_probability, conditional_probability, feature, label):
    """Classify one sample using trained naive Bayes parameters.

    Args:
        testset: list of feature values, one per feature (same order as feature).
        prior_probability: prior_probability[i] = P(label[i]).
        conditional_probability: [i][j][m] = P(feature[j][m] | label[i]).
        feature: per-feature lists of possible values.
        label: list of class labels.

    Returns:
        Tuple (best_label, result): best_label is the most probable class;
        result is a 2-row array stacking labels over their unnormalized
        posterior scores.
    """
    class_num = len(label)  # number of classes
    result = np.zeros(class_num)
    for i in range(class_num):
        # BUGFIX: one conditional probability per feature. The size was
        # hard-coded to 4, which raised IndexError for more than 4 features.
        conditional = np.ones(len(feature))
        for j in range(len(feature)):
            for m in range(len(feature[j])):
                if feature[j][m] == testset[j]:
                    conditional[j] = conditional_probability[i][j][m]
                    break  # value found; stop scanning this feature
        # Unnormalized posterior: P(class) * prod_j P(x_j | class).
        result[i] = prior_probability[i] * np.prod(conditional)
    best_index = np.argmax(result)
    result = np.vstack([label, result])  # row 0: labels, row 1: scores

    return label[best_index], result


def main():
    """Load the sample dataset, train the classifier, and classify one row."""
    csv_file = '../../data/bayes_test.csv'
    ds = loadData(csv_file, ' ')
    cols = ds.columns.tolist()  # header: feature names followed by the class column
    X_train = ds[cols[:-1]].values.tolist()  # feature values of each sample
    Y_train = ds[cols[-1]].values.tolist()   # class of each sample
    # Distinct values observed in each column of the dataset.
    cols_set = [list(set(ds[name].values.tolist())) for name in cols]
    feature = cols_set[:-1]  # per-feature value ranges
    label = cols_set[-1]     # the set of classes

    # Sample to classify.
    testset = ['<30', '中', '是', '一般']

    # Estimate prior and conditional probabilities from the training data.
    prior_probability, conditional_probability = Train(X_train, Y_train, feature, label)

    # Classify the test sample and report the per-class scores.
    best, result = Predict(testset, prior_probability, conditional_probability, feature, label)
    print('计算出类别对应的条件概率：')
    print(result)
    print('因此，' + str(testset) + '分类结果为：' + best)

def loadData(filename, _separator=','):
    """Load a CSV dataset into a DataFrame.

    Args:
        filename: path of the CSV file.
        _separator: field delimiter between cells (defaults to ',').

    Returns:
        pandas.DataFrame with the file contents.
    """
    # BUGFIX: pass the delimiter by keyword -- positional use of read_csv's
    # second argument was deprecated and removed in pandas 2.0.
    return pd.read_csv(filename, sep=_separator)

if __name__ == '__main__':
    main()  # run the demo only when executed as a script
