import numpy as np
import pandas as pd


def main(data_path="./data.txt"):
    """Naive Bayes demo: classify one hard-coded item from categorical data.

    The data file is expected to contain comma-separated rows of the form
    ``feat0,feat1,feat2,class`` where class is 0 or 1, with at least 20 rows.

    Args:
        data_path: Path to the training data file. Defaults to "./data.txt"
            for backward compatibility with the original script.

    Returns:
        int: the predicted class index for the hard-coded query item X.
    """
    print("Begin naive bayes classification ")
    # NOTE: np.str / np.int were deprecated aliases removed in NumPy 1.24;
    # use the builtin str and an explicit integer dtype instead.
    data = np.loadtxt(data_path, dtype=str, delimiter=",")
    print("Data looks like: ")
    for i in range(5):
        print(data[i])
    print(". . . \n")

    nx = 3  # number of predictor variables
    nc = 2  # number of classes
    N = 20  # number of training samples used

    # joint_cts[j][k]: rows where feature j equals X[j] and the class is k
    joint_cts = np.zeros((nx, nc), dtype=np.int64)
    y_cts = np.zeros(nc, dtype=np.int64)  # per-class sample counts

    # The item to classify.
    X = ['dentist', 'hazel', 'italy']
    print("Item to classify: ")
    print(X)

    for i in range(N):
        y = int(data[i][nx])  # target class is the last column
        y_cts[y] += 1  # accumulate per-class sample count

        # Count, per class, how many rows match the query item's value
        # for each predictor variable.
        for j in range(nx):
            if data[i][j] == X[j]:
                joint_cts[j][y] += 1

    joint_cts += 1  # Laplace (add-one) smoothing: no zero counts

    print("\nJoint counts: ")
    print(joint_cts)

    print("\nJoint counts(dataframe format): ")
    df = pd.DataFrame(joint_cts, index=['dentist', 'hazel', 'italy'], columns=['class_0', 'class_1'])
    print(df)

    print("\nClass counts: ")
    print(y_cts)

    # Compute the evidence terms in log space to avoid floating-point
    # underflow, then exponentiate back.
    e_terms = np.zeros(nc, dtype=np.float32)
    for k in range(nc):
        v = 0.0
        for j in range(nx):
            v += np.log(joint_cts[j][k]) - np.log(y_cts[k] + nx)
        v += np.log(y_cts[k]) - np.log(N)
        e_terms[k] = np.exp(v)

    np.set_printoptions(4)
    print("\nEvidence terms: ")
    print(e_terms)

    # Normalize the evidence terms into pseudo-probabilities (vectorized).
    evidence = np.sum(e_terms)
    probs = e_terms / evidence

    print("\nPseudo-probabilities: ")
    print(probs)

    pc = np.argmax(probs)
    print("\nPredicted class: ")
    print(pc)

    print("\nEnd naive Bayes demo ")
    return int(pc)


# Run the demo only when executed as a script (not when imported).
if __name__ == "__main__":
    main()