import pickle
import numpy as np
import gzip
from sklearn.model_selection import train_test_split


def normalize(data):
    """
    Binarize a 2-D array: strictly positive entries become 1, all others 0.

    :param data: two-dimensional array-like supporting comparison operators
    :return: int32 ndarray of the same shape containing only 0s and 1s
    """
    binary_mask = np.greater(data, 0.0)
    return binary_mask.astype(np.int32)


def prepare_mnist_data(data_path):
    """
    Load and preprocess the MNIST dataset.

    :param data_path: path to the gzipped pickle file (e.g. "mnist.pkl.gz")
    :return: binarized train features, binarized test features,
             train labels, test labels
    """
    # Context manager fixes a resource leak: the original opened the gzip
    # stream and never closed it.
    # NOTE(review): pickle.load executes arbitrary code from the file —
    # only use with a trusted local dataset.
    with gzip.open(data_path, 'rb') as mnist:
        train_set, valid_set, test_set = pickle.load(mnist, encoding="bytes")  # train/valid/test tuples
    # The split deliberately discards 20% of the training data (subsampling);
    # the "test" half of the split is thrown away.
    features_train, _, labels_train, _ = train_test_split(train_set[0], train_set[1], test_size=0.2, random_state=42)
    print("------训练集二值化------")
    normalized_features_train = normalize(features_train)
    normalized_labels_train = labels_train
    # Keep only 10% of the test set to speed up evaluation.
    features_test, _, labels_test, _ = train_test_split(test_set[0], test_set[1], test_size=0.9, random_state=42)
    print("------测试集二值化------")
    normalized_features_test = normalize(features_test)
    normalized_labels_test = labels_test
    return normalized_features_train, normalized_features_test, normalized_labels_train, normalized_labels_test


def calculate_naive_bayes(features_train, labels_train):
    """
    Estimate class priors and per-feature conditional probabilities of a
    Bernoulli naive-Bayes model, with Laplace smoothing.

    :param features_train: (num_samples, num_features) binarized feature matrix
    :param labels_train: integer class labels, assumed to lie in 0..num_classes-1
    :return: tuple of (smoothed prior, shape (num_classes,);
             smoothed conditional-probability matrix, shape (num_features, num_classes))
    """
    num_samples, num_features = features_train.shape
    num_classes = len(set(labels_train))  # number of distinct labels
    print("------开始训练------")
    # Vectorized replacement for the original O(num_samples * num_features)
    # pure-Python double loop: per-class sample counts and per-class feature sums.
    prior_prob = np.bincount(labels_train, minlength=num_classes).astype(np.float64)
    conditional_prob_matrix = np.zeros((num_classes, num_features))
    np.add.at(conditional_prob_matrix, labels_train, features_train)
    # Laplace smoothing (formulas identical to the original implementation).
    prior_prob_smoothed = (prior_prob + 1) / (num_samples + num_classes)
    # Divides by the RAW class counts (+2), not the smoothed prior, and returns
    # the matrix transposed to (num_features, num_classes) — the caller
    # transposes it back before use.
    conditional_prob_matrix_smoothed = (conditional_prob_matrix.T + 1) / (prior_prob + 2)
    return prior_prob_smoothed, conditional_prob_matrix_smoothed


def calculate_conditional_prob_log(feature_value, conditional_prob_feature_value_of_label):
    """
    Log-likelihood of a binary feature value under a Bernoulli distribution.

    :param feature_value: observed binary feature value (0 or 1)
    :param conditional_prob_feature_value_of_label: P(feature = 1 | class)
    :return: log P(feature_value | class)
    """
    log_on = np.log(conditional_prob_feature_value_of_label)
    log_off = np.log(1 - conditional_prob_feature_value_of_label)
    return feature_value * log_on + (1 - feature_value) * log_off


def test(features_test, labels_test, conditional_prob_matrix, prior_prob):
    """
    Evaluate the trained model on the test set and print the accuracy.

    :param features_test: (num_samples, num_features) binarized test features
    :param labels_test: integer test labels
    :param conditional_prob_matrix: (num_classes, num_features) conditional probabilities
    :param prior_prob: (num_classes,) smoothed class priors
    """
    num_samples, num_features = features_test.shape
    # Bug fix: the original read the module-level global ``labels_train``
    # here (which only worked by accident when run as a script); the class
    # count is a property of the model itself.
    num_classes = len(prior_prob)
    accuracy = 0
    # Hoisted out of the sample loop: the log prior is invariant per sample.
    log_prior = np.log(prior_prob)
    print("------开始测试样本------")
    for i in range(num_samples):
        feature = features_test[i]
        log_posterior = log_prior.copy()
        for j in range(num_classes):
            for k in range(num_features):
                log_posterior[j] += calculate_conditional_prob_log(feature[k], conditional_prob_matrix[j][k])
        predict_label = np.argmax(log_posterior)
        accuracy += (predict_label == labels_test[i])
    print("测试准确率:", accuracy / num_samples)


# Script entry point: load/binarize MNIST, fit the naive-Bayes counts, evaluate.
if __name__ == '__main__':
    mnist_data_path = "mnist.pkl.gz"  # gzipped pickle holding (train, valid, test)
    features_train, features_test, labels_train, labels_test = prepare_mnist_data(mnist_data_path)
    prior_prob, conditional_prob_matrix = calculate_naive_bayes(features_train, labels_train)
    # calculate_naive_bayes returns the conditional matrix transposed to
    # (num_features, num_classes), so transpose back before evaluation.
    test(features_test, labels_test, conditional_prob_matrix.T, prior_prob)
