from Bagging_ForP import Bagging
import time
import numpy as np
import gzip

# Visual separator for the console output
print('---------------------------------')

def load_mnist(train_images_path, train_labels_path, test_images_path, test_labels_path):
    """Load the four gzipped MNIST IDX files and return them as numpy arrays.

    Each image file stores 28x28 single-channel pixels (uint8, values 0-255)
    after a 16-byte IDX header; each label file stores one uint8 label (0-9)
    per sample after an 8-byte header.  ``np.frombuffer`` with ``offset``
    skips the header, and each 28x28 image is flattened to a row of 784
    features for convenient downstream processing.

    Parameters are the local filesystem paths of the four ``.gz`` files.

    Returns:
        tuple: ``(train_images, train_labels, test_images, test_labels)``
        where the image arrays have shape ``(n_samples, 784)`` and the label
        arrays have shape ``(n_samples,)``, all dtype ``np.uint8``.
    """
    def _read_images(path):
        # Skip the 16-byte IDX image header; the rest is raw pixel data.
        with gzip.open(path, 'rb') as f:
            return np.frombuffer(f.read(), dtype=np.uint8, offset=16).reshape(-1, 784)

    def _read_labels(path):
        # Skip the 8-byte IDX label header; the rest is one label byte per sample.
        with gzip.open(path, 'rb') as f:
            return np.frombuffer(f.read(), dtype=np.uint8, offset=8)

    return (_read_images(train_images_path), _read_labels(train_labels_path),
            _read_images(test_images_path), _read_labels(test_labels_path))

# Local filesystem paths of the gzipped MNIST data set files
train_images_path = r'SuccessForP\MNIST\raw\train-images-idx3-ubyte.gz'
train_labels_path = r'SuccessForP\MNIST\raw\train-labels-idx1-ubyte.gz'
test_images_path = r'SuccessForP\MNIST\raw\t10k-images-idx3-ubyte.gz'
test_labels_path = r'SuccessForP\MNIST\raw\t10k-labels-idx1-ubyte.gz'

# Load the MNIST data set from disk and report split sizes
train_images, train_labels, test_images, test_labels = load_mnist(train_images_path, train_labels_path, test_images_path, test_labels_path)
print('原数据集样本数:',train_images.shape[0]+test_images.shape[0])
print('训练集样本数:',train_images.shape[0])
print('测试集样本数:',test_images.shape[0])

print('数据集加载成功')
print('---------------------------------')

# Scale pixel values from [0, 255] into [0.0, 1.0]
# NOTE: the true division promotes the uint8 arrays to float64.
train_images = train_images / 255.0
test_images = test_images / 255.0
print('数据集处理完成')
print('---------------------------------')


# ------------------------------------------------------------------
# Build the Bagging classifier.
# (The stray run of quote characters that used to sit here was a no-op
# string-literal expression — dead code — and has been removed.)
#
# Bagging vs. RandomForest: RandomForest adds random feature sub-sampling
# on top of bootstrap sampling.  With max_features=None every tree is
# trained on the full feature set, i.e. plain Bagging.
bg = Bagging(num_trees=50, max_depth=20, max_features=None, bootstrap=True, random_state=55)
print('Bagging分类器构建成功')
print('---------------------------------')

# Fit the ensemble on the training set, timing the whole run.
print('开始训练')
fit_started = time.time()
bg.fit(train_images, train_labels)
train_time = time.time() - fit_started
print('训练完成')
print("训练时间: {:.2f} s".format(train_time))
print('---------------------------------')

# Predict on the held-out test set, timed the same way.
print('开始预测')
predict_started = time.time()
y_pred = bg.predict(test_images)
predict_time = time.time() - predict_started
print('预测完成')
print("预测时间: {:.2f} s".format(predict_time))
print('---------------------------------')

# Overall accuracy: fraction of test samples whose prediction equals the label.
correct_count = np.sum(test_labels == y_pred)
accuracy = correct_count / len(test_labels)
print("准确率: {:.2f}%".format(accuracy * 100))

# Number of classes, assuming labels are consecutive integers starting at 0.
# Taking the max over BOTH the true labels and the predictions guards against
# an out-of-bounds index if the model ever emits a label larger than any
# label present in the test set.
num_classes = int(max(np.max(test_labels), np.max(y_pred))) + 1
# Confusion matrix: rows = true class, columns = predicted class.
confusion_matrix = np.zeros((num_classes, num_classes))
# np.add.at accumulates every (true, pred) pair in one vectorized pass
# instead of a Python-level loop over all test samples.
np.add.at(confusion_matrix, (test_labels, y_pred), 1)

# Macro-averaged precision: per-class TP / predicted-positives, summed and
# divided by the total class count.  Classes that were never predicted
# contribute 0 (instead of 0/0) — the `where` mask reproduces the original
# loop's `predicted_positives > 0` guard in a single vectorized pass.
true_positives = np.diag(confusion_matrix)
predicted_positives = confusion_matrix.sum(axis=0)
per_class_precision = np.divide(
    true_positives, predicted_positives,
    out=np.zeros(num_classes), where=predicted_positives > 0)
precision = np.sum(per_class_precision) / num_classes
print("精确率: {:.2f}%".format(precision * 100))


# Macro-averaged recall: per-class TP / actual-positives (row sums); classes
# with no true samples likewise contribute 0.
actual_positives = confusion_matrix.sum(axis=1)
per_class_recall = np.divide(
    true_positives, actual_positives,
    out=np.zeros(num_classes), where=actual_positives > 0)
recall = np.sum(per_class_recall) / num_classes
print("召回率: {:.2f}%".format(recall * 100))


# 计算F1分数
if precision + recall > 0:
    f1_score = 2 * (precision * recall) / (precision + recall)
    print("F1分数: {:.2f}%".format(f1_score * 100))
else:
    print("由于精确率和召回率之和为0，无法计算F1分数")
print('---------------------------------')


