# -*- coding: utf-8 -*-
# @ModuleName: train_CNN
# @Function: 
# @Author: Liweijian
# @Time: 2024/12/20 14:47
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from time import time
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from concurrent.futures import ThreadPoolExecutor

# Report how many GPUs TensorFlow can see on this machine.
gpu_devices = tf.config.list_physical_devices('GPU')
print("本机上可用的GPU个数：", len(gpu_devices))

def load_image(img_path, img_height, img_width):
    """Load one image file as a grayscale array resized to (img_height, img_width)."""
    image = load_img(img_path, target_size=(img_height, img_width), color_mode='grayscale')
    return img_to_array(image)  # convert the PIL image to a numpy array

"""
多线程加载图片处理标签
"""
def load_images_threading(data_dir, img_height, img_width, max_workers=4, num_classes=10):
    """Load every .jpg in *data_dir* concurrently and build one-hot label vectors.

    Filenames are expected to end in ``_<label>.jpg``; the integer class label
    is parsed from the last underscore-separated token of the stem.

    Args:
        data_dir: directory containing the .jpg image files.
        img_height: target height each image is resized to.
        img_width: target width each image is resized to.
        max_workers: number of threads used for concurrent loading.
        num_classes: number of label classes for the one-hot encoding
            (previously hard-coded to 10; the default keeps old behavior).

    Returns:
        Tuple ``(images, labels)`` where ``images`` is a float array scaled to
        [0, 1] and ``labels`` is one-hot encoded with *num_classes* columns.
    """
    filenames = [f for f in os.listdir(data_dir) if f.endswith('.jpg')]

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit all loads first; results come back in submission order, so
        # images stay aligned with the labels parsed from the same filenames.
        futures = [
            executor.submit(load_image, os.path.join(data_dir, name), img_height, img_width)
            for name in filenames
        ]
        images = [future.result() for future in futures]

    # Label is the integer after the last '_' in the extension-stripped name
    # (splitext is robust even if a name contains '.jpg' elsewhere).
    labels = [int(os.path.splitext(name)[0].split('_')[-1]) for name in filenames]

    images = np.array(images) / 255.0  # normalize pixel values to [0, 1]
    labels = tf.keras.utils.to_categorical(np.array(labels), num_classes=num_classes)

    return images, labels

def data_load_threading(train_data_dir, test_data_dir, img_height, img_width, batch_size):
    """Load train/test folders and wrap them in batched tf.data.Dataset objects."""
    def _to_dataset(images, labels):
        # Build a batched dataset from in-memory arrays.
        return tf.data.Dataset.from_tensor_slices((images, labels)).batch(batch_size)

    train_images, train_labels = load_images_threading(train_data_dir, img_height, img_width)
    test_images, test_labels = load_images_threading(test_data_dir, img_height, img_width)

    # Progress report.
    print("Training images loaded:", len(train_images))
    print("Test images loaded:", len(test_images))

    train_ds = _to_dataset(train_images, train_labels)
    val_ds = _to_dataset(test_images, test_labels)
    return train_ds, val_ds

# Build the CNN model
def model_load(IMG_SHAPE=(28, 28, 1), class_num=10):
    """Build and compile the CNN classifier.

    Args:
        IMG_SHAPE: input image shape (height, width, channels).
        class_num: number of output classes.

    Returns:
        A compiled ``tf.keras`` Sequential model.

    NOTE(fix): the data pipeline (``load_images_threading``) already scales
    pixels to [0, 1]. The previous ``Rescaling(1./255)`` first layer divided by
    255 a *second* time, squashing inputs into [0, 1/255]; it has been removed.
    """
    model = tf.keras.models.Sequential([
        # First conv block: 32 filters of 3x3. ReLU zeroes negative values,
        # adding the non-linearity needed to learn complex patterns.
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                               input_shape=IMG_SHAPE),
        tf.keras.layers.BatchNormalization(),  # normalize activations
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),  # downsample

        # Second conv block: 64 filters of 3x3.
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),

        # Third conv block: 128 filters of 3x3.
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),

        # Flatten the 3-D feature maps into a 1-D vector for the dense layers.
        tf.keras.layers.Flatten(),

        # Fully connected layer with 256 units over the flattened features.
        tf.keras.layers.Dense(256, activation='relu'),
        # Dropout for regularization.
        tf.keras.layers.Dropout(0.4),

        # Output layer: softmax over the classes.
        tf.keras.layers.Dense(class_num, activation='softmax')
    ])
    model.summary()

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),  # Adam, lr=0.001
                  loss='categorical_crossentropy',  # matches one-hot labels
                  metrics=['accuracy'])  # track accuracy during training

    return model


# Plot the training curves
def show_loss_acc(history):
    """Plot train/validation accuracy and loss curves and save them to
    ``results/results_cnn.png``.

    Args:
        history: the ``History`` object returned by ``model.fit`` (its
            ``history`` dict must contain 'accuracy', 'val_accuracy',
            'loss' and 'val_loss').
    """
    acc = history.history['accuracy']          # training accuracy per epoch
    val_acc = history.history['val_accuracy']  # validation accuracy per epoch
    loss = history.history['loss']             # training loss per epoch
    val_loss = history.history['val_loss']     # validation loss per epoch

    plt.figure(figsize=(8, 8))
    plt.subplot(2, 1, 1)
    plt.plot(acc, label='Training Accuracy')
    plt.plot(val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.ylabel('Accuracy')
    plt.ylim([0, 1])
    plt.title('Training and Validation Accuracy')

    plt.subplot(2, 1, 2)
    plt.plot(loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylabel('Cross Entropy')
    plt.title('Training and Validation Loss')
    plt.xlabel('epoch')

    # savefig fails if the target directory is missing — create it first.
    os.makedirs('results', exist_ok=True)
    plt.savefig('results/results_cnn.png', dpi=100)
    plt.close()  # release the figure so repeated calls do not accumulate memory

def train(epochs,
          train_data_dir="D:/pythonProject/Final_MNIST/data/train_images",
          test_data_dir="D:/pythonProject/Final_MNIST/data/test_images",
          batch_size=32):
    """Train the CNN on the image folders and save the model plus plots.

    Args:
        epochs: number of training epochs.
        train_data_dir: folder of training .jpg images (defaults keep the
            previously hard-coded path).
        test_data_dir: folder of test .jpg images used for validation.
        batch_size: number of samples processed per training step.
    """
    begin_time = time()
    train_ds, val_ds = data_load_threading(
        train_data_dir=train_data_dir,
        test_data_dir=test_data_dir,
        img_height=28,
        img_width=28,
        batch_size=batch_size
    )

    model = model_load(class_num=10)
    history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)

    # model.save does not create missing directories — ensure it exists.
    os.makedirs("models", exist_ok=True)
    model.save("models/cnn_mnist.h5")

    end_time = time()
    run_time = end_time - begin_time
    print('该循环程序运行时间：', run_time, "s")  # total wall-clock time
    show_loss_acc(history)

if __name__ == '__main__':
    # Script entry point: train the CNN for 8 epochs.
    train(epochs=8)
