import tensorflow as tf
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import os

# Silence TF1-style INFO/WARNING log spam so only errors reach the console.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)


class TrainingModel:
    """Trains a small CNN digit classifier on MNIST and saves it to disk."""

    @staticmethod
    def train(path='model.keras'):
        """Train the MNIST CNN and save the resulting model.

        Loads MNIST, normalizes and augments the training images, trains a
        two-conv-layer CNN with L2 regularization and an SGD optimizer on an
        exponentially decaying learning rate, evaluates on the test split,
        and saves the model.

        Args:
            path: File path the trained model is saved to (Keras format).

        Returns:
            The ``[test_loss, test_accuracy]`` list from ``model.evaluate``.
        """
        (x_train, y_train), (x_test, y_test) = mnist.load_data()

        # Convert pixel data to float32 and normalize to the [0, 1] range.
        x_train = x_train.astype('float32') / 255
        x_test = x_test.astype('float32') / 255

        # One-hot encode the digit labels (10 classes).
        y_train = to_categorical(y_train, 10)
        y_test = to_categorical(y_test, 10)

        # Add the single grayscale channel dimension. -1 infers the sample
        # count instead of hard-coding 60000/10000, so this keeps working
        # even if the dataset split sizes change.
        x_train = x_train.reshape(-1, 28, 28, 1)
        x_test = x_test.reshape(-1, 28, 28, 1)

        # Data augmentation: small random rotations, zooms and shifts to
        # improve generalization.
        datagen = ImageDataGenerator(
            rotation_range=10,        # random rotation within ±10 degrees
            zoom_range=0.1,           # random zoom up to ±10%
            width_shift_range=0.1,    # random horizontal shift (fraction of width)
            height_shift_range=0.1,   # random vertical shift (fraction of height)
        )
        datagen.fit(x_train)

        # Build the model: two conv layers -> max-pool -> dense head, with
        # dropout and L2 regularization throughout to curb overfitting.
        model = Sequential()
        model.add(
            Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1), kernel_regularizer=l2(0.01)))
        model.add(Conv2D(64, (3, 3), activation='relu', kernel_regularizer=l2(0.01)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(128, activation='relu', kernel_regularizer=l2(0.01)))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', kernel_regularizer=l2(0.01)))

        # Exponentially decaying learning-rate schedule.
        # NOTE(review): 0.3 is a very high initial rate for SGD; kept as-is
        # since it is a deliberate hyperparameter — confirm it converges.
        lr_schedule = ExponentialDecay(
            initial_learning_rate=0.3,  # initial learning rate
            decay_steps=10000,          # steps between each decay application
            decay_rate=0.96)            # multiplicative decay factor

        # Plain stochastic gradient descent driven by the schedule above.
        optimizer = SGD(learning_rate=lr_schedule)

        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

        # Train. model.fit accepts generators directly; fit_generator is
        # deprecated and removed in modern Keras. steps_per_epoch must be
        # an integer, hence floor division.
        model.fit(datagen.flow(x_train, y_train, batch_size=128),
                  validation_data=(x_test, y_test),
                  steps_per_epoch=len(x_train) // 128, epochs=10, verbose=1,
                  callbacks=[EarlyStopping(monitor='val_loss', patience=1)])

        # Evaluate on the held-out test split.
        score = model.evaluate(x_test, y_test, verbose=0)

        print('Test loss:', score[0])
        print('Test accuracy:', score[1])

        # Persist the trained model.
        model.save(path)

        # Plain print instead of shelling out via os.system('echo ...');
        # same console output, no subprocess.
        print('=====================TRAINING COMPLETED=====================')

        return score
