# AlexNet architecture applied to the five-class flower dataset.
# The code is wrapped in functions so it can run both locally and on Colab.

import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # uncomment to force CPU-only execution on a local machine
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, ReLU
from tensorflow.keras.layers import BatchNormalization
import pathlib
import random
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator

print(tf.config.list_physical_devices('GPU'))  # show whether a GPU is visible to TF
print(tf.__version__)  # log the TensorFlow version for reproducibility


# Download and extract the five-class flower dataset.
def downloadDataset(image_path):
    """Download the flower_photos archive and untar it at ``image_path``.

    Bug fix: the original called ``os.makedirs(image_path)`` and then passed
    the now-existing directory as ``fname`` to ``get_file``, which cannot
    write its archive over an existing directory.  Create only the parent
    directory and let ``get_file`` extract into ``image_path`` itself.
    """
    # With untar=True, fname names the extraction target; a trailing slash
    # (as in the caller's path) would break it, so normalize first.
    target = os.path.normpath(image_path)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    keras.utils.get_file(fname=target,
                         origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
                         untar=True)


# Build train/validation data generators with ImageDataGenerator.
def loadData(batch_size, validation_split, image_path="F:/python/dataset/flower_photos/"):
    """Create one-hot-encoded image generators for training and validation.

    Parameters
    ----------
    batch_size : int
        Training batch size (validation batches are 4x larger).
    validation_split : float
        Fraction of the images reserved for the validation subset.
    image_path : str
        Root directory of the dataset; downloaded first if missing.
        (Generalized from the original hard-coded local path; the default
        preserves the original behavior.)

    Returns
    -------
    (train_data_gen, valid_data_gen)
        A pair of ``DirectoryIterator`` objects.
    """
    if not os.path.exists(image_path):
        downloadDataset(image_path)

    # AlexNet expects 227x227 RGB inputs.
    im_height = 227
    im_width = 227
    image_generator = ImageDataGenerator(rescale=1. / 255., validation_split=validation_split)

    # Training generator: one-hot labels, shuffled each epoch.
    train_data_gen = image_generator.flow_from_directory(directory=image_path,
                                                         batch_size=batch_size,
                                                         shuffle=True,
                                                         target_size=(im_height, im_width),
                                                         class_mode='categorical',
                                                         subset='training')
    # Validation generator: one-hot labels, deterministic order; larger
    # batches are fine here because no backprop happens on this subset.
    valid_data_gen = image_generator.flow_from_directory(directory=image_path,
                                                         batch_size=batch_size * 4,
                                                         shuffle=False,
                                                         target_size=(im_height, im_width),
                                                         class_mode='categorical',
                                                         subset='validation')

    return train_data_gen, valid_data_gen


# Build the (size-reduced) AlexNet classifier.
def createModel(labels):
    """Return a Sequential AlexNet-style CNN with ``labels`` output classes.

    Input is a 227x227x3 image.  The classic AlexNet head uses two
    Dense(4096) layers (recommended on Colab for best accuracy); the head
    here is shrunk to 768/256 units so a laptop can train it.
    """
    model = Sequential(name='AlexNet')

    # Conv block 1: 227x227x3 -> conv -> 55x55x96 -> pool -> 27x27x96
    model.add(Conv2D(96, kernel_size=11, strides=4, input_shape=[227, 227, 3], padding='valid'))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=3, strides=2))
    model.add(BatchNormalization())

    # Conv block 2: 27x27x96 -> conv -> 27x27x256 -> pool -> 13x13x256
    model.add(Conv2D(256, kernel_size=5, strides=1, padding='same'))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=3, strides=2))
    model.add(BatchNormalization())

    # Conv stack 3-5: three 3x3 'same' convolutions at 13x13 resolution.
    model.add(Conv2D(384, kernel_size=3, strides=1, padding='same'))
    model.add(ReLU())
    model.add(Conv2D(384, kernel_size=3, strides=1, padding='same'))
    model.add(ReLU())
    model.add(Conv2D(256, kernel_size=3, strides=1, padding='same'))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=3, strides=2))  # -> 6x6x256

    # Classifier head (reduced from AlexNet's 4096/4096 for local training).
    model.add(Flatten())
    model.add(Dense(768, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(labels, activation='softmax'))

    return model


# Compile and fit the model on the training generator.
def trainingModel(model, train_data_gen, training_epoches):
    """Compile with SGD + categorical cross-entropy and train the model.

    Returns the Keras ``History`` object produced by ``model.fit``.
    """
    import math

    # NOTE(review): the original comment claimed the learning rate should be
    # below 2e-7, yet 0.001 is used — confirm which is intended.
    model.compile(loss="categorical_crossentropy",
                  optimizer=tf.optimizers.SGD(0.001),
                  metrics=['accuracy'])

    # One full pass over the samples per epoch, partial final batch included.
    steps = math.ceil(train_data_gen.n / train_data_gen.batch_size)

    return model.fit(train_data_gen,
                     steps_per_epoch=steps,
                     epochs=training_epoches,
                     verbose=1)


# Location of the trained-weights file on disk.
def getSavePath():
    """Return the relative path of the saved AlexNet weight file."""
    return './data/alnex_model.h5'


# Ensure the local ./data output directory exists.
def getLocalPath(reload=False):
    """Create the local ``./data`` directory used for saved weights.

    Parameters
    ----------
    reload : bool
        When True, skip directory creation and return 0 (preserves the
        original early-exit behavior).

    Returns
    -------
    The directory path on the normal path, or 0 when ``reload`` is True.

    Fixes vs. the original: removed the unused ``where`` variable and the
    redundant local ``import os``; ``exist_ok=True`` closes the race between
    the existence check and ``makedirs``; the path is now returned so
    callers can use it (previously the function returned ``None``).
    """
    if reload:
        return 0

    path = './data'
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
        print(f'Make Dir {path}')
    return path


# # # # # # # # # # # # # # # # # # # # # # # # # #
def main():
    """End-to-end driver: load data, build and train AlexNet, plot the loss
    curve, save the weights, and evaluate on the validation split."""
    # Step 1: hyperparameters.
    # The original assigned Colab-sized values (epochs=5, split=0.2) and then
    # immediately overwrote them with the local-machine values below; only
    # the effective final values are kept.
    batch_size = 32          # samples per training batch
    training_epoches = 2     # local machine; raise (e.g. 5+) on Colab
    validation_split = 0.9   # fraction held out for validation locally
    load_weights = False     # whether to restore previously trained weights
    labels = 5               # five flower classes

    # Step 2: prepare the output directory, fix the RNG seed, load data.
    getLocalPath()
    np.random.seed(1000)
    train_data_gen, valid_data_gen = loadData(batch_size=batch_size,
                                              validation_split=validation_split)

    # Step 3: build (and optionally warm-start) the model, then train it.
    model = createModel(labels)
    model.summary()

    if load_weights:
        # Only the weights are restored here; the architecture is rebuilt
        # above, so the model supports both predict and evaluate.
        model.load_weights(getSavePath())

    history = trainingModel(model, train_data_gen, training_epoches)

    # Step 4: loss curve, weight saving, and evaluation.
    plt.plot(history.history['loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')

    print('save weights')
    filePath = getSavePath()
    print(filePath)
    model.save_weights(filePath)

    print('model evaluate')
    from math import ceil
    steps = ceil(valid_data_gen.n / valid_data_gen.batch_size)
    model.evaluate(valid_data_gen, steps=steps)


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    import time

    started = time.time()
    main()
    # Report elapsed wall-clock time, then display the pending loss plot.
    print('time cost', time.time() - started, 's')
    plt.show()
