# VGG-16 transfer learning (ImageNet-pretrained) on the 5-class flower dataset.
# The code is wrapped in functions so it runs both locally and on Colab.
# Data is loaded via image_generator.flow_from_directory.
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, ReLU, GlobalAveragePooling2D
from tensorflow.keras.layers import BatchNormalization
import pathlib
import random
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import plot_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Sanity-check the TF runtime: confirm eager execution is on and show the version.
print(tf.executing_eagerly())
print(tf.__version__)


# Download the dataset
def downloadDataset(image_path):
    """Download and extract the 5-class flower dataset so it lands at *image_path*.

    The previous version created *image_path* as a directory and then asked
    ``get_file(fname=image_path, untar=True)`` to write the archive to that
    same path, which fails once the directory exists. Instead, download the
    tarball into the parent directory and let ``untar=True`` extract the
    ``flower_photos`` folder there, so the extracted tree matches *image_path*.

    Args:
        image_path: directory the dataset should end up in, e.g.
            ``.../dataset/flower_photos/``.
    """
    parent = os.path.dirname(os.path.normpath(image_path))
    os.makedirs(parent, exist_ok=True)
    tf.keras.utils.get_file(
        fname='flower_photos.tgz',
        origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
        untar=True,
        cache_dir=parent,   # place archive + extraction next to image_path
        cache_subdir='')


# Data-loading function using ImageDataGenerator
def loadData(where, batch_size, validation_split):
    """Build training and validation generators for the flower dataset.

    Args:
        where: 2 means Colab/Google Drive paths; anything else means the
            local Windows path (create that directory beforehand).
        batch_size: batch size for both generators.
        validation_split: fraction of the images reserved for validation.

    Returns:
        (train_data_gen, valid_data_gen): Keras directory iterators with
        one-hot (categorical) labels, images rescaled to [0, 1] and
        resized to 224x224. The training generator shuffles; the
        validation generator keeps a deterministic order.
    """
    if where == 2:
        image_path = '/content/drive/MyDrive/tf/data/flower_photos/'
    else:
        image_path = "F:/python/dataset/flower_photos/"  # create this directory on the local machine first

    # Fetch the dataset on first use.
    if not os.path.exists(image_path):
        downloadDataset(image_path)

    im_height = 224
    im_width = 224
    image_generator = ImageDataGenerator(rescale=1. / 255., validation_split=validation_split)

    # Training generator: one-hot labels, shuffled.
    train_data_gen = image_generator.flow_from_directory(directory=image_path,
                                                         batch_size=batch_size,
                                                         shuffle=True,
                                                         target_size=(im_height, im_width),
                                                         class_mode='categorical',
                                                         subset='training')
    # Validation generator: one-hot labels, not shuffled.
    valid_data_gen = image_generator.flow_from_directory(directory=image_path,
                                                         batch_size=batch_size,
                                                         shuffle=False,
                                                         target_size=(im_height, im_width),
                                                         class_mode='categorical',
                                                         subset='validation')
    return train_data_gen, valid_data_gen


# Build the model
def createModel(labels):
    """Build a VGG-16 transfer-learning classifier.

    The ImageNet-pretrained convolutional base is frozen except for its
    last four layers (fine-tuning), and a small fully-connected head with
    dropout is stacked on top.

    Args:
        labels: number of output classes (softmax units).

    Returns:
        An uncompiled ``tf.keras`` Sequential model named 'VGG-16'.
    """
    covn_base = tf.keras.applications.vgg16.VGG16(weights='imagenet', include_top=False,
                                                  input_shape=(224, 224, 3))

    # Fine-tune only the last 4 layers of the pretrained base; freeze the rest.
    covn_base.trainable = True
    for layer in covn_base.layers[:-4]:
        layer.trainable = False

    covn_base.summary()

    model = Sequential([
        covn_base,
        Flatten(),
        Dense(2048, activation='relu'),
        Dropout(0.5),
        Dense(256, activation='relu'),
        Dropout(0.5),
        Dense(labels, activation='softmax'),
    ], name='VGG-16')
    return model


# Train the model
def trainingModel(model, train_data_gen, training_epoches):
    """Compile *model* (SGD + categorical cross-entropy) and fit it on the
    training generator for *training_epoches* epochs.

    Returns:
        The Keras ``History`` object produced by ``model.fit``.
    """
    optimizer = tf.optimizers.SGD(0.001)  # keep the learning rate small for fine-tuning
    model.compile(optimizer=optimizer,
                  loss="categorical_crossentropy",
                  metrics=['accuracy'])

    # One pass over the generator per epoch.
    steps = train_data_gen.n // train_data_gen.batch_size
    return model.fit(train_data_gen,
                     steps_per_epoch=steps,
                     epochs=training_epoches,
                     verbose=1)


# Get the path used to save the weights file
def getSavePath(where):
    """Return the weights-file path for the current environment.

    ``where == 2`` selects the Google Drive (Colab) location; any other
    value selects the local ``../data`` location.
    """
    if where == 2:  # cloud drive
        return '/content/drive/MyDrive/tf/course9_3_model.h5'
    # local machine
    return '../data/course9_3_model.h5'


# Decide whether we are running locally or on Colab/cloud
def getLocalOrCloud(reload=False):
    """Detect the execution environment.

    Args:
        reload: when True, short-circuit and return 0 (the caller treats 0
            as "download the data fresh").

    Returns:
        0 if *reload* is True, 2 when running on Colab (the ``/content``
        directory exists), otherwise 1 (local machine). On the local path
        this also creates ``../data`` if it is missing.
    """
    if reload:
        return 0
    if os.path.exists('/content'):  # Colab exposes its VM filesystem here
        return 2
    # Local machine: make sure the output directory exists.
    path = '../data'
    if not os.path.exists(path):
        os.makedirs(path)
        print(f'Make Dir {path}')
    return 1



# # # # # # # # # # # # # # # # # # # # # # # # # #
def main():
    """Entry point: load the flower data, build and train the VGG-16
    model, plot the loss curve, save the weights, and evaluate on the
    validation split."""
    # 0. Hyperparameters
    batch_size = 32
    training_epoches = 30  # adjust when running on Colab
    load_weights = False   # whether to resume from previously trained weights
    validation_split = 0.3
    labels = 5             # number of flower classes

    # 1. Detect the environment (1 = local, 2 = Colab drive, 0 = force download)
    where = getLocalOrCloud(False)

    if where != 2:  # local-machine configuration
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # local machine runs CPU-only
        training_epoches = 2                       # keep local runs short
        validation_split = 0.9  # larger validation fraction locally (smaller training load)
        load_weights = False

    # 2. Load and split the data
    np.random.seed(1000)  # fixed seed for reproducibility
    train_data_gen, valid_data_gen = loadData(where=where, batch_size=batch_size, validation_split=validation_split)

    # 3. Build and train the model
    model = createModel(labels)
    model.summary()

    if load_weights:  # resume from saved weights (weights-only: predict/evaluate after compile)
        path = getSavePath(where=where)
        model.load_weights(path)

    history = trainingModel(model, train_data_gen, training_epoches)

    # 4. Training-loss curve
    plt.plot(history.history['loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')

    print('save weights')
    filePath = getSavePath(where)
    print(filePath)
    model.save_weights(filePath)

    print('model evaluate')
    steps = valid_data_gen.n // valid_data_gen.batch_size
    model.evaluate(valid_data_gen, steps=steps)


# Run only when executed directly (not when imported as a module)
if __name__ == "__main__":
    import time

    time_start = time.time()

    main()  # run the training pipeline

    # Report wall-clock runtime, then show the loss plot.
    time_end = time.time()
    print('time cost', time_end - time_start, 's')
    plt.show()
