import os

import tensorflow as tf
from tensorflow.python.keras.applications.densenet import DenseNet169, DenseNet121
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, GlobalMaxPooling2D, MaxPooling2D, Flatten, \
    Rescaling, BatchNormalization
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.optimizer_v2.adam import Adam

from util import read_data, get_data_new, get_data

AUTOTUNE = tf.data.experimental.AUTOTUNE

if __name__ == '__main__':
    # Load the (train, validation) image datasets. Assumes get_data() yields
    # (image, integer-label) pairs with images of shape (300, 300, 3) --
    # TODO confirm against util.get_data.
    train_ds, val_ds = get_data()

    # Scale raw pixel values from [0, 255] to [0, 1]. The original code built
    # a `normalized_ds` but then trained on the un-normalized `train_ds`, and
    # never normalized `val_ds`; mapping the rescale onto BOTH datasets fixes
    # that and keeps train/val preprocessing consistent.
    normalization_layer = Rescaling(1. / 255)
    train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
    val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))

    train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
    val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

    # DenseNet121 backbone (ImageNet weights by default) used as a frozen
    # feature extractor; only the dense head below is trained.
    base_model = DenseNet121(input_shape=(300, 300, 3), include_top=False)
    base_model.trainable = False

    model = Sequential([
        base_model,
        MaxPooling2D(),
        Flatten(),
        Dense(1000, activation='relu'),
        BatchNormalization(),
        Dense(200, activation='relu'),
        BatchNormalization(),
        # Two-class softmax head; pairs with sparse_categorical_crossentropy,
        # so labels must be integer class ids (0 or 1).
        Dense(2, activation='softmax')])

    # Weight each class inversely to its frequency so the minority class is
    # not drowned out. The original computed these weights but never passed
    # them to fit(); they are now wired through via `class_weight`.
    num_0 = len(os.listdir('original_data/0'))
    num_1 = len(os.listdir('original_data/1'))
    total = num_0 + num_1
    weight_for_0 = total / num_0 / 2.0
    weight_for_1 = total / num_1 / 2.0
    class_weight = {0: weight_for_0, 1: weight_for_1}

    # Cut the learning rate to 20% when the monitored metric (Keras default:
    # val_loss) plateaus, never going below 1e-5.
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(min_lr=0.00001,
                                                     factor=0.2)

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Stop after 40 epochs without a val_accuracy improvement (default
    # mode='auto' resolves to 'max' for accuracy metrics) and roll back to
    # the best weights seen.
    early_stopping = EarlyStopping(
        monitor='val_accuracy',
        verbose=1,
        patience=40,
        restore_best_weights=True
    )

    # Large epoch budget on purpose; EarlyStopping is expected to end
    # training long before 2000 epochs.
    history = model.fit(train_ds, epochs=2000,
                        callbacks=[early_stopping, reduce_lr],
                        validation_data=val_ds,
                        class_weight=class_weight)
    # NOTE(review): the filename says ResNet50 but the model is DenseNet121.
    # Kept byte-identical because downstream code may load this exact path --
    # rename in a coordinated change.
    model.save('ResNet50.h5')