import os

import keras.metrics
from keras import Sequential
from keras_preprocessing.image import ImageDataGenerator

from tensorflow.keras.layers import Convolution2D, BatchNormalization, Activation, MaxPooling2D, Dropout, Flatten, Dense
import tensorflow as tf




class Model:
    """Builder for a small VGG-style CNN classifying 150x150 RGB images into 2 classes."""

    def build_model(self):
        """Assemble the network, print its summary, and return it.

        The resulting ``Sequential`` model is also stored on ``self.model``.
        Layer order and shapes must stay exactly as-is: the caller loads
        pretrained weights ("Pre-VGG.h5") built against this architecture.
        """
        conv_opts = dict(kernel_size=(5, 5), padding='same')
        pool_opts = dict(pool_size=(2, 2), strides=(2, 2), padding='same')

        # First conv block also declares the expected input shape.
        layers = [
            Convolution2D(filters=32, input_shape=(150, 150, 3), **conv_opts),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D(**pool_opts),
        ]

        # Two more conv blocks (Conv -> BN -> ReLU -> MaxPool -> Dropout).
        for n_filters in (64, 64):
            layers += [
                Convolution2D(filters=n_filters, **conv_opts),
                BatchNormalization(),
                Activation('relu'),
                MaxPooling2D(**pool_opts),
                Dropout(0.15),
            ]

        # Dense head: two hidden layers with heavy dropout.
        layers.append(Flatten())
        for units in (512, 128):
            layers += [
                Dense(units),
                BatchNormalization(),
                Activation('relu'),
                Dropout(0.5),
            ]

        # Two-way softmax output (BN before softmax mirrors the saved weights).
        layers += [
            Dense(2),
            BatchNormalization(),
            Activation('softmax'),
        ]

        self.model = Sequential(layers)
        self.model.summary()
        return self.model



# Reconstruct the architecture and restore the pretrained weights.
model = Model().build_model()
model.load_weights("Pre-VGG.h5")

# Rescale pixels to [0, 1] and split the directory 70/30 into
# 'training' / 'validation' subsets.
validation_image_generator = ImageDataGenerator(rescale=1. / 255,
                                                validation_split=0.3)

# Single-argument os.path.join was a no-op; build the full path directly.
validation_dir = os.path.join("train", "val")
batchsize = 64

# NOTE(review): this 'training'-subset iterator is never used below — it is
# kept only for its "Found N images" log line; confirm whether it can go.
train = validation_image_generator.flow_from_directory(directory=validation_dir,
                                                       batch_size=batchsize,
                                                       shuffle=False,
                                                       target_size=(150, 150),
                                                       class_mode='categorical',
                                                       subset='training')
# Deterministic (shuffle=False) iterator over the held-out 30% split.
val_data_gen = validation_image_generator.flow_from_directory(directory=validation_dir,
                                                              batch_size=batchsize,
                                                              shuffle=False,
                                                              target_size=(150, 150),
                                                              class_mode='categorical',
                                                              subset='validation')
total_val = val_data_gen.n  # number of validation images found
# Compile so evaluate() reports loss, accuracy, precision and recall.
# from_logits=False because the model ends in a softmax activation.
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0005),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=["accuracy", keras.metrics.Precision(), keras.metrics.Recall()])

# Evaluate the model; no per-sample predictions are emitted.
loss, accuracy, precision, recall = model.evaluate(val_data_gen)
print('\ntest loss', loss)
print('accuracy', accuracy)
print('precision', precision)
print('recall', recall)
# Guard against ZeroDivisionError when precision + recall == 0
# (e.g. the model predicts only negatives on this split).
if precision + recall > 0:
    print('F-measure', (2 * recall * precision) / (recall + precision))
else:
    print('F-measure', 0.0)