import datetime
from pathlib import Path

import tensorflow as tf
from tensorboard import notebook
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
import pandas as pd


# Number of images per batch in the train/test tf.data pipelines below.
BATCH_SIZE = 64


def plot_metric(history, metric):
    """Plot the training vs. validation curve of one metric over epochs.

    Args:
        history: Keras ``History`` object returned by ``model.fit``.
        metric: Metric name, e.g. ``"loss"`` or ``"auc"``; the validation
            series is looked up under ``"val_" + metric``.
    """
    train_label = "train_" + metric
    val_label = 'val_' + metric
    train_series = history.history[metric]
    val_series = history.history[val_label]
    # Epochs are 1-based on the x-axis.
    epoch_axis = range(1, len(train_series) + 1)
    plt.plot(epoch_axis, train_series, 'bo--')
    plt.plot(epoch_axis, val_series, 'ro-')
    plt.title('Training and validation ' + metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([train_label, val_label])
    plt.show()


def load_image(path, size=(32, 32)):
    """Load a JPEG image and derive its binary label from the file path.

    Args:
        path: Scalar string tensor holding the image file path. Paths that
            contain "Positive" are labelled 1, everything else 0.
        size: Target (height, width) the decoded image is resized to.

    Returns:
        A ``(image, label)`` pair: the image as a float tensor scaled to
        [0, 1] and resized to ``size``, and the label as an int8 scalar.
    """
    # BUG FIX: the original used a Python `if`/`else` on the tensor returned
    # by tf.strings.regex_full_match. Inside Dataset.map this traces in graph
    # mode, where truth-testing a symbolic tensor raises an error. Casting the
    # boolean match result to int8 computes the label entirely in-graph.
    label = tf.cast(tf.strings.regex_full_match(path, ".*Positive.*"), tf.int8)
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img)  # NOTE: inputs are expected to be JPEG
    img = tf.image.resize(img, size) / 255.0
    return img, label


# Build the input pipelines. map() runs the preprocessing in parallel and the
# trailing prefetch overlaps it with training; AUTOTUNE lets tf.data pick the
# degree of parallelism. (NOTE(review): tf.data.AUTOTUNE is the non-deprecated
# alias on TF >= 2.3 — confirm the installed version before switching.)
_AUTOTUNE = tf.data.experimental.AUTOTUNE

ds_train = tf.data.Dataset.list_files("train/*/*.jpg")
ds_train = ds_train.map(load_image, num_parallel_calls=_AUTOTUNE)
ds_train = ds_train.shuffle(buffer_size=1000)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(_AUTOTUNE)

ds_test = tf.data.Dataset.list_files("test/*/*.jpg")
ds_test = ds_test.map(load_image, num_parallel_calls=_AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(_AUTOTUNE)

# Sanity check: pull one batch and print its shapes — expected
# (BATCH_SIZE, 32, 32, 3) for images and (BATCH_SIZE,) for labels.
for images, labels in ds_train.take(1):
    print(images.shape, labels.shape)

tf.keras.backend.clear_session()


def _conv_block(tensor, filters, kernel):
    """One same-padding ReLU conv followed by a valid-padding 2x2 max-pool."""
    tensor = layers.Conv2D(filters, kernel_size=kernel,
                           activation='relu', padding='same')(tensor)
    return layers.MaxPool2D(padding='valid')(tensor)


# Functional-API CNN: four conv/pool stages, then a small dense head ending
# in a single sigmoid unit for binary classification.
inputs = layers.Input(shape=(32, 32, 3))
x = layers.Conv2D(32, kernel_size=(5, 5), activation='relu', padding='same',
                  bias_initializer='zero', use_bias=True)(inputs)
x = layers.MaxPool2D(padding='valid')(x)
x = _conv_block(x, 64, (5, 5))
x = _conv_block(x, 128, (3, 3))
x = _conv_block(x, 128, (3, 3))
x = layers.Dropout(rate=0.1)(x)
x = layers.Flatten()(x)
for width in (128, 64, 32):
    x = layers.Dense(width, activation='relu')(x)
outputs = layers.Dense(1, activation='sigmoid')(x)

model = models.Model(inputs=inputs, outputs=outputs)
model.summary()

# Train the model. pathlib normalises the TensorBoard log-dir path across
# operating systems; the timestamp keeps runs from overwriting each other.
run_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = str(Path('model/autograph') / run_stamp)

tb_callback = tf.keras.callbacks.TensorBoard(log_dir, histogram_freq=1)

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss='binary_crossentropy',
    metrics=["AUC"]
)

history = model.fit(
    ds_train,
    epochs=20,
    validation_data=ds_test,
    callbacks=[tb_callback],
    workers=4,
)

notebook.list()
# Inspect the run in TensorBoard from inside the notebook.
notebook.start("--logdir model/autograph/")

# Summarise the training history as a DataFrame indexed by 1-based epoch.
dfhistory = pd.DataFrame(history.history)
dfhistory.index = range(1, len(dfhistory) + 1)
dfhistory.index.name = 'epoch'

print(dfhistory)

plot_metric(history, "loss")
plot_metric(history, "auc")
# FIX: the model is compiled with metrics=["AUC"], so evaluate() returns
# [loss, auc]; the second value was previously misnamed "val_accuracy".
val_loss, val_auc = model.evaluate(ds_test, workers=4)

print(val_loss, val_auc)

# Export the trained model in TensorFlow SavedModel format.
model.save('model/tf_model_savedmodel', save_format="tf")
print('export saved model.')

# Reload the saved model (example, left disabled):
# model_loaded = tf.keras.models.load_model('model/tf_model_savedmodel')
# model_loaded.evaluate(ds_test)
