import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, optimizers, losses, metrics, activations, callbacks
import matplotlib.pyplot as plt

# Reproducibility: seed both the NumPy and the TensorFlow RNGs.
np.random.seed(777)
tf.random.set_seed(777)
# Base name of this script; used below to build the TensorBoard log directory.
filename = os.path.basename(__file__)

# Training hyper-parameters.
batch_size = 128  # samples per gradient step
n_epoch = 20      # maximum number of training epochs
alpha = 0.001     # Adam learning rate

# 1. Process the cifar2 dataset with a CNN, as required:
# (1) data pipeline
# - fetch the files via a data pipeline
# - classify them by their (directory) name
# - cache the training and test data
# Expected layout: <base>/{train,test}/{automobile,airplane}/<image files>
dir_base = '../../../../large_data/DL2/_many_files/cifar2/'
dir_train = dir_base + 'train/'
dir_test = dir_base + 'test/'
dir_train_auto = dir_train + 'automobile'
dir_train_plane = dir_train + 'airplane'
dir_test_auto = dir_test + 'automobile'
dir_test_plane = dir_test + 'airplane'


def my_read_dir(dir):  # `dir` shadows the builtin, kept for caller compatibility
    """Load every image in directory *dir* into one float32 array.

    Parameters
    ----------
    dir : str
        Directory whose entries are all readable image files of one shape.

    Returns
    -------
    numpy.ndarray
        Array of shape (n_images, H, W, C), dtype float32, scaled by 1/255.
    """
    data = []
    # sorted() makes the load order deterministic; bare os.listdir order is
    # filesystem-dependent and would vary between runs/machines.
    for basename in sorted(os.listdir(dir)):
        path = os.path.join(dir, basename)
        data.append(plt.imread(path))
    data = np.array(data, dtype=np.float32)
    # NOTE(review): assumes plt.imread yields uint8 [0, 255] (e.g. JPEG);
    # for PNG it already returns floats in [0, 1] and this would
    # double-scale — confirm the dataset's file format.
    data /= 255.0
    return data


print('Loading ...')
x_train_auto = my_read_dir(dir_train_auto)
x_train_plane = my_read_dir(dir_train_plane)
x_test_auto = my_read_dir(dir_test_auto)
x_test_plane = my_read_dir(dir_test_plane)
print('Loaded.')
for tag, images in [('x_train_auto', x_train_auto),
                    ('x_train_plane', x_train_plane),
                    ('x_test_auto', x_test_auto),
                    ('x_test_plane', x_test_plane)]:
    print(tag, images.shape)

# Labels: automobile -> 0, airplane -> 1, shaped (n, 1) for Keras.
y_train_auto = np.zeros([len(x_train_auto), 1])
y_train_plane = np.ones([len(x_train_plane), 1])
y_test_auto = np.zeros([len(x_test_auto), 1])
y_test_plane = np.ones([len(x_test_plane), 1])
for tag, labels in [('y_train_auto', y_train_auto),
                    ('y_train_plane', y_train_plane),
                    ('y_test_auto', y_test_auto),
                    ('y_test_plane', y_test_plane)]:
    print(tag, labels.shape)

# Stack both classes, then shuffle the training pairs with a single
# permutation so images and labels stay aligned.
x_train = np.concatenate([x_train_auto, x_train_plane])
y_train = np.concatenate([y_train_auto, y_train_plane])
perm = np.random.permutation(len(x_train))
x_train = x_train[perm]
y_train = y_train[perm]
# Test data keeps its natural order; evaluation does not need shuffling.
x_test = np.concatenate([x_test_auto, x_test_plane])
y_test = np.concatenate([y_test_auto, y_test_plane])

# NOTE: alternative tf.data input pipeline, currently unused — the NumPy
# arrays are passed to model.fit directly below.
# ds_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))\
#     .shuffle(len(x_train))\
#     .batch(batch_size)\
#     .prefetch(tf.data.experimental.AUTOTUNE)
# ds_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))\
#     .shuffle(len(x_test))\
#     .batch(batch_size)\
#     .prefetch(tf.data.experimental.AUTOTUNE)

# (2) Model, as required:
# - one 3x3 convolution with 32 output channels, followed by max pooling
# - one 5x5 convolution with 64 output channels, followed by max pooling
# - dropout at rate 0.1, then flatten
# - a dense layer for the binary classification
model = tf.keras.Sequential([
    layers.Conv2D(32, (3, 3), strides=1, padding='valid'),
    layers.BatchNormalization(),
    layers.Activation(activations.relu),
    layers.MaxPool2D((2, 2), (2, 2), 'same'),
    layers.Conv2D(64, (5, 5), strides=1, padding='valid'),
    layers.BatchNormalization(),
    layers.Activation(activations.relu),
    layers.MaxPool2D((2, 2), (2, 2), 'same'),
    layers.Dropout(0.1),
    layers.Flatten(),
    # softmax, not sigmoid: sparse_categorical_crossentropy expects the
    # two outputs to form a probability distribution over the classes;
    # independent sigmoid units do not sum to 1.
    layers.Dense(2, activation=activations.softmax)
])

# (5) Compile the model with the Adam optimizer; integer labels (0/1)
# pair with the sparse categorical loss/metric.
model.compile(
    optimizer=optimizers.Adam(learning_rate=alpha),
    loss=losses.sparse_categorical_crossentropy,
    metrics=[metrics.sparse_categorical_accuracy]
)

# (6) Train the model.
ver = 'v2.1'
# TensorBoard logs go to _log/<script name>/<version>.
logdir = os.path.join('_log', filename, ver)
print(logdir)
# update_freq='batch' writes metrics after every batch, not only per epoch.
tb_callback = callbacks.TensorBoard(log_dir=logdir, update_freq='batch')


class CustomStopper(callbacks.EarlyStopping):
    """EarlyStopping that stays dormant until the monitored metric first
    exceeds ``monitor_min_value``, so training is never stopped while the
    model is still below a minimum acceptable quality."""

    def __init__(self, monitor_min_value, **kwargs):
        super().__init__(**kwargs)
        # Threshold the monitored value must exceed before the normal
        # EarlyStopping bookkeeping is allowed to run.
        self.monitor_min_value = monitor_min_value

    def on_epoch_end(self, epoch, logs=None):
        current = self.get_monitor_value(logs)
        # get_monitor_value returns None when the monitored key is missing
        # from `logs` (e.g. a typo in `monitor`); the unguarded comparison
        # would raise TypeError in that case.
        if current is not None and current > self.monitor_min_value:
            super().on_epoch_end(epoch, logs)


# Early stopping is armed only once val accuracy exceeds 0.75
# (monitor_min_value); after that it requires an improvement of at least
# min_delta within `patience` epochs over the 0.75 baseline.
early_stop = CustomStopper(monitor='val_sparse_categorical_accuracy',
                           min_delta=1e-2, patience=2, verbose=1,
                           baseline=0.75, monitor_min_value=0.75)
# validation_split=0.1 holds out the last 10% of the (pre-shuffled)
# training arrays for per-epoch validation.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=n_epoch,
          verbose=1,
          callbacks=[tb_callback, early_stop],
          validation_split=0.1)

# (7) Evaluate on the held-out test set and print the final result.
print('使用测试集作为验证集，打印最终结果')
model.evaluate(x_test, y_test, verbose=1)
