import os
import tensorflow as tf
import numpy as np


class MyLeNet(tf.keras.Model):
    """LeNet-5 style convolutional network for small grayscale images.

    Two conv + average-pool stages followed by a three-layer dense
    classifier head ending in a softmax over ``n_cls`` classes.
    """

    def __init__(self, input_shape, n_cls):
        """Build the layer stack.

        Args:
            input_shape: shape of one input sample, e.g. ``[28, 28, 1]``.
            n_cls: number of output classes.
        """
        super().__init__()
        # Stage 1: 6 feature maps with a 5x5 receptive field, then 2x2
        # average pooling (scalar args are equivalent to [k, k] pairs).
        self.c1 = tf.keras.layers.Conv2D(6, 5, strides=1, padding='valid',
                                         activation=tf.nn.relu,
                                         input_shape=input_shape)
        self.s2 = tf.keras.layers.AvgPool2D(2, strides=2, padding='same')
        # Stage 2: 16 feature maps, same kernel size and pooling.
        self.c3 = tf.keras.layers.Conv2D(16, 5, strides=1, padding='valid',
                                         activation=tf.nn.relu)
        self.s4 = tf.keras.layers.AvgPool2D(2, strides=2, padding='same')
        # Classifier head: flatten, then 120 -> 84 -> n_cls dense layers.
        self.flat = tf.keras.layers.Flatten()
        self.fc5 = tf.keras.layers.Dense(120, activation=tf.nn.relu)
        self.fc6 = tf.keras.layers.Dense(84, activation=tf.nn.relu)
        self.fc7 = tf.keras.layers.Dense(n_cls, activation=tf.nn.softmax)

    def call(self, inputs, training=None, mask=None):
        """Forward pass; returns per-class probabilities."""
        out = inputs
        # Apply every layer strictly in definition order.
        for layer in (self.c1, self.s2, self.c3, self.s4,
                      self.flat, self.fc5, self.fc6, self.fc7):
            out = layer(out)
        return out


if '__main__' == __name__:

    # Fixed seeds so the shuffle/split and training are reproducible.
    np.random.seed(777)
    tf.random.set_seed(777)

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

    # Scale the uint8 pixel values into [0, 1] floats.
    x_train = x_train.astype(np.float32)
    x_test = x_test.astype(np.float32)

    x_train /= 255.
    x_test /= 255.

    # Add the trailing channel axis expected by Conv2D: (N, 28, 28, 1).
    x_train = x_train.reshape(-1, 28, 28, 1)
    x_test = x_test.reshape(-1, 28, 28, 1)

    # Shuffle, then hold out 10% of the training set for validation.
    m_train, _, _, _ = x_train.shape
    print(m_train)
    a = np.random.permutation(m_train)
    x_train = x_train[a]
    y_train = y_train[a]
    m_val = int(np.ceil(0.1 * m_train))
    x_train, x_val = np.split(x_train, [-m_val], axis=0)
    y_train, y_val = np.split(y_train, [-m_val], axis=0)
    print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)

    model = MyLeNet([28, 28, 1], 10)

    ver = 'v1.0'
    alpha = 0.001        # Adam learning rate
    batch_size = 128
    n_epochs = 2
    filename = os.path.basename(__file__)
    # BUGFIX: build the log path with os.path.join instead of hard-coded
    # Windows '\\' separators; the original created a single directory
    # literally named '_log\<file>\v1.0' on POSIX systems.
    logdir = os.path.join('_log', filename, ver)
    os.makedirs(logdir, exist_ok=True)

    tb_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir,
                                                 update_freq='batch')
    # Stop when validation accuracy stops improving for 2 epochs
    # ('auto' mode infers max because the name contains 'acc').
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_sparse_categorical_accuracy',
                                                  min_delta=1e-5,
                                                  patience=2,
                                                  verbose=1)

    # Integer labels -> sparse categorical loss/metric.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=alpha),
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        metrics=[tf.keras.metrics.sparse_categorical_accuracy]
    )

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=n_epochs,
              verbose=1,
              validation_data=(x_val, y_val),
              callbacks=[tb_callback, early_stop])

    model.evaluate(x_test, y_test)
