# import warnings
# warnings.filterwarnings('ignore')
import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
# tf.compat.v1.logging.set_verbosity(40)

from tensorflow.keras import utils, models, optimizers, losses, metrics
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten

batch_size = 128  # samples per gradient update
nb_output = 10    # number of classes (MNIST digits 0-9)
epochs = 3        # full passes over the training set

# Load MNIST: images arrive as uint8 arrays of shape (N, 28, 28), labels as integer digits.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Add a trailing channel axis (NHWC layout) and scale pixel values to [0, 1].
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255

# One-hot encode labels to match the softmax output + CategoricalCrossentropy loss below.
y_train = utils.to_categorical(y_train, nb_output)
y_test = utils.to_categorical(y_test, nb_output)

class LeNet5(models.Model):
    """LeNet-5-style convolutional classifier.

    Two conv + max-pool stages extract features; three dense layers
    (120 -> 84 -> nb_output) classify, ending in a softmax.
    """

    def __init__(self, nb_output):
        """Create the layers; `nb_output` is the number of output classes."""
        super().__init__()
        self.nb_output = nb_output
        # Feature extractor: 5x5 convolutions with ReLU.
        self.conv1 = Conv2D(6, (5, 5), activation='relu')
        self.conv2 = Conv2D(16, (5, 5), activation='relu')
        # Pooling has no trainable weights, so one instance is safely
        # reused after both convolutions.
        self.maxpool = MaxPooling2D((2, 2))

        # Classifier head.
        self.flatten = Flatten()
        self.fc3 = Dense(120, activation='relu')
        self.fc4 = Dense(84, activation='relu')
        self.fc5 = Dense(self.nb_output, activation='softmax')

    def call(self, x):
        """Forward pass: run the input through the fixed layer pipeline."""
        pipeline = (
            self.conv1, self.maxpool,
            self.conv2, self.maxpool,
            self.flatten, self.fc3, self.fc4, self.fc5,
        )
        for layer in pipeline:
            x = layer(x)
        return x

if __name__ == '__main__':

    # Instantiate the network and build its weights for 28x28 grayscale input.
    model = LeNet5(nb_output)
    model.build(input_shape=(None, 28, 28, 1))

    # Adam with the default 1e-3 learning rate; categorical cross-entropy
    # matches the one-hot labels prepared above.
    model.compile(optimizer=optimizers.Adam(0.001),
                  loss=losses.CategoricalCrossentropy(),
                  metrics=['accuracy'])

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)

    # evaluate() returns [loss, accuracy], in the order set by compile().
    test_loss, test_accuracy = model.evaluate(x_test, y_test)
    print('loss:', test_loss)
    print('accuracy:', test_accuracy)

