import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
import matplotlib.pyplot as plt


class ConvCell(keras.Model):
    """A Conv2D -> BatchNorm -> ReLU building block.

    The convolution runs without a bias term because the following
    BatchNormalization layer supplies its own learned shift.
    """

    def __init__(self, filters, ksize=(3, 3), strides=(1, 1), padding='valid', **kwargs):
        super().__init__(**kwargs)
        stack = [
            layers.Conv2D(filters, ksize, strides, padding, use_bias=False),
            layers.BatchNormalization(),
            layers.ReLU(),
        ]
        self.seq = keras.Sequential(stack)

    def call(self, inputs, training=None, mask=None):
        """Apply the conv/BN/ReLU stack; `training` toggles BN statistics."""
        out = self.seq(inputs, training=training)
        return out


class Lenet5(keras.Model):
    """LeNet-5-style CNN: two conv/pool stages followed by three dense layers.

    Args:
        n_cls: number of output classes; the final Dense layer emits a
            softmax distribution over these classes.
    """

    def __init__(self, n_cls, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = ConvCell(6, (5, 5), (1, 1), 'same')
        self.pool1 = layers.AvgPool2D((2, 2), (2, 2), 'same')
        self.conv2 = ConvCell(16, (5, 5))
        self.pool2 = layers.AvgPool2D((2, 2), (2, 2), 'same')
        # Flatten is created once here instead of inside call(): building a
        # fresh layer on every forward pass defeats Keras sublayer tracking.
        self.flatten = layers.Flatten()
        self.fc1 = layers.Dense(120, activation=activations.relu)
        self.fc2 = layers.Dense(84, activation=activations.relu)
        self.fc3 = layers.Dense(n_cls, activation=activations.softmax)

    def call(self, inputs, training=None, mask=None):
        """Forward pass; returns per-class softmax probabilities."""
        x = self.conv1(inputs, training=training)
        x = self.pool1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.pool2(x, training=training)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x


if '__main__' == __name__:
    BATCH_SIZE = 128
    N_EPOCH = 2
    ALPHA = 1e-3  # Adam learning rate

    # Load MNIST and recast it as a binary task: label 1 iff the digit is 5.
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    print('x_train', x_train.shape)
    print('y_train', y_train.shape, np.unique(y_train))
    print('x_test', x_test.shape)
    print('y_test', y_test.shape, np.unique(y_test))
    # Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
    # NOTE(review): pixel values stay in [0, 255]; BatchNorm in the first
    # conv cell compensates, but explicit scaling may train faster — confirm.
    x_train = tf.expand_dims(x_train, axis=3)
    x_test = tf.expand_dims(x_test, axis=3)
    y_train = (y_train == 5).astype(np.int64)
    y_test = (y_test == 5).astype(np.int64)
    print('x_train', x_train.shape)
    print('y_train', y_train.shape, np.unique(y_train))
    print('x_test', x_test.shape)
    print('y_test', y_test.shape, np.unique(y_test))

    # tf.data.AUTOTUNE replaces the deprecated tf.data.experimental.AUTOTUNE.
    dl_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(1000).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
    dl_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)

    # Two output classes (not-5 / 5) with sparse integer labels.
    model = Lenet5(2)
    model.build(input_shape=(None, *(x_train.shape[1:])))
    model.summary()

    model.compile(
        optimizer=optimizers.Adam(learning_rate=ALPHA),
        loss=losses.sparse_categorical_crossentropy,
        metrics=[metrics.sparse_categorical_accuracy]
    )

    his = model.fit(
        dl_train,
        epochs=N_EPOCH,
    )
    his = his.history

    result = model.evaluate(dl_test)
    print(result)

    # Hard predictions via argmax over the 2-way softmax, then sklearn stats.
    pred = model.predict(dl_test)
    pred = pred.argmax(axis=1)
    mat = confusion_matrix(y_test, pred)
    print(mat)
    print('precision_score', precision_score(y_test, pred))
    print('recall_score', recall_score(y_test, pred))
    print('f1_score', f1_score(y_test, pred))

    # Side-by-side training curves: loss (left) and accuracy (right).
    spr = 1
    spc = 2
    spn = 0
    plt.figure(figsize=[12, 6])
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('Loss')
    plt.plot(his['loss'])
    plt.grid()
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('Accuracy')
    plt.plot(his['sparse_categorical_accuracy'])
    plt.grid()
    plt.show()
