#!/usr/bin/python3
# -*- coding: utf-8 -*-

"""
Created on 2021/11/6
@author: Yuze Xuan
"""

import struct

import numpy as np
from keras.losses import SparseCategoricalCrossentropy
from matplotlib import pyplot as plt
from tensorflow.python.keras.callbacks import TensorBoard
from tensorflow.python.keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.optimizer_v2.adam import Adam


def load_file(file_path, file_type) -> np.ndarray:
    """Decode an MNIST idx-format file into a numpy array.

    :param file_path: Path to the idx file on disk.
    :param file_type: 'data' for an idx3-ubyte image file,
                      'label' for an idx1-ubyte label file.
    :return: For 'data', a float64 array of shape (num, rows, cols, 1) with
             pixel values scaled to [0, 1]; for 'label', a uint8 vector.
    :raises ValueError: If file_type is not 'data' or 'label'.
    """
    # Explicit check instead of `assert`: asserts are stripped under `python -O`.
    if file_type not in ('data', 'label'):
        raise ValueError('param file_type only support "data" or "label"')
    # Read the whole file once and close it deterministically (the original
    # open(...).read() leaked the file handle).
    with open(file_path, 'rb') as fh:
        bin_data = fh.read()
    if file_type == 'data':
        # Header: magic number, image count, rows, columns (big-endian int32).
        header_fmt = '>iiii'
        _magic, img_num, rows_num, cols_num = struct.unpack_from(header_fmt, bin_data, 0)
        offset = struct.calcsize(header_fmt)
        # Bulk-decode every pixel in one C-level pass instead of one
        # struct.unpack_from call per image; astype(float64)/255. reproduces
        # the original np.empty (float64) array normalised to [0, 1].
        pixels = np.frombuffer(bin_data, dtype=np.uint8, offset=offset,
                               count=img_num * rows_num * cols_num)
        return pixels.reshape(img_num, rows_num, cols_num, 1).astype(np.float64) / 255.
    else:
        # Header: magic number, label count (big-endian int32).
        header_fmt = '>ii'
        _magic, img_num = struct.unpack_from(header_fmt, bin_data, 0)
        offset = struct.calcsize(header_fmt)
        # Labels are one unsigned byte each, stored contiguously after the header.
        labels = np.frombuffer(bin_data, dtype=np.uint8, offset=offset, count=img_num)
        # Copy: frombuffer returns a read-only view, but the original returned
        # a fresh writable uint8 array.
        return labels.copy()


if __name__ == '__main__':
    # Load the raw MNIST idx files; images come back normalised to [0, 1],
    # labels as uint8 class indices (0-9).
    x_train_val = load_file('mnist-data/train-images-idx3-ubyte', 'data')
    y_train_val = load_file('mnist-data/train-labels-idx1-ubyte', 'label')
    x_test = load_file('mnist-data/t10k-images-idx3-ubyte', 'data')
    y_test = load_file('mnist-data/t10k-labels-idx1-ubyte', 'label')

    # LeNet-5-style network, assembled layer by layer:
    # conv(6@5x5) -> pool -> conv(16@5x5) -> pool -> conv(120@5x5)
    # -> flatten -> dense(84) -> dense(10, softmax)
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=5, activation='relu',
                     input_shape=(28, 28, 1), padding='same'))
    model.add(MaxPool2D(pool_size=2, strides=2))
    model.add(Conv2D(filters=16, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPool2D(pool_size=2, strides=2))
    model.add(Conv2D(filters=120, kernel_size=5, activation='relu', padding='same'))
    model.add(Flatten())
    model.add(Dense(84, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.summary()

    # Sparse categorical cross-entropy: labels are integer class ids, not one-hot.
    model.compile(loss=SparseCategoricalCrossentropy(), optimizer=Adam(learning_rate=0.01),
                  metrics=['accuracy'])
    history = model.fit(x_train_val, y_train_val, epochs=5, batch_size=64, validation_split=0.1,
                        callbacks=[TensorBoard()])
    score = model.evaluate(x_test, y_test)
    print('Test Loss: %.4f, Test accuracy: %.4f' % (score[0], score[1]))

    # Visualize training history: accuracy and loss curves side by side.
    # TensorBoard logs can also be inspected with: tensorboard --logdir logs
    hist = history.history
    panels = [
        ('Training and Validation Accuracy',
         [('accuracy', 'Training Accuracy'), ('val_accuracy', 'Validation Accuracy')]),
        ('Training and Validation Loss',
         [('loss', 'Training Loss'), ('val_loss', 'Validation Loss')]),
    ]
    for pos, (title, series) in enumerate(panels, start=1):
        plt.subplot(1, 2, pos)
        for key, label in series:
            plt.plot(hist[key], label=label)
        plt.title(title)
        plt.legend()
    plt.savefig('res_img/train_val_loss_acc.png')
    plt.show()
