import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import optimizers, losses, metrics, activations, layers, callbacks
import numpy as np
import sys
import os


def my_conv(filters, kernel_size, strides, padding='same'):
    """Build a 2D convolution layer; padding defaults to 'same'."""
    conv_cfg = dict(filters=filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding)
    return layers.Conv2D(**conv_cfg)


class ConvNbRelu(keras.Model):
    """Composite block: Conv2D -> BatchNormalization -> ReLU."""

    def __init__(self, filters, kernel_size, strides, **kwargs):
        super().__init__(**kwargs)
        # Attribute names are part of the checkpoint layout; keep them stable.
        self.conv = my_conv(filters, kernel_size, strides)
        self.bn = layers.BatchNormalization()
        self.relu = layers.ReLU()

    def call(self, x, training=None):
        # Forward the training flag so BatchNorm uses batch statistics
        # during training and moving averages at inference.
        out = x
        for sub_layer in (self.conv, self.bn, self.relu):
            out = sub_layer(out, training=training)
        return out


class InceptionNetBlock(keras.Model):
    """Inception-style block: four parallel branches, channel-concatenated."""

    def __init__(self, filters, strides, **kwargs):
        super().__init__(**kwargs)
        self.filters = filters
        # Branches 1-3: conv paths with increasing receptive fields
        # (1x1, 3x3, 5x5) and different channel widths.
        self.conv1 = ConvNbRelu(filters * 2, [1, 1], strides)
        self.conv2 = ConvNbRelu(filters * 4, [3, 3], strides)
        self.conv3 = ConvNbRelu(filters, [5, 5], strides)
        # Branch 4: 3x3 max-pool followed by a stride-1 1x1 projection.
        self.pool4 = layers.MaxPool2D([3, 3], strides, 'same')
        self.conv4 = ConvNbRelu(filters, [1, 1], (1, 1))

    def call(self, x, training=None):
        branch1 = self.conv1(x, training=training)
        branch2 = self.conv2(x, training=training)
        branch3 = self.conv3(x, training=training)
        pooled = self.pool4(x, training=training)
        branch4 = self.conv4(pooled, training=training)
        # Join branch outputs along the channel axis (NHWC -> axis 3).
        return tf.concat([branch1, branch2, branch3, branch4], axis=3)


class InceptionNet(keras.Model):
    """Stack of Inception blocks followed by a small dense classifier head.

    Each of the ``n_block_units`` units contributes two blocks: the first
    downsamples (stride 2) and doubles the filter count, the second keeps
    the spatial size (stride 1). The head ends in raw logits (``n_cls``
    units, no activation) for use with a from-logits loss.
    """

    def __init__(self, n_cls, init_filters, n_block_units, **kwargs):
        super().__init__(**kwargs)
        self.block_seq = keras.Sequential()
        filters = None
        for block_unit_id in range(n_block_units):
            for layer_id in range(2):
                if layer_id == 0:
                    # Leading block of the unit: downsample and widen.
                    strides = (2, 2)
                    filters = init_filters if filters is None else filters * 2
                else:
                    # Trailing block: same width, no downsampling.
                    strides = (1, 1)
                print(f'strides: {strides}, filters: {filters}')
                self.block_seq.add(InceptionNetBlock(filters, strides))
        # Classifier head; attribute names kept stable for checkpoints.
        self.glb_avg = layers.GlobalAvgPool2D()
        self.flt = layers.Flatten()
        self.fc1 = layers.Dense(200, activation=activations.relu)
        self.dp = layers.Dropout(0.3)
        self.fc2 = layers.Dense(n_cls, activation=None)

    def call(self, x, training=None):
        # Apply the block stack and head in a fixed sequence, threading
        # the training flag through (BatchNorm/Dropout depend on it).
        out = x
        pipeline = (self.block_seq, self.glb_avg, self.flt,
                    self.fc1, self.dp, self.fc2)
        for stage in pipeline:
            out = stage(out, training=training)
        return out


if '__main__' == __name__:

    # Fix both NumPy and TensorFlow RNGs for reproducible subsampling
    # and weight initialization.
    np.random.seed(777)
    tf.random.set_seed(777)

    # load data
    # MNIST: x_* are uint8 images of shape (m, 28, 28); y_* are digit labels.
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    print('x_train', x_train.shape)
    print('y_train', y_train.shape)
    print('x_test', x_test.shape)
    print('y_test', y_test.shape)

    # params
    VER = 'v5.0'
    ALPHA = 0.01
    SELECT_RATE = 0.05  # Dataset is large; for a quick demo only a random fraction is used. Set to 1.0 in production code.
    N_CLS = len(np.unique(y_train))
    N_BATCH_SIZE = 64
    N_EPOCHS = 6
    FILENAME = os.path.basename(__file__)
    # SPEC encodes the hyperparameters so each configuration gets its own
    # checkpoint and log directories.
    SPEC = VER + '_' + str(ALPHA) + '_' + str(SELECT_RATE) + '_' + str(N_BATCH_SIZE) + '_' + str(N_EPOCHS)
    print(SPEC)
    SAVE_DIR = os.path.join('_save', FILENAME, SPEC)
    SAVE_PREFIX = os.path.join(SAVE_DIR, 'weights')
    LOG_DIR = os.path.join('_log', FILENAME, SPEC)

    # select data
    # train
    # Randomly keep a SELECT_RATE fraction of the training set.
    M_TRAIN_ORI, PIC_H, PIC_W = x_train.shape
    M_TRAIN = int(np.ceil(M_TRAIN_ORI * SELECT_RATE))
    idx = np.random.permutation(M_TRAIN_ORI)[:M_TRAIN]
    x_train = x_train[idx]
    y_train = y_train[idx]
    # test
    # Same fractional subsampling for the test set.
    M_TEST_ORI, PIC_H, PIC_W = x_test.shape
    M_TEST = int(np.ceil(M_TEST_ORI * SELECT_RATE))
    idx = np.random.permutation(M_TEST_ORI)[:M_TEST]
    x_test = x_test[idx]
    y_test = y_test[idx]

    # process data
    # Scale pixels to [0, 1] and add a trailing channel axis (NHWC).
    x_train = x_train.astype(np.float32) / 255.0
    x_test = x_test.astype(np.float32) / 255.0
    x_train = x_train.reshape((-1, 28, 28, 1))
    x_test = x_test.reshape((-1, 28, 28, 1))

    # check again
    print('x_train', x_train.shape)
    print('y_train', y_train.shape)
    print('x_test', x_test.shape)
    print('y_test', y_test.shape)

    # model
    # build() lets summary() run before any data has been seen.
    model = InceptionNet(N_CLS, 16, 2)
    model.build(input_shape=(None, 28, 28, 1))
    model.summary()
    # Model outputs raw logits, hence from_logits=True.
    model.compile(
        optimizer=optimizers.Adam(learning_rate=ALPHA),
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[metrics.sparse_categorical_accuracy]
    )

    # train
    # Reuse a previous checkpoint for this exact SPEC if one exists;
    # otherwise train from scratch and save the weights.
    if os.path.exists(SAVE_DIR):
        print('Loading...')
        model.load_weights(SAVE_PREFIX)
        print('Loaded')
    else:
        # profile_batch=0 disables TensorBoard profiling overhead.
        tb_callback = callbacks.TensorBoard(log_dir=LOG_DIR,
                                            update_freq='batch',
                                            profile_batch=0)
        model.fit(x_train, y_train,
                  batch_size=N_BATCH_SIZE,
                  epochs=N_EPOCHS,
                  callbacks=[tb_callback],
                  validation_data=(x_test, y_test),
                  validation_batch_size=N_BATCH_SIZE)
        model.save_weights(SAVE_PREFIX)
        print('Saved')

    # test
    print('Testing...')
    model.evaluate(x_test, y_test, batch_size=N_BATCH_SIZE)
    print('Tested')

    # predict
    print('Predicting...')
    NUM_OF_PRED = 20
    x_pred = x_test[:NUM_OF_PRED]
    y_pred = y_test[:NUM_OF_PRED]
    # argmax over logits gives the predicted class per sample.
    h_pred = tf.argmax(model.predict(x_pred), 1).numpy()
    # h2_pred = model.predict_classes(x_pred)  # AttributeError: 'InceptionNet' object has no attribute 'predict_classes'
    print(y_pred)
    print(h_pred)
    # print(h2_pred)

    # Columns: true label, predicted label, 1 if they match else 0.
    compare_list = np.c_[y_pred, h_pred, y_pred == h_pred]
    print(compare_list)
    print('Predicted')
