import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import optimizers, losses, metrics, activations, layers, callbacks
import numpy as np
import sys
import os


def my_conv(filters, kernel_size, strides, padding='same'):
    """Build a 2-D convolution layer ('same' padding by default).

    Thin factory around ``layers.Conv2D`` so every conv in this file is
    constructed with a consistent argument set.
    """
    conv_kwargs = dict(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
    )
    return layers.Conv2D(**conv_kwargs)


class ConvNbRelu(keras.Model):
    """Conv2D -> BatchNorm -> ReLU stack: the basic unit of the Inception
    blocks below."""

    def __init__(self, filters, kernel_size, strides, **kwargs):
        super().__init__(**kwargs)
        # Sub-layers are applied in declaration order inside call().
        self.conv = my_conv(filters, kernel_size, strides)
        self.bn = layers.BatchNormalization()
        self.relu = layers.ReLU()

    def call(self, x, training=None):
        # Forward the training flag so BatchNormalization can switch between
        # batch statistics and its moving averages.
        h = self.conv(x, training=training)
        h = self.bn(h, training=training)
        return self.relu(h, training=training)


class InceptionNetBlock(keras.Model):
    """Inception-style block: four parallel branches (1x1, 3x3, 5x5 convs,
    and max-pool followed by a 1x1 conv) concatenated along channels.

    All branches use 'same' padding with identical strides, so their
    spatial dimensions agree and only the channel counts differ.
    """

    def __init__(self, filters, strides, **kwargs):
        super().__init__(**kwargs)
        self.filters = filters

        # Branch widths relative to `filters`: 2x, 4x, 1x, 1x.
        self.conv1 = ConvNbRelu(filters * 2, [1, 1], strides)
        self.conv2 = ConvNbRelu(filters * 4, [3, 3], strides)
        self.conv3 = ConvNbRelu(filters, [5, 5], strides)
        self.pool4 = layers.MaxPool2D([3, 3], strides, 'same')
        self.conv4 = ConvNbRelu(filters, [1, 1], (1, 1))

    def call(self, x, training=None):
        branches = [
            self.conv1(x, training=training),
            self.conv2(x, training=training),
            self.conv3(x, training=training),
            self.conv4(self.pool4(x, training=training), training=training),
        ]
        # NHWC layout assumed — concatenate over the channel axis.
        return tf.concat(branches, axis=3)


class InceptionNet(keras.Model):
    """Stack of InceptionNetBlocks followed by a small classifier head.

    Each of the `n_block_units` units contributes two blocks: a strided
    (2, 2) block — at double the previous width after the first unit —
    and a stride-(1, 1) block at the same width.
    """

    def __init__(self, n_cls, init_filters, n_block_units, **kwargs):
        super().__init__(**kwargs)
        self.block_seq = keras.Sequential()
        filters = init_filters
        for block_unit_id in range(n_block_units):
            if block_unit_id > 0:
                filters *= 2  # widen once per unit, starting from the second
            for strides in ((2, 2), (1, 1)):
                print(f'strides: {strides}, filters: {filters}')
                self.block_seq.add(InceptionNetBlock(filters, strides))
        # Classifier head: global average pooling, one hidden FC layer with
        # dropout, then raw logits (no softmax — the loss applies it).
        self.glb_avg = layers.GlobalAvgPool2D()
        self.flt = layers.Flatten()
        self.fc1 = layers.Dense(200, activation=activations.relu)
        self.dp = layers.Dropout(0.3)
        self.fc2 = layers.Dense(n_cls, activation=None)

    def call(self, x, training=None):
        h = self.block_seq(x, training=training)
        h = self.glb_avg(h, training=training)
        h = self.flt(h, training=training)
        h = self.fc1(h, training=training)
        h = self.dp(h, training=training)
        return self.fc2(h, training=training)


if '__main__' == __name__:

    # Fix RNG seeds so the demo run is reproducible.
    np.random.seed(777)
    tf.random.set_seed(777)

    # load data
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    print('x_train', x_train.shape)
    print('y_train', y_train.shape)
    print('x_test', x_test.shape)
    print('y_test', y_test.shape)

    # params
    VER = 'v5.0'
    ALPHA = 0.001
    SELECT_RATE = 0.05  # The dataset is large; for a quick demo only a random fraction is used. Set to 1.0 for a real run.
    N_CLS = len(np.unique(y_train))
    N_BATCH_SIZE = 64
    N_EPOCHS = 6
    FILENAME = os.path.basename(__file__)
    # SPEC encodes the hyper-parameters into the save/log directory names.
    SPEC = VER + '_' + str(ALPHA) + '_' + str(SELECT_RATE) + '_' + str(N_BATCH_SIZE) + '_' + str(N_EPOCHS)
    print(SPEC)
    SAVE_DIR = os.path.join('_save', FILENAME, SPEC)
    SAVE_PREFIX = os.path.join(SAVE_DIR, 'weights')
    LOG_DIR = os.path.join('_log', FILENAME, SPEC)

    # one hot
    # Convert integer labels to one-hot rows by indexing an identity matrix.
    y_train = np.eye(N_CLS)[y_train]
    y_test = np.eye(N_CLS)[y_test]
    print('y_train', y_train.shape)
    print('y_test', y_test.shape)

    # select data
    # train: draw a random SELECT_RATE fraction (without replacement)
    M_TRAIN_ORI, PIC_H, PIC_W = x_train.shape
    M_TRAIN = int(np.ceil(M_TRAIN_ORI * SELECT_RATE))
    idx = np.random.permutation(M_TRAIN_ORI)[:M_TRAIN]
    x_train = x_train[idx]
    y_train = y_train[idx]
    # test: same subsampling scheme
    M_TEST_ORI, PIC_H, PIC_W = x_test.shape
    M_TEST = int(np.ceil(M_TEST_ORI * SELECT_RATE))
    idx = np.random.permutation(M_TEST_ORI)[:M_TEST]
    x_test = x_test[idx]
    y_test = y_test[idx]

    # process data
    # Scale pixels to [0, 1] and add an explicit channel dimension (NHWC).
    x_train = x_train.astype(np.float32) / 255.0
    x_test = x_test.astype(np.float32) / 255.0
    x_train = x_train.reshape((-1, 28, 28, 1))
    x_test = x_test.reshape((-1, 28, 28, 1))

    # check again
    print('x_train', x_train.shape)
    print('y_train', y_train.shape)
    print('x_test', x_test.shape)
    print('y_test', y_test.shape)

    # model
    model = InceptionNet(N_CLS, 16, 2)
    model.build(input_shape=(None, 28, 28, 1))
    model.summary()
    # model.compile(
    #     optimizer=optimizers.Adam(learning_rate=ALPHA),
    #     loss=losses.SparseCategoricalCrossentropy(from_logits=True),
    #     metrics=[metrics.sparse_categorical_accuracy]
    # )
    optim = tf.optimizers.Adam(learning_rate=ALPHA)

    def criterion(h, y):
        """Mean softmax cross-entropy between logits `h` and one-hot labels `y`.

        Uses the fused, numerically stable TF op instead of the original
        manual ``softmax`` + ``log(h + eps)`` formulation, which loses
        precision for confident predictions and biases the loss by eps.
        """
        return tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h)
        )


    def acc(h, y):
        """Fraction of rows where the argmax of logits `h` matches the
        argmax of one-hot labels `y`."""
        pred = tf.argmax(h, 1)
        truth = tf.argmax(y, 1)
        hits = tf.cast(tf.equal(pred, truth), tf.float32)
        return tf.reduce_mean(hits)


    # train
    if os.path.exists(SAVE_DIR):
        # A previous run for this SPEC exists — reuse its weights.
        print('Loading...')
        model.load_weights(SAVE_PREFIX)
        print('Loaded')
    else:
        # tb_callback = callbacks.TensorBoard(log_dir=LOG_DIR,
        #                                     update_freq='batch',
        #                                     profile_batch=0)
        # model.fit(x_train, y_train,
        #           batch_size=N_BATCH_SIZE,
        #           epochs=N_EPOCHS,
        #           callbacks=[tb_callback],
        #           validation_data=(x_test, y_test),
        #           validation_batch_size=N_BATCH_SIZE)

        fw = tf.summary.create_file_writer(LOG_DIR)
        g_step = -1
        # Loop-invariant sizes, hoisted out of the epoch loop.
        N_BATCHES = int(np.ceil(M_TRAIN / N_BATCH_SIZE))
        GROUP = int(np.ceil(N_BATCHES / 10))  # print progress ~10x per epoch
        for epoch in range(N_EPOCHS):
            for i in range(N_BATCHES):
                g_step += 1
                if 0 == g_step:
                    # Trace only the very first step so the graph appears in TensorBoard.
                    with fw.as_default():
                        tf.summary.trace_on()
                bx = x_train[i*N_BATCH_SIZE:(i + 1)*N_BATCH_SIZE]
                by = y_train[i*N_BATCH_SIZE:(i + 1)*N_BATCH_SIZE]
                with tf.GradientTape() as tape:
                    # BUGFIX: pass training=True so BatchNormalization uses batch
                    # statistics (and updates its moving averages) and Dropout is
                    # active; the original call ran the model in inference mode.
                    h = model(bx, training=True)
                    cost = criterion(h, by)
                if 0 == g_step:
                    with fw.as_default():
                        tf.summary.trace_export('graph', g_step)
                grads = tape.gradient(cost, model.trainable_variables)
                optim.apply_gradients(zip(grads, model.trainable_variables))
                cost = cost.numpy()
                accv = acc(h, by).numpy()
                with fw.as_default():
                    tf.summary.scalar('cost', cost, g_step)
                    tf.summary.scalar('acc', accv, g_step)
                if i % GROUP == 0:
                    print(f'epoch#{epoch + 1}: batch#{i + 1}: cost = {cost}, acc = {accv}')
            # (removed a copy-paste duplicate of the progress print that
            # re-reported the final batch's stale metrics after the loop)
            # End-of-epoch validation on the held-out test split.
            h = model(x_test, training=False)
            cost = criterion(h, y_test).numpy()
            accv = acc(h, y_test).numpy()
            with fw.as_default():
                tf.summary.scalar('val_cost', cost, g_step)
                tf.summary.scalar('val_acc', accv, g_step)
            print(f'epoch#{epoch + 1}: test cost = {cost}, test acc = {accv}')
        fw.close()

        model.save_weights(SAVE_PREFIX)
        print('Saved')

    # test
    print('Testing...')
    h = model(x_test, training=False)  # explicit inference mode
    cost = criterion(h, y_test).numpy()
    accv = acc(h, y_test).numpy()
    print(f'test cost = {cost}, test acc = {accv}')
    print('Tested')
