"""
Tensorflow 2.x
"""
import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
from tensorflow.keras.datasets import cifar10

from python_ai.common.xcommon import *


class ConvBn(keras.Model):
    """Conv2D followed by BatchNormalization, with no activation.

    Activation is intentionally left to the caller so residual blocks can
    place the ReLU before or after the shortcut addition as needed.
    """

    def __init__(self, filters, ksize, strides=(1, 1), padding='same', **kwargs):
        super().__init__(**kwargs)
        # Sub-layers are created once here, as Keras expects.
        self.conv = layers.Conv2D(filters=filters, kernel_size=ksize,
                                  strides=strides, padding=padding)
        self.bn = layers.BatchNormalization()

    def call(self, inputs, training=None):
        # Forward `training` so BatchNormalization switches between batch
        # statistics (training) and its moving averages (inference).
        out = self.conv(inputs, training=training)
        out = self.bn(out, training=training)
        return out


class ResNetBlock(keras.Model):
    """Basic two-convolution residual block (ResNet v1 ordering).

    When `is_shrink` is True the block halves the spatial resolution
    (stride 2 on the first conv) and uses a 1x1 strided projection on the
    shortcut so its shape matches the main path; otherwise the shortcut
    is the identity.
    """

    def __init__(self, filters, is_shrink, **kwargs):
        super().__init__(**kwargs)
        self.is_shrink = is_shrink
        if is_shrink:
            strides = (2, 2)
            # 1x1 projection so the shortcut matches the downsampled main path.
            self.conv03 = ConvBn(filters, (1, 1), (2, 2))
        else:
            strides = (1, 1)
        self.conv01 = ConvBn(filters, (3, 3), strides)
        self.conv02 = ConvBn(filters, (3, 3), (1, 1))

    def call(self, inputs, training=None):
        x = self.conv01(inputs, training=training)
        # BUGFIX: use the stateless tf.nn.relu instead of constructing a new
        # layers.ReLU() object inside call() on every forward pass — creating
        # layers in call() is a Keras anti-pattern (a fresh, untracked layer
        # per invocation). Numerically identical to ReLU().
        x = tf.nn.relu(x)
        # No ReLU between conv02 and the addition: the second activation is
        # applied after the residual sum (post-activation).
        x = self.conv02(x, training=training)
        if self.is_shrink:
            shortcut = self.conv03(inputs, training=training)
        else:
            shortcut = inputs
        return tf.nn.relu(x + shortcut)


class ResNet(keras.Model):
    """Small ResNet classifier.

    Layout: a 3x3 conv/BN stem, `n_blocks` stages of two ResNetBlocks each
    (channel count doubles per stage; every stage after the first starts
    with a downsampling block), global average pooling, and a linear head.
    Returns raw logits — pair with a `from_logits=True` loss.

    Args:
        filters: channel count of the stem / first stage.
        n_cls: number of output classes (logit dimension).
        n_blocks: number of stages (two residual blocks per stage).
    """

    def __init__(self, filters, n_cls, n_blocks, **kwargs):
        super().__init__(**kwargs)
        self.conv = ConvBn(filters, (3, 3))

        ch = filters
        self.blocks = keras.Sequential()
        for block_id in range(n_blocks):
            for layer_id in range(2):
                # Only the first block of every stage after stage 0 shrinks
                # the spatial resolution.
                is_shrink = block_id != 0 and layer_id == 0
                self.blocks.add(ResNetBlock(ch, is_shrink))
            ch *= 2

        self.glbAvg = layers.GlobalAvgPool2D()
        # NOTE: GlobalAvgPool2D already yields (batch, channels), so this
        # Flatten is a no-op; kept so the attribute set (and any saved
        # checkpoints) stay compatible.
        self.flt = layers.Flatten()
        self.fc = layers.Dense(n_cls)  # logits, no softmax

    def call(self, inputs, training=None):
        x = self.conv(inputs, training=training)
        # BUGFIX: tf.nn.relu instead of instantiating layers.ReLU() inside
        # call() on every forward pass (untracked throwaway layer objects).
        x = tf.nn.relu(x)
        x = self.blocks(x, training=training)
        # Pooling/flatten/dense are stateless w.r.t. train/eval mode, so no
        # `training` argument is forwarded to them.
        x = self.glbAvg(x)
        x = self.flt(x)
        return self.fc(x)


if '__main__' == __name__:
    # Fix the seeds for reproducible runs.
    tf.random.set_seed(1)
    np.random.seed(1)

    # CIFAR-10: x is (N, 32, 32, 3) uint8, y is (N, 1) integer labels.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    print('x_train', np.shape(x_train), x_train.dtype)
    print('y_train', np.shape(y_train), y_train.dtype)
    print('x_test', np.shape(x_test))
    print('y_test', np.shape(y_test))
    # Scale pixels to [0, 1] floats.
    x_train = x_train.astype(np.float32) / 255.
    x_test = x_test.astype(np.float32) / 255.
    print('x_train', np.shape(x_train), x_train.dtype)
    print('y_train', np.shape(y_train), y_train.dtype)
    print('x_test', np.shape(x_test))
    print('y_test', np.shape(y_test))

    SHUFFLE_BUFFER = 2000
    BATCH_SIZE = 64
    N_EPOCHS = 2
    ALPHA = 0.001  # Adam learning rate
    VER = 'v2.0'

    N_CLS = len(np.unique(y_train))
    FILE_NAME = os.path.basename(__file__)
    LOG_DIR = os.path.join('_log', FILE_NAME, VER)

    ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\
        .shuffle(SHUFFLE_BUFFER)\
        .batch(batch_size=BATCH_SIZE, drop_remainder=True)\
        .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    # BUGFIX: the evaluation pipeline must not shuffle (pointless for
    # metrics) and must not drop the remainder batch — drop_remainder=True
    # silently discarded up to BATCH_SIZE-1 test samples, skewing the
    # reported test accuracy.
    ds_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))\
        .batch(batch_size=BATCH_SIZE, drop_remainder=False)\
        .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    model = ResNet(64, N_CLS, 4)
    model.build(input_shape=(None, 32, 32, 3))
    model.summary()
    # BUGFIX: `metrics` expects a list per the Keras compile() API.
    model.compile(
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=optimizers.Adam(learning_rate=ALPHA),
        metrics=[metrics.sparse_categorical_accuracy],
    )

    # BUGFIX: `callbacks` expects a list per the Keras fit() API.
    model.fit(ds, validation_data=ds_test,
              epochs=N_EPOCHS,
              callbacks=[callbacks.TensorBoard(log_dir=LOG_DIR, update_freq='batch', profile_batch=0)],
              )

    print('Testing ...')
    model.evaluate(ds_test,)
    print('Tested')
    print('Over')
