import tensorflow as tf
from tensorflow.keras import layers, losses, optimizers, metrics, activations, models, callbacks
import numpy as np
from sklearn.model_selection import train_test_split
import os
from python_ai.common.xcommon import *


def MyConv(ch, kernel=3, strides=1):
    """Build a same-padded 2-D convolution with ``ch`` output filters.

    Args:
        ch: number of output channels.
        kernel: square kernel size (default 3).
        strides: stride along both spatial dimensions (default 1).

    Returns:
        A freshly constructed ``layers.Conv2D`` layer.
    """
    return layers.Conv2D(
        filters=ch,
        kernel_size=kernel,
        strides=strides,
        padding='same',
    )


class ConvBNRelu(tf.keras.Model):
    """Conv2D -> BatchNormalization -> ReLU, applied as one unit."""

    def __init__(self, ch, kernels=3, strides=1, **kwargs):
        """Create the three stages.

        Args:
            ch: number of convolution output channels.
            kernels: square kernel size for the convolution.
            strides: spatial stride of the convolution.
        """
        super().__init__(**kwargs)
        # One Sequential wrapper gives the three stages a single
        # training-aware forward pass.
        stages = [
            MyConv(ch, kernels, strides=strides),
            layers.BatchNormalization(),
            layers.ReLU(),
        ]
        self.my_model = tf.keras.Sequential(stages)

    def call(self, x, training=None):
        # Forward `training` so BatchNorm uses batch statistics while
        # training and moving averages at inference time.
        return self.my_model(x, training=training)


class InterceptionBlock(tf.keras.Model):
    """Inception-style block with four parallel branches.

    Branches: a 3x3 conv, a 5x5 conv, a 3x3 conv followed by a 1x1
    conv, and a stride-1 3x3 max-pool followed by a 3x3 conv.  The
    branch outputs are concatenated on the channel axis, so the block
    emits ``4 * ch`` channels.
    """

    def __init__(self, ch, strides=1, **kwargs):
        """Build the four branches.

        Args:
            ch: output channels of each individual branch.
            strides: spatial stride applied by every branch.
        """
        super().__init__(**kwargs)
        self.ch = ch
        self.strides = strides

        # Branch 1: single 3x3 conv.
        self.conv1 = ConvBNRelu(ch, 3, strides)
        # Branch 2: single 5x5 conv.
        self.conv2 = ConvBNRelu(ch, 5, strides)
        # Branch 3: 3x3 conv followed by a 1x1 conv.
        self.conv3_1 = ConvBNRelu(ch, 3, strides)
        self.conv3_2 = ConvBNRelu(ch, 1, 1)
        # Branch 4: stride-1 max-pool; the trailing conv carries the stride.
        self.pool = layers.MaxPool2D(3, strides=1, padding='same')
        self.pool_conv = ConvBNRelu(ch, 3, strides)

    def call(self, x, training=None):
        branch1 = self.conv1(x, training=training)
        branch2 = self.conv2(x, training=training)
        branch3 = self.conv3_2(self.conv3_1(x, training=training),
                               training=training)
        branch4 = self.pool_conv(self.pool(x), training=training)
        # Concatenate along the channel dimension (NHWC axis 3).
        return tf.concat([branch1, branch2, branch3, branch4], axis=3)


class MyInterceptionNet(tf.keras.Model):
    """A stack of InterceptionBlock pairs with a global-average-pool head.

    Each stage is one stride-2 block (spatial downsampling) followed by
    one stride-1 block; the per-branch channel count doubles after every
    stage.  The final Dense layer emits raw logits for ``n_cls`` classes
    (no softmax — pair with a from_logits loss).
    """

    def __init__(self, n_layers, n_cls, init_ch=16, **kwargs):
        """Assemble the network.

        Args:
            n_layers: number of two-block stages.
            n_cls: number of output classes.
            init_ch: per-branch channel count of the first stage.
        """
        super().__init__(**kwargs)
        self.init_ch = init_ch
        self.in_ch = init_ch
        self.out_ch = init_ch
        self.n_layers = n_layers
        self.n_cls = n_cls

        # Stem convolution ahead of the inception stages.
        self.conv1 = ConvBNRelu(self.init_ch, 3, 1)

        self.blocks = tf.keras.Sequential(name='my_dynamic_blocks')
        for _ in range(n_layers):
            # First block of each stage halves the spatial size ...
            self.blocks.add(InterceptionBlock(self.out_ch, strides=2))
            # ... the second keeps it.
            self.blocks.add(InterceptionBlock(self.out_ch, strides=1))
            self.out_ch *= 2

        self.avg_pool = layers.GlobalAvgPool2D()
        self.fc = layers.Dense(self.n_cls)

    def call(self, x, training=None):
        h = self.conv1(x, training=training)
        h = self.blocks(h, training=training)
        h = self.avg_pool(h)
        return self.fc(h)


if '__main__' == __name__:

    np.random.seed(777)
    tf.random.set_seed(777)
    filename = os.path.basename(__file__)

    ver = 'v2.0'
    alpha = 0.001        # learning rate
    batch_size = 64
    n_epochs = 20
    n_clip = 4           # keep only every n_clip-th training sample
    print(f'n_clip: {n_clip}, n_epochs: {n_epochs}')

    # 1. CNN exercise on the MNIST dataset.
    # (1) Load the data and report the raw shapes.
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

    # (3) Preprocess: scale pixels to [0, 1] and add the channel axis (NHWC).
    x_train = x_train.astype(np.float32)
    x_test = x_test.astype(np.float32)
    x_train /= 255.
    x_test /= 255.
    x_train = x_train.reshape([-1, 28, 28, 1])
    x_test = x_test.reshape([-1, 28, 28, 1])

    # (2) Split off a validation set (10% of the training data).
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
                                                      random_state=777, train_size=0.9)

    # Thin the training set to every n_clip-th sample to shorten training.
    m_train, _, _, _ = x_train.shape
    idx = np.arange(m_train)
    x_train = x_train[idx % n_clip == 0]
    y_train = y_train[idx % n_clip == 0]

    model = MyInterceptionNet(2, 10, 16)
    model.compile(
        optimizer=optimizers.Adam(learning_rate=alpha),
        # Model head emits logits (no softmax), hence from_logits=True.
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        # FIX: compile() expects a *list* of metrics; passing the bare
        # function raises/behaves inconsistently across TF versions.
        metrics=[metrics.sparse_categorical_accuracy]
    )
    model.build(input_shape=(None, 28, 28, 1))
    model.summary()

    logdir = os.path.join('_log', filename, ver)
    print(logdir)
    tb_callback = callbacks.TensorBoard(log_dir=logdir, update_freq='batch', profile_batch=0)


    class MyEarlyStopping(callbacks.EarlyStopping):
        """EarlyStopping that only engages after the monitored metric has
        cleared a minimum threshold, so training is not cut short while
        the model is still warming up.

        https://stackoverflow.com/questions/46287403/is-there-a-way-to-implement-early-stopping-in-keras-only-after-the-first-say-1
        """

        def __init__(self, monitor_min_val, **kwargs):
            super().__init__(**kwargs)
            # Threshold the monitored value must exceed before the
            # parent's early-stopping logic is allowed to run.
            self.monitor_min_val = monitor_min_val

        def on_epoch_end(self, epoch, logs=None):
            current = self.get_monitor_value(logs)
            # FIX: get_monitor_value returns None when the monitored key
            # is missing from logs; comparing None > float would raise a
            # TypeError, so guard for it explicitly.
            if current is not None and current > self.monitor_min_val:
                super().on_epoch_end(epoch, logs)


    early_stop = MyEarlyStopping(monitor_min_val=0.75,
                                 monitor='val_sparse_categorical_accuracy',
                                 min_delta=1e-2, patience=2, verbose=1,
                                 restore_best_weights=True)

    model.fit(x_train, y_train, batch_size=batch_size, epochs=n_epochs,
              validation_data=(x_val, y_val), verbose=1,
              callbacks=[tb_callback, early_stop])
    print('TESTING...')
    model.evaluate(x_test, y_test, verbose=1)
