import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers, activations, optimizers, losses, metrics, callbacks
import os
import sys

# Fix RNG seeds so runs are reproducible (NumPy and TensorFlow keep separate RNGs).
np.random.seed(777)
tf.random.set_seed(777)
# Script base name, used below to build a per-script TensorBoard log directory.
filename = os.path.basename(__file__)

ver = 'v1.0'      # experiment version tag (becomes part of the log path)
alpha = 0.001     # Adam learning rate
n_epoch = 20      # maximum training epochs (early stopping may end sooner)
batch_size = 64   # mini-batch size for fit()

# 1. Complete the following ResNet steps as required (10 points each)
# (1) Data handling
# 1) Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# 2) Train/test split and preprocessing: scale pixel values to [0, 1]
#    and add a trailing channel axis so images are NHWC (28, 28, 1).
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
x_train = x_train.reshape((-1, 28, 28, 1))
x_test = x_test.reshape((-1, 28, 28, 1))


# (2) Model components
# 1) Convolution factory with the defaults required by the exercise:
#    3x3 kernel, stride 1, zero ('same') padding.
def my_conv(ch, kernel=3, strides=1, padding='same'):
    """Return a Conv2D layer with `ch` filters (defaults: 3x3 kernel, stride 1, 'same' padding)."""
    conv_layer = layers.Conv2D(ch, kernel, strides, padding)
    return conv_layer


# (3) ResNet building block (class)
class ResNetBlock(keras.Model):
    """Two 3x3 conv + batch-norm layers with an optional projected shortcut.

    1) Two 3x3 convolutions, each followed by batch normalization.
    2) If `residual` is set, a 1x1 projection shortcut (conv + average
       pooling) maps the input to the output shape before the addition.
    3) `call` implements the forward pass.

    Bug fix: all sub-layers are created once in __init__.  The original code
    instantiated BatchNormalization (and the shortcut conv) inside call(),
    which creates fresh, untracked weights on every forward pass, so those
    layers were never trained.
    """

    def __init__(self, output_ch, strides=1, residual=True, **kwargs):
        """
        Args:
            output_ch: number of output channels of both convolutions.
            strides: stride of the first convolution (2 downsamples).
            residual: whether to add the projected shortcut connection.
            **kwargs: forwarded to keras.Model.
        """
        super().__init__(**kwargs)

        self.output_ch = output_ch
        self.strides = strides
        self.residual = residual

        self.conv1 = my_conv(output_ch, 3, strides)
        self.bn1 = layers.BatchNormalization()
        self.conv2 = my_conv(output_ch, 3, 1)
        self.bn2 = layers.BatchNormalization()

        if residual:
            # 1x1 projection matches the channel count; average pooling
            # matches the spatial downsampling done by conv1's stride.
            self.shortcut_conv = my_conv(output_ch, 1, 1)
            self.shortcut_pool = layers.AvgPool2D(strides, strides, 'same')

    def call(self, input, training=None):
        x = self.conv1(input)
        x = self.bn1(x, training=training)
        x = activations.relu(x)
        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = activations.relu(x)

        if self.residual:
            r = self.shortcut_pool(self.shortcut_conv(input))
            x += r
        return x


# (4) ResNet main network (class), initial 16 channels
# 1) Initial convolution, then a dynamically built Sequential of blocks.
# 2) Blocks are generated stage by stage; channels double after each stage.
# 3) Global average pooling + Dense head produce the classification logits.
class Resnet(keras.Model):
    """Small ResNet: stem conv -> stacked ResNetBlocks -> GAP -> Dense logits.

    Bug fix: BatchNormalization, GlobalAveragePooling2D and Dense are created
    once in __init__.  The original built them inside call(), so the Dense
    classifier head got fresh random weights on every forward pass and the
    model could never learn its output layer.
    """

    def __init__(self, blocks_spec_list, n_cls, init_ch=16, **kwargs):
        """
        Args:
            blocks_spec_list: blocks per stage, e.g. [2, 2, 2, 2].
            n_cls: number of output classes (raw logits, no softmax).
            init_ch: channel count of the first stage; doubles each stage.
            **kwargs: forwarded to keras.Model.
        """
        super().__init__(**kwargs)

        self.n_layers = blocks_spec_list
        self.n_cls = n_cls
        self.init_ch = init_ch
        self.output_ch = init_ch

        self.conv1 = my_conv(init_ch, 3, 1)
        self.bn1 = layers.BatchNormalization()

        self.blocks = keras.Sequential()
        for id_block, n_blocks in enumerate(blocks_spec_list):
            for id_layer in range(n_blocks):
                # Every block of stage 0 and the first block of each later
                # stage gets the projected shortcut; each stage's first block
                # also downsamples with stride 2.
                residual = id_block == 0 or id_layer == 0
                strides = 2 if id_layer == 0 else 1
                self.blocks.add(ResNetBlock(self.output_ch, strides, residual))

            self.output_ch *= 2  # double the channel count for the next stage

        self.avg_pool = layers.GlobalAveragePooling2D()
        self.classifier = layers.Dense(self.n_cls, activation=None)  # logits

    def call(self, input, training=None):
        x = self.conv1(input)
        x = self.bn1(x, training=training)
        x = activations.relu(x)

        x = self.blocks(x)

        x = self.avg_pool(x)
        return self.classifier(x)


# 4) Build, compile and train the model
model = Resnet([2, 2, 2, 2], 10, 16)
model.build((None, 28, 28, 1))  # create weights up front so summary() can report shapes
model.summary()
model.compile(
    optimizer=optimizers.Adam(learning_rate=alpha),
    # Labels are integer class ids and the model emits raw logits,
    # hence sparse CE with from_logits=True.
    loss=losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[metrics.sparse_categorical_accuracy]
)
# TensorBoard logs go to _log/<script name>/<version>; profiling disabled.
logdir = os.path.join('_log', filename, ver)
tb_callback = callbacks.TensorBoard(log_dir=logdir, update_freq='batch', profile_batch=0)


class MyEarlyStopping(callbacks.EarlyStopping):
    """EarlyStopping that only engages once the monitored metric exceeds a
    floor value, so training is not stopped while the model is still
    clearly under-fitting.

    Bug fix: get_monitor_value() returns None when the monitored key is
    missing from `logs`; the original compared None > float, which raises
    TypeError on Python 3.  The None case is now skipped.
    """

    def __init__(self, monitor_min_val, **kwargs):
        """
        Args:
            monitor_min_val: minimum monitored value before the parent
                EarlyStopping logic is allowed to run.
            **kwargs: forwarded to keras.callbacks.EarlyStopping.
        """
        super().__init__(**kwargs)
        self.monitor_min_val = monitor_min_val

    def on_epoch_end(self, epoch, logs=None):
        current = self.get_monitor_value(logs)
        # Skip (rather than crash) when the metric is absent; defer to the
        # parent's stopping logic only after the metric clears the floor.
        if current is not None and current > self.monitor_min_val:
            super().on_epoch_end(epoch, logs)


# Stop once val accuracy (already above the 0.75 floor) fails to improve by
# at least 1% for 2 consecutive epochs; restore the best weights seen.
early_stopping = MyEarlyStopping(monitor_min_val=0.75,
                                 monitor='val_sparse_categorical_accuracy',
                                 min_delta=1e-2,
                                 patience=2,
                                 verbose=1,
                                 restore_best_weights=True)
# Train on 90% of the training set, validating on the remaining 10%.
model.fit(x_train, y_train,
          batch_size=batch_size, epochs=n_epoch, verbose=1,
          callbacks=[tb_callback, early_stopping],
          validation_split=0.1)
# Final evaluation on the held-out test set.
model.evaluate(x_test, y_test, verbose=1)
