import os
import tensorflow as tf
import numpy as np
from tensorflow import keras

# In[1]:
# Fix RNG seeds so TF and NumPy results are reproducible run-to-run.
tf.random.set_seed(22)
np.random.seed(22)
# NOTE(review): TensorFlow reads TF_CPP_MIN_LOG_LEVEL when it is imported,
# so setting it here (after `import tensorflow` above) likely has no effect —
# consider setting it before the import. TODO confirm.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Guard: this script uses the TF 2.x eager API (tf.GradientTape below).
assert tf.__version__.startswith('2.')

# Data loading: MNIST digits, scaled to [0, 1], with a channel axis added.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.
# [b, 28, 28] => [b, 28, 28, 1]
x_train = x_train[..., np.newaxis]
x_test = x_test[..., np.newaxis]

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(256)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(256)

print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

# Convolution unit module:
# Conv2D (3x3 kernel, stride 1, 'same' padding) -> BatchNormalization -> ReLU
class ConvBNRelu(keras.Model):
    """Conv2D -> BatchNormalization -> ReLU unit.

    Args:
        ch: number of output channels for the convolution.
        kernelsz: square kernel size (default 3).
        strides: convolution stride (default 1).
        padding: padding mode passed to Conv2D (default 'same').
    """

    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
        super(ConvBNRelu, self).__init__()

        self.model = keras.models.Sequential()
        self.model.add(keras.layers.Conv2D(ch, kernelsz, strides=strides, padding=padding))
        self.model.add(keras.layers.BatchNormalization())
        self.model.add(keras.layers.ReLU())

    def call(self, x, training=None):
        # Forward `training` so BatchNormalization can switch between
        # batch statistics (training) and moving averages (inference).
        return self.model(x, training=training)

# Inception block: four parallel conv/pool branches concatenated on channels
class InceptionBlk(keras.Model):
    """Inception-style block with four parallel branches.

    Branches: a 3x3 conv, a 5x5 conv, a 3x3 -> 1x1 conv pair, and a
    3x3 max-pool followed by a 3x3 conv. Their outputs are concatenated
    along the channel axis, so the block emits 4 * ch channels.

    Args:
        ch: channel count produced by each branch.
        strides: spatial stride applied by the downsampling branches.
    """

    def __init__(self, ch, strides=1):
        super(InceptionBlk, self).__init__()

        self.ch = ch
        self.strides = strides

        # Parallel convolution branches; each downsamples by `strides`.
        self.conv1 = ConvBNRelu(ch, strides=strides)                # 3x3
        self.conv2 = ConvBNRelu(ch, kernelsz=5, strides=strides)    # 5x5
        self.conv3_1 = ConvBNRelu(ch, kernelsz=3, strides=strides)  # 3x3
        self.conv3_2 = ConvBNRelu(ch, kernelsz=1, strides=1)        # 1x1 on top of 3x3

        # Pooling branch: 3x3 max-pool (stride 1), then a 3x3 conv.
        self.pool = keras.layers.MaxPooling2D(3, strides=1, padding='same')
        self.pool_conv = ConvBNRelu(ch, strides=strides)

    def call(self, x, training=None):
        """Run all four branches on `x` and concatenate on channels."""
        branch_a = self.conv1(x, training=training)
        branch_b = self.conv2(x, training=training)
        branch_c = self.conv3_2(self.conv3_1(x, training=training), training=training)
        branch_d = self.pool_conv(self.pool(x), training=training)

        # Stack branch outputs along the channel dimension.
        return tf.concat([branch_a, branch_b, branch_c, branch_d], axis=3)
# Complete Inception-style network model
class Inception(keras.Model):
    """Inception-style classifier built from stacked InceptionBlk stages.

    Args:
        num_layers: number of stages; each stage contains two InceptionBlk
            modules — the first halves the spatial size (stride 2), the
            second keeps it (stride 1).
        num_classes: size of the final logits layer.
        init_ch: channel count for the stem conv and the first stage.
        **kwargs: forwarded to keras.Model.
    """

    def __init__(self, num_layers, num_classes, init_ch=16, **kwargs):
        super(Inception, self).__init__(**kwargs)

        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_layers = num_layers
        self.init_ch = init_ch

        # Stem convolution.
        self.conv1 = ConvBNRelu(init_ch)

        # Dynamically built stack of Inception stages.
        self.blocks = keras.models.Sequential(name='dynamic-blocks')
        for _ in range(num_layers):
            # Stride 2 first (downsample), then stride 1 (keep size).
            for stride in (2, 1):
                self.blocks.add(InceptionBlk(self.out_channels, strides=stride))
            # Double the channel budget after every stage.
            self.out_channels *= 2

        # Global average pooling collapses [b, h, w, c] -> [b, c],
        # acting as an implicit flatten before the classifier head.
        self.avg_pool = keras.layers.GlobalAveragePooling2D()
        self.fc = keras.layers.Dense(num_classes)

    def call(self, x, training=None):
        """Forward pass; returns raw class logits of shape [b, num_classes]."""
        h = self.conv1(x, training=training)
        h = self.blocks(h, training=training)
        h = self.avg_pool(h)
        # Raw logits — pair with a from_logits=True loss.
        return self.fc(h)


# build model and optimizer
batch_size = 32  # NOTE(review): unused — the datasets above are batched at 256.
epochs = 1
model = Inception(2, 10)
# Build with an explicit input shape so summary() can report every layer.
model.build(input_shape=(None, 28, 28, 1))
model.summary()

optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Model emits raw logits, so the loss must use from_logits=True.
criteon = keras.losses.CategoricalCrossentropy(from_logits=True)

acc_meter = keras.metrics.Accuracy()

for epoch in range(epochs):

    # --- training ---
    for step, (x, y) in enumerate(db_train):

        with tf.GradientTape() as tape:
            # [b, 10] logits. BUG FIX: pass training=True so
            # BatchNormalization uses batch statistics here; the original
            # `model(x)` ran BN in inference mode during training.
            logits = model(x, training=True)
            # y: [b] int labels vs logits: [b, 10]
            loss = criteon(tf.one_hot(y, depth=10), logits)

        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 10 == 0:
            print(epoch, step, 'loss:', loss.numpy())

    # --- evaluation ---
    acc_meter.reset_states()  # clear counts accumulated in earlier epochs

    for x, y in db_test:
        # [b, 10] logits in inference mode (BN uses moving averages).
        logits = model(x, training=False)
        # [b, 10] => [b] predicted class ids
        pred = tf.argmax(logits, axis=1)
        acc_meter.update_state(y, pred)

    print(epoch, 'evaluation acc:', acc_meter.result().numpy())
