"""
1.	使用tensorflow2.0+keras接口完成InceptionNet网络模型（共100分）
【For Tensorflow 2.x】
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, metrics, optimizers, callbacks
import numpy as np
import os
import cv2 as cv
import matplotlib.pyplot as plt

# Fix both RNGs so runs are reproducible.
np.random.seed(1)
tf.random.set_seed(1)

VER = 'v4.0'        # experiment version tag; becomes part of the TensorBoard log path
ALPHA = 1e-4        # Adam learning rate
BATCH_SIZE = 32
N_EPOCHS = 4
SIZE = 32           # input image height/width (cifar images are 32x32)
BASE_DIR, FILE_NAME = os.path.split(__file__)
LOG_DIR = os.path.join(BASE_DIR, '_log', FILE_NAME, VER)  # TensorBoard log directory
# Dataset roots, one subdirectory per class (relative to the working directory).
IMG_DIR_TRAIN = '../../../../large_data/DL2/_many_files/cifar2/train'
IMG_DIR_TEST = '../../../../large_data/DL2/_many_files/cifar2/test'


# ① Dataset reading
# 1) Define the image-reading function read_file
def read_file(dir):
    """Load every .jpg image in *dir* into one float32 array scaled to [-1, 1].

    Args:
        dir: directory to scan (non-recursive); only files whose extension is
            exactly ``.jpg`` (case-insensitive) are read.

    Returns:
        A float32 ndarray of shape (n, H, W, 3) in OpenCV's BGR channel order,
        with pixel values mapped from [0, 255] to [-1, 1].
    """
    x = []
    # Sort for a deterministic file order: os.listdir order is
    # filesystem-dependent, which would defeat the RNG seeding above.
    for file in sorted(os.listdir(dir)):
        _, ext = os.path.splitext(file)
        if ext.lower() != '.jpg':
            continue
        path = os.path.join(dir, file)
        img = cv.imread(path, cv.IMREAD_COLOR)
        if img is None:
            # cv.imread returns None for unreadable/corrupt files; skip them
            # instead of letting np.float32 fail on a ragged list.
            continue
        x.append(img)
    x = np.float32(x)
    x = x / 255. * 2. - 1.  # rescale [0, 255] -> [-1, 1]
    return x


def load_data(dir, map=None):
    """Load a dataset laid out as one subdirectory per class under *dir*.

    Args:
        dir: root directory; each subdirectory holds the .jpg images of one class.
        map: optional {class_name: label_index} mapping from a previous call
            (e.g. reuse the training map for the test set). When None, labels
            are assigned 0, 1, ... in sorted subdirectory order.

    Returns:
        (x, y, map_this): images as one float32 array in [-1, 1], integer
        labels of matching length, and the {class_name: label_index} map built
        here. x and y are None when no class directory contains images.
    """
    yi = 0
    x, y = None, None
    map_this = {}
    # Sort so the class -> index assignment is deterministic; os.listdir order
    # is filesystem-dependent and would silently relabel classes between runs.
    for sub_dir in sorted(os.listdir(dir)):
        sub_dir_path = os.path.join(dir, sub_dir)
        if not os.path.isdir(sub_dir_path):
            continue
        map_this[sub_dir] = yi
        x_sub = read_file(sub_dir_path)
        xlen = len(x_sub)
        if xlen == 0:
            # Class directory with no images: keep its index reserved but skip
            # the concatenation (an empty 1-D array cannot concat with 4-D x).
            yi += 1
            continue
        y_sub_i = yi if map is None else map[sub_dir]
        y_sub = np.full([xlen], y_sub_i)
        if x is None:
            x, y = x_sub, y_sub
        else:
            x = np.concatenate([x, x_sub], axis=0)
            y = np.concatenate([y, y_sub], axis=0)
        yi += 1
    return x, y, map_this


# 2) Load the cifar2 training set and feed it into a tf.data pipeline
print('Loading training data ...')
x_train, y_train, label2idx = load_data(IMG_DIR_TRAIN)
n_cls = len(label2idx)
print('n_cls', n_cls)
print('x_train', x_train.shape)
print('y_train', y_train.shape)
# Shuffle with a 1000-sample buffer and drop the last partial batch so every
# training step sees a full BATCH_SIZE batch.
dl_train = (
    tf.data.Dataset.from_tensor_slices((x_train, y_train))
    .shuffle(1000)
    .batch(BATCH_SIZE, drop_remainder=True)
    .prefetch(tf.data.experimental.AUTOTUNE)
)

# 3) Load the cifar2 test set and feed it into a tf.data pipeline,
# reusing the label mapping built from the training set.
print('Loading testing data ...')
x_test, y_test, _ = load_data(IMG_DIR_TEST, label2idx)
print('x_test', x_test.shape)
print('y_test', y_test.shape)
dl_test = (
    tf.data.Dataset.from_tensor_slices((x_test, y_test))
    .shuffle(1000)
    .batch(BATCH_SIZE)
    .prefetch(tf.data.experimental.AUTOTUNE)
)


# ② Model creation
# 1) convCell: a convolution unit — kernel size defaults to 3, stride to 1,
# zero ('same') padding, followed by batch normalization and a ReLU activation.
def convCell(filters, ksize=(3, 3), strides=(1, 1), padding='same'):
    """Return a Conv2D -> BatchNormalization -> ReLU sequential cell.

    The convolution carries no bias; BatchNormalization provides the shift.
    """
    cell = keras.Sequential()
    cell.add(layers.Conv2D(filters, ksize, strides, padding, use_bias=False))
    cell.add(layers.BatchNormalization())
    cell.add(layers.ReLU())
    return cell


# 2) InceptionBlock module (see the assignment figure)
class InceptionBlock(keras.Model):
    """One Inception module with four parallel branches.

    Branches: 1x1 conv | 1x1 -> 3x3 conv | 1x1 -> 5x5 conv |
    3x3 max-pool -> 1x1 conv. Their outputs are concatenated on the channel
    axis, so the block emits 4 * filters channels. When ``is_shrink`` is True
    the (2, 2)-strided layers halve the spatial size in every branch.
    """

    def __init__(self, filters, is_shrink, **kwargs):
        super().__init__(**kwargs)
        # Each branch carries the stride exactly once, so all four branches
        # shrink (or not) by the same factor and the concat shapes line up.
        strides = (2, 2) if is_shrink else (1, 1)
        self.branch01 = convCell(filters, (1, 1), strides)
        self.branch02 = convCell(filters, (1, 1), strides)
        self.branch02_2 = convCell(filters, (3, 3))
        self.branch03 = convCell(filters, (1, 1), strides)
        self.branch03_2 = convCell(filters, (5, 5))
        self.branch04 = layers.MaxPool2D((3, 3), (1, 1), 'same')
        self.branch04_2 = convCell(filters, (1, 1), strides)

    # 3) Forward pass of the Inception module
    def call(self, inputs, training=None):
        out1 = self.branch01(inputs, training=training)
        out2 = self.branch02_2(self.branch02(inputs, training=training),
                               training=training)
        out3 = self.branch03_2(self.branch03(inputs, training=training),
                               training=training)
        out4 = self.branch04_2(self.branch04(inputs, training=training),
                               training=training)
        # Merge the four branches along the channel dimension (NHWC axis 3).
        return tf.concat([out1, out2, out3, out4], axis=3)


# 4) Assemble InceptionNet: a stem 3x3 conv (channels and size unchanged),
# then four InceptionBlocks — the 1st and 3rd halve the spatial size, and the
# channel count doubles before the 3rd.
# NOTE(review): the assignment text asks for 16 initial channels, but this
# script uses 32 — confirm which is intended.
# 6) The forward pass is traced by the functional API calls below.
inputs = keras.Input((SIZE, SIZE, 3))
ch = 32
x = convCell(ch)(inputs)  # stem convolution
for block_idx, shrink in enumerate([True, False, True, False]):
    if block_idx == 2:
        ch *= 2  # double the channels ahead of the third block
    x = InceptionBlock(ch, shrink)(x)

# 5) Global average pooling replaces a fully-connected head; the Dense layer
# emits raw (unnormalized) logits — no softmax.
x = layers.GlobalAvgPool2D()(x)
x = layers.Dense(n_cls)(x)

# ③ Model evaluation
# 1) Create the model object
model = keras.Model(inputs, x)
model.summary()

# 2) Compile with a suitable optimizer.
# The Dense head above has no softmax, so the model outputs raw logits. The
# plain `sparse_categorical_crossentropy` function defaults to
# from_logits=False and would treat those logits as probabilities, corrupting
# both the loss value and the gradients — pass from_logits=True explicitly.
model.compile(
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    loss=losses.SparseCategoricalCrossentropy(from_logits=True),
    # Keep the function-style metric so the history keys stay
    # 'sparse_categorical_accuracy' / 'val_sparse_categorical_accuracy'.
    metrics=[metrics.sparse_categorical_accuracy]
)

# 3) Fit on the training pipeline, validating against the test pipeline.
tensorboard_cb = callbacks.TensorBoard(LOG_DIR, update_freq='batch',
                                       profile_batch=0)
result = model.fit(
    dl_train,
    epochs=N_EPOCHS,
    callbacks=[tensorboard_cb],
    validation_data=dl_test,
)
his = result.history
print(his)

# 4) Loss and accuracy on the test set
print('Testing ...')
model.evaluate(dl_test)

# 5)	绘制训练集与测试集损失值对比图
spr = 1
spc = 2
spn = 0
plt.figure(figsize=[12, 6])

spn += 1
plt.subplot(spr, spc, spn)
plt.title('loss')
plt.plot(his['loss'], label='train')
plt.plot(his['val_loss'], label='val')
plt.grid()
plt.legend()

spn += 1
plt.subplot(spr, spc, spn)
plt.title('accuracy')
plt.plot(his['sparse_categorical_accuracy'], label='train')
plt.plot(his['val_sparse_categorical_accuracy'], label='val')
plt.grid()
plt.legend()

plt.show()
