import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics
import numpy as np
import os
import sys

# Fix both NumPy and TensorFlow RNG seeds so runs are reproducible.
np.random.seed(777)
tf.random.set_seed(777)

# Dataset locations and batch size.
# NOTE(review): relative path — assumes the script is launched from its own
# directory; verify against the actual repository layout.
CIFAR2_ROOT_DIR = '../../../../../large_data/DL2/_many_files/cifar2'
CIFAR2_TRAIN_DIR = os.path.join(CIFAR2_ROOT_DIR, 'train')
CIFAR2_TEST_DIR = os.path.join(CIFAR2_ROOT_DIR, 'test')
BATCH_SIZE = 64


# 1. Process the CIFAR-2 dataset with TensorFlow 2.x (locate the files yourself).
# (1) Data preprocessing
# (a) Read one sample of the dataset
def load_img(path):
    """Decode one image file and derive its binary label from the path.

    Files inside an ``automobile`` directory are labelled 0; everything
    else (the airplane class) is labelled 1.  Pixels are scaled to [0, 1].
    """
    is_automobile = tf.strings.regex_full_match(path, r'^.*automobile[\\/][^\\/]+$')
    label = tf.where(is_automobile, 0, 1)
    raw = tf.io.read_file(path)
    image = tf.image.decode_jpeg(raw, channels=3)
    image = tf.image.resize(image, (32, 32))
    image = tf.cast(image, tf.float32) / 255.0
    return image, label


# (b) Build the training input pipeline: list files, decode/label in
# parallel, shuffle, batch, and prefetch so input prep overlaps training.
ds_train = tf.data.Dataset.list_files(CIFAR2_TRAIN_DIR + '/*/*.jpg')\
    .map(load_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\
    .shuffle(1000)\
    .batch(batch_size=BATCH_SIZE, drop_remainder=True)\
    .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

# (c) Build the evaluation input pipeline.  Unlike the training pipeline,
# the test set is NOT shuffled (evaluation order is irrelevant and a
# deterministic pipeline is preferable) and no batch is dropped —
# drop_remainder=True would silently exclude samples from the metrics.
ds_test = tf.data.Dataset.list_files(CIFAR2_TEST_DIR + '/*/*.jpg')\
    .map(load_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\
    .batch(batch_size=BATCH_SIZE, drop_remainder=False)\
    .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

# (d) Reasonable hyper-parameter settings
ALPHA = 0.001  # Adam learning rate
N_EPOCH = 6  # Kept small for a quick demo on this large dataset; increase for real training.


# (2) Residual (skip-connection) block setup
class ResNetBlock(keras.Model):
    """Basic residual block: two 3x3 convolutions plus a skip connection.

    Args:
        residual: If True, project the input with a strided 1x1 convolution
            so the skip path matches the main path's channels/spatial size.
        filters: Output channel count for both convolutions.
        strides: Strides of the first convolution (the second is always 1).
        dropout: Dropout rate applied after each activation.
    """

    def __init__(self, residual, filters, strides, dropout, **kwargs):
        super().__init__(**kwargs)
        self.residual = residual
        # Main path, applied twice: conv -> BN -> ReLU -> dropout.
        self.conv1 = layers.Conv2D(filters, (3, 3), strides, 'same')
        self.bn1 = layers.BatchNormalization()
        self.relu1 = layers.ReLU()
        self.dp1 = layers.Dropout(dropout)
        self.conv2 = layers.Conv2D(filters, (3, 3), (1, 1), 'same')
        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.ReLU()
        self.dp2 = layers.Dropout(dropout)
        # Skip path: build the 1x1 projection only when it is actually used;
        # unconditionally creating it adds dead, untrained parameters to
        # identity blocks.
        self.down_conv = (layers.Conv2D(filters, (1, 1), strides, 'same')
                          if residual else None)

    def call(self, inputs, training=None):
        # Skip path: project when channels/resolution change, else identity.
        if self.residual:
            res = self.down_conv(inputs)
        else:
            res = inputs
        # Main path.  `training` is forwarded explicitly so that
        # BatchNormalization and Dropout switch correctly between training
        # and inference behaviour.
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = self.relu1(x)
        x = self.dp1(x, training=training)
        x = self.conv2(x)
        x = layers.add([x, res])
        x = self.bn2(x, training=training)
        x = self.relu2(x)
        x = self.dp2(x, training=training)
        return x


# (3) ResNet network layout
# (a) Define the ResNet class
class ResNet(keras.Model):
    """Small ResNet for binary classification; outputs a single raw logit.

    Args:
        init_ch: Channel count of the first stage; doubled after each stage.
        block_spec_list: Iterable giving the number of residual blocks in
            each stage, e.g. ``(2, 2, 2)``.
    """

    def __init__(self, init_ch, block_spec_list, **kwargs):
        super().__init__(**kwargs)
        self.blocks = keras.Sequential()
        ch = init_ch
        # One stage per spec entry: the first block of a stage downsamples
        # (stride 2) and projects the skip path; the rest are identity
        # blocks.  Channels double between stages.
        for n_blocks in block_spec_list:
            for layer_id in range(n_blocks):
                if layer_id == 0:
                    residual = True
                    strides = (2, 2)
                else:
                    residual = False
                    strides = (1, 1)
                self.blocks.add(ResNetBlock(residual, ch, strides, 0.2))
            ch *= 2
        # Classification head: global average pooling + linear logit.
        self.avg_pool = layers.GlobalAvgPool2D()
        self.fc = layers.Dense(1)

    def call(self, x, training=None):
        # Forward `training` explicitly so BN/Dropout inside the residual
        # blocks switch between train and inference modes.
        x = self.blocks(x, training=training)
        x = self.avg_pool(x)
        x = self.fc(x)
        return x


# (4) Main entry point
if __name__ == '__main__':
    # Build the ResNet (three stages of two blocks each) and show its layout.
    model = ResNet(16, (2, 2, 2))
    model.build(input_shape=(None, 32, 32, 3))
    model.summary()

    # Compile.  The model emits raw logits (no sigmoid on the final Dense),
    # so the loss uses from_logits=True and the accuracy metric must
    # threshold the logit at 0.0 (logit 0 == probability 0.5); the default
    # 0.5 threshold of `binary_accuracy` is wrong on logits.  `metrics`
    # is also passed as a list, as Keras expects.
    model.compile(
        loss=losses.BinaryCrossentropy(from_logits=True),
        optimizer=optimizers.Adam(learning_rate=ALPHA),
        metrics=[metrics.BinaryAccuracy(threshold=0.0)]
    )

    # Train on the training pipeline, validating on the test pipeline.
    model.fit(ds_train,
              epochs=N_EPOCH,
              validation_data=ds_test
              )

    # Report the final loss/accuracy on the test set.
    model.evaluate(ds_test)
