import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.keras import layers, datasets, optimizers, losses
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from PokemonData import load_pokemon, load_csv

# Channel-wise mean/std should be computed from the real training data;
# here the standard ImageNet statistics are used (RGB order).
img_mean = tf.constant([0.485, 0.456, 0.406])
img_std = tf.constant([0.229, 0.224, 0.225])
def normalize(x, mean=img_mean, std=img_std):
    """Standardize an image tensor channel-wise: (x - mean) / std.

    x is expected in [0, 1] with shape [..., 3]; mean/std are per-channel
    ImageNet statistics broadcast over the spatial dimensions.
    """
    return (x - mean) / std

def denormalize(x, mean=img_mean, std=img_std):
    """Invert normalize(): map standardized values back to the [0, 1] range."""
    return x * std + mean

def preprocess(images, labels):
    """Map one (file path, integer label) pair to an augmented image tensor.

    Returns a (224, 224, 3) float32 tensor standardized with ImageNet
    statistics, together with the label converted to a tensor.
    """
    # Read and decode the JPEG; channels=3 drops any alpha channel.
    img = tf.io.read_file(images)
    img = tf.image.decode_jpeg(img, channels=3)
    # Resize slightly larger than the 224 target so random_crop has slack.
    img = tf.image.resize(img, [244, 244])

    # Data augmentation: random vertical/horizontal flips, then a random
    # 224x224 crop out of the 244x244 image.
    img = tf.image.random_flip_up_down(img)
    img = tf.image.random_flip_left_right(img)
    img = tf.image.random_crop(img, [224, 224, 3])

    # Scale pixel values to [0, 1], then standardize to ~N(0, 1) per channel.
    img = normalize(tf.cast(img, dtype=tf.float32) / 255.)

    return img, tf.convert_to_tensor(labels)

def main():
    """Train a 5-class Pokemon classifier via transfer learning on DenseNet121."""
    batchsz = 4

    # Training set: shuffle, preprocess (decode + augment + normalize), batch.
    images, labels, table = load_pokemon(r'D:\下载（文档）\pokeman', mode='train')
    db_train = tf.data.Dataset.from_tensor_slices((images, labels))
    db_train = db_train.shuffle(1000).map(preprocess).batch(batchsz)
    # Validation set: no shuffling, so metrics are reproducible across epochs.
    images2, labels2, table = load_pokemon(r'D:\下载（文档）\pokeman', mode='val')
    db_val = tf.data.Dataset.from_tensor_slices((images2, labels2))
    db_val = db_val.map(preprocess).batch(batchsz)
    # Test set: use the held-out 'test' split. The original code reloaded the
    # 'val' split here, so the final evaluation just repeated the validation
    # score. (Assumes load_pokemon supports mode='test' — confirm in PokemonData.)
    images3, labels3, table = load_pokemon(r'D:\下载（文档）\pokeman', mode='test')
    db_test = tf.data.Dataset.from_tensor_slices((images3, labels3))
    db_test = db_test.map(preprocess).batch(batchsz)

    # Sanity check: one batch from each pipeline plus the split sizes.
    sample = next(iter(db_train))
    print(sample[0].shape, sample[1].shape, len(images))
    sample1 = next(iter(db_val))
    print(sample1[0].shape, sample1[1].shape, len(images2))
    sample2 = next(iter(db_test))
    print(sample2[0].shape, sample2[1].shape, len(images3))

    # DenseNet121 backbone pretrained on ImageNet, without the classifier
    # head; global max pooling produces a flat feature vector.
    net = keras.applications.DenseNet121(weights='imagenet', include_top=False, pooling='max')
    # Freeze the backbone so only the new head is optimized, as the original
    # comment intended (the code previously set trainable=True by mistake).
    net.trainable = False
    newnet = keras.Sequential([
        net,                                    # frozen feature extractor
        layers.Dense(1024, activation='relu'),  # new fully-connected layer
        layers.BatchNormalization(),            # BN to stabilize the head
        layers.Dropout(rate=0.5),               # dropout against overfitting
        layers.Dense(5)                         # raw logits for the 5 classes
    ])

    # None for the batch dimension: don't hard-code batch size into the model.
    newnet.build(input_shape=(None, 224, 224, 3))
    newnet.summary()

    # Stop when val_accuracy fails to improve by min_delta for 3 epochs,
    # and keep the weights of the best epoch rather than the last one.
    early_stopping = EarlyStopping(
        monitor='val_accuracy',
        min_delta=0.001,
        patience=3,
        restore_best_weights=True
    )

    # preprocess() yields integer class indices, not one-hot vectors, so the
    # sparse variant of the loss is required (the original CategoricalCrossentropy
    # expects one-hot targets). 'lr' is deprecated in favor of 'learning_rate'.
    newnet.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                   loss=losses.SparseCategoricalCrossentropy(from_logits=True),
                   metrics=['accuracy'])
    history = newnet.fit(db_train, validation_data=db_val, validation_freq=1,
                         epochs=100, callbacks=[early_stopping])
    hist = history.history
    print(hist.keys())
    print(hist['val_accuracy'])
    print(hist['accuracy'])
    # evaluate() returns [loss, accuracy] because one metric is registered.
    test_loss, test_acc = newnet.evaluate(db_test)
    print('test accuracy:', test_acc)

# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()