# encoding=utf-8

import os
import numpy as np
import cv2 as cv
import glob
from tqdm import tqdm
import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE


def read_data(background_dir='G:\\spx\\images_background',
              evaluation_dir='G:\\spx\\images_evaluation'):
    """Collect Omniglot image paths and integer class labels.

    Each character directory (two levels below a dataset root:
    root/alphabet/character) is treated as one class.  Both the
    background and evaluation splits are merged into a single label
    space.

    Args:
        background_dir: root of the images_background split.
        evaluation_dir: root of the images_evaluation split.

    Returns:
        (all_image_paths, all_image_labels): a list of .png file paths
        and a parallel list of integer class ids.
    """
    # One entry per character directory; its position defines the class id.
    label_dirs = []
    for root in (background_dir, evaluation_dir):
        for alphabet in glob.glob(os.path.join(root, '*')):
            label_dirs.extend(glob.glob(os.path.join(alphabet, '*')))
    print("omniglot数据集类别数量为", len(label_dirs))

    all_image_paths = []
    all_image_labels = []
    for class_id, class_dir in enumerate(label_dirs):
        image_paths = glob.glob(os.path.join(class_dir, '*.png'))
        # Every image in the directory shares the same class id.
        all_image_labels.extend([class_id] * len(image_paths))
        all_image_paths.extend(image_paths)
    return all_image_paths, all_image_labels


def preprocess_image(image):
    """Decode a raw PNG byte string to a normalized 32x32x3 float tensor.

    Args:
        image: scalar string tensor holding the encoded file contents.

    Returns:
        float32 tensor of shape (32, 32, 3) with values in [0, 1].
    """
    # The dataset is globbed as '*.png' (see read_data), so decode as PNG.
    # The original decode_jpeg call only worked via an undocumented quirk
    # of the DecodeJpeg op.
    image = tf.image.decode_png(image, channels = 3)
    image = tf.image.resize(image, [32, 32])
    image /= 255.0  # normalize to [0,1] range
    return image


def load_and_preprocess_image(path):
    """Read the file at *path* from disk and run it through preprocess_image."""
    raw_bytes = tf.io.read_file(path)
    return preprocess_image(raw_bytes)


# 元组被解压缩到映射函数的位置参数中
# The (path, label) tuple is unpacked into the map function's positional args.
def load_and_preprocess_from_path_label(path, label):
    """Dataset.map adapter: turn (path, label) into (decoded image, label)."""
    image = load_and_preprocess_image(path)
    return image, label


def change_range(image, label):
    """Rescale pixel values from [0, 1] to [-1, 1]; the label passes through."""
    rescaled = image * 2 - 1
    return rescaled, label


if __name__ == '__main__':
    # Build the (path, label) dataset.
    all_image_paths, all_image_labels = read_data()
    ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))

    # Decode images in parallel while the pipeline runs.
    image_label_ds = ds.map(load_and_preprocess_from_path_label,
                            num_parallel_calls = AUTOTUNE)
    BATCH_SIZE = 32

    # Use a shuffle buffer as large as the dataset so the data is
    # fully shuffled.
    image_count = len(all_image_paths)
    ds = image_label_ds.shuffle(buffer_size = image_count)
    ds = ds.repeat()
    ds = ds.batch(BATCH_SIZE)
    # `prefetch` lets the dataset fetch batches in the background
    # while the model is training.
    ds = ds.prefetch(buffer_size = AUTOTUNE)
    # Initialize the DenseNet169 convolutional backbone (headless).
    base_model = tf.keras.applications.DenseNet169(input_shape = (32, 32, 3), include_top = False)
    # Freeze the pretrained weights.
    base_model.trainable = False

    # DenseNet expects inputs in [-1, 1], not [0, 1].
    keras_ds = ds.map(change_range)
    # The dataset may take a few seconds to start while it fills
    # its shuffle buffer.
    image_batch, label_batch = next(iter(keras_ds))
    feature_map_batch = base_model(image_batch)
    print(feature_map_batch.shape)
    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(1000, activation = 'relu'),
        # 1623 Omniglot character classes.
        tf.keras.layers.Dense(1623, activation = 'softmax')])
    logit_batch = model(image_batch).numpy()

    print("Shape:", logit_batch.shape)
    model.compile(optimizer = tf.keras.optimizers.Adam(),
                  loss = 'sparse_categorical_crossentropy',
                  metrics = ["accuracy"])
    print(model.summary())
    # One epoch covers the whole dataset; previously this value was
    # computed but model.fit hard-coded 16 steps instead.
    steps_per_epoch = int(np.ceil(len(all_image_paths) / BATCH_SIZE))
    # 2000 epochs gives decent accuracy — be patient.
    model.fit(ds, epochs = 2000, steps_per_epoch = steps_per_epoch)
    model.save('DenseNet169.h5')