import pathlib

import numpy as np
import tensorflow as tf

import GoogLeNet.InceptionV3.config as config
from GoogLeNet.InceptionV3.config import BATCH_SIZE


def load_and_preprocess_image(img_path):
    """Read one JPEG file and turn it into a normalized float image tensor.

    Args:
        img_path: scalar string tensor (or Python str) path to a JPEG file.

    Returns:
        A float32 tensor of shape (config.image_height, config.image_width,
        config.channels) with values scaled to [0, 1].
    """
    # read the raw bytes of the picture
    img_raw = tf.io.read_file(img_path)
    # decode as JPEG; the original referenced bare `channels` / `image_height` /
    # `image_width`, which are unbound here — they must come from the project
    # config, matching how config.* is used elsewhere in this file.
    # NOTE(review): assumes config defines channels/image_height/image_width — confirm.
    img_tensor = tf.image.decode_jpeg(img_raw, channels=config.channels)
    # resize to the network's expected input size
    img_tensor = tf.image.resize(img_tensor, [config.image_height, config.image_width])
    img_tensor = tf.cast(img_tensor, tf.float32)
    # normalize pixel values from [0, 255] to [0, 1]
    img = img_tensor / 255.0
    return img

def get_images_and_labels(data_root_dir):
    """Collect image paths and integer class labels from a class-per-folder tree.

    Expects a layout of ``data_root_dir/<class_name>/<image_file>``.

    Args:
        data_root_dir: path to the dataset root directory.

    Returns:
        Tuple ``(all_image_path, all_image_label)`` where the first element is
        a list of image paths (str) and the second is the parallel list of
        integer labels; class names are sorted so indices are deterministic.
    """
    data_root = pathlib.Path(data_root_dir)
    # every file one level below the class folders, as plain strings
    all_image_path = [str(path) for path in data_root.glob('*/*')]
    # class (folder) names, sorted for a stable name -> index assignment
    label_names = sorted(item.name for item in data_root.glob('*/'))
    # map class name -> integer index; the original built the same mapping but
    # with `label` and `index` bound backwards from enumerate(), which was
    # misleading to read
    label_to_index = {name: index for index, name in enumerate(label_names)}
    # each image's label comes from the name of its parent folder
    all_image_label = [label_to_index[pathlib.Path(single_image_path).parent.name]
                       for single_image_path in all_image_path]

    return all_image_path, all_image_label


def get_dataset(dataset_root_dir):
    """Build an unbatched (image, label) tf.data pipeline for one split.

    Args:
        dataset_root_dir: dataset root with one subfolder per class.

    Returns:
        Tuple ``(dataset, image_count)``: a zipped tf.data.Dataset of
        (preprocessed image, integer label) pairs, and the number of images.
    """
    paths, labels = get_images_and_labels(data_root_dir=dataset_root_dir)
    # decode/resize/normalize each image lazily as the dataset is iterated
    image_ds = tf.data.Dataset.from_tensor_slices(paths).map(load_and_preprocess_image)
    label_ds = tf.data.Dataset.from_tensor_slices(labels)
    return tf.data.Dataset.zip((image_ds, label_ds)), len(paths)


def generate_datasets():
    """Create the batched train/valid/test datasets from the config directories.

    Returns:
        ``(train_dataset, valid_dataset, test_dataset,
        train_count, valid_count, test_count)`` — three batched
        tf.data.Datasets and the number of images in each split.
    """
    split_dirs = (config.train_dir, config.valid_dir, config.test_dir)
    splits = [get_dataset(dataset_root_dir=split_dir) for split_dir in split_dirs]
    (train_dataset, train_count), (valid_dataset, valid_count), (test_dataset, test_count) = splits

    # only the training split is shuffled; every split is read in batches
    train_dataset = train_dataset.shuffle(buffer_size=train_count).batch(batch_size=config.BATCH_SIZE)
    valid_dataset = valid_dataset.batch(batch_size=config.BATCH_SIZE)
    test_dataset = test_dataset.batch(batch_size=config.BATCH_SIZE)

    return train_dataset, valid_dataset, test_dataset, train_count, valid_count, test_count


def preprocess(x, y):
    """Cast and rescale one (image, label) pair for training.

    Args:
        x: image data; cast to float32 and rescaled from [0, 255] to [-0.5, 0.5].
        y: label data; cast to int32.

    Returns:
        The ``(x, y)`` pair after casting and rescaling.
    """
    images = tf.cast(x, dtype=tf.float32) / 255. - 0.5
    labels = tf.cast(y, dtype=tf.int32)
    return images, labels


# Load MNIST; the original called `datasets.mnist.load_data()` but `datasets`
# was never imported — the Keras-bundled loader is tf.keras.datasets.
# (Downloads the data on first use.)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# [b, 28, 28] => [b, 28, 28, 1]: add the trailing channel axis expected by
# convolutional layers (uses numpy, added to the file's imports)
x_train, x_test = np.expand_dims(x_train, axis=3), np.expand_dims(x_test, axis=3)

# training set: build the dataset (arrays are converted to tensors
# automatically), preprocess, shuffle, then batch
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.map(preprocess).shuffle(10 * BATCH_SIZE).batch(BATCH_SIZE)

# test set: preprocessed and batched; the shuffle is kept from the original
# pipeline even though it is unnecessary for evaluation
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(preprocess).shuffle(10 * BATCH_SIZE).batch(BATCH_SIZE)