import random
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from tensorflow.keras import layers
import random

# Index (0-5) selecting which of the six augmentations `augment` applies;
# drawn once at import time, so the whole pipeline uses a single transform.
im_x = random.randint(0, 5)

# Load the tf_flowers dataset, split 80/10/10 into train/val/test.
# as_supervised=True makes each element an (image, label) tuple.
(train_ds, val_ds, test_ds), metadata = tfds.load(
    'tf_flowers',
    split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
    with_info=True,
    as_supervised=True,
)
print(train_ds)

# Number of label classes reported by the dataset metadata.
num_classes = metadata.features['label'].num_classes
print(num_classes)


def visualize(orig_images, change_image):
    """Display the original image and its transformed version side by side."""
    panels = (
        (1, 'original image', orig_images),
        (2, 'Transform the graph', change_image),
    )
    for position, caption, img in panels:
        plt.subplot(1, 2, position)
        plt.title(caption)
        plt.imshow(img)
    plt.show()


# Pull one (image, label) example to demonstrate the transforms below.
image, label = next(iter(train_ds))

flipped = tf.image.flip_left_right(image)
grayscaled = tf.image.rgb_to_grayscale(image)
# Drop the singleton channel axis so plt.imshow renders it as grayscale.
data_1 = tf.squeeze(grayscaled)
data_2 = tf.image.adjust_saturation(image, 3)
data_3 = tf.image.adjust_brightness(image, 0.4)
data_4 = tf.image.central_crop(image, central_fraction=0.5)
data_5 = tf.image.rot90(image)
image_data = [flipped, data_1, data_2, data_3, data_4, data_5]
#
# for i in image_data:
#     visualize(image, i)
# for i in range(6):
#     plt.subplot(3, 2, i + 1)
#     plt.imshow(image_data[i])
# plt.show()

# Side length every image is resized to before augmentation.
IMG_SIZE = 180
batch_size = 64
# Let tf.data choose parallelism / prefetch buffer sizes dynamically.
AUTOTUNE = tf.data.AUTOTUNE


def resize_and_rescale(image, label):
    """Cast to float32, resize to IMG_SIZE x IMG_SIZE, scale pixels to [0, 1]."""
    as_float = tf.cast(image, tf.float32)
    resized = tf.image.resize(as_float, [IMG_SIZE, IMG_SIZE])
    return resized / 255.0, label


def augment(image_label, seed):
    """Resize/rescale one example, apply seeded random crop + brightness,
    then apply the single extra transform selected by the module-level `im_x`.

    Args:
        image_label: (image, label) tuple as yielded by the dataset.
        seed: shape-(2,) seed for the stateless random ops (here the
            (counter, counter) pair zipped into the dataset).

    Returns:
        (image, label) with the transformed image.
        NOTE(review): the output shape depends on `im_x` — the grayscale
        branch squeezes away the channel axis and central_crop halves H/W;
        downstream consumers must expect that. TODO confirm intended.
    """
    image, label = image_label
    image, label = resize_and_rescale(image, label)
    # Pad by 6px, then randomly crop back to IMG_SIZE: a seeded random shift.
    image = tf.image.resize_with_crop_or_pad(image, IMG_SIZE + 6, IMG_SIZE + 6)
    # Derive an independent seed so crop and brightness are decorrelated.
    new_seed = tf.random.experimental.stateless_split(seed, num=1)[0, :]
    # Random crop back to the original size
    image = tf.image.stateless_random_crop(
        image, size=[IMG_SIZE, IMG_SIZE, 3], seed=seed)
    # Random brightness
    image = tf.image.stateless_random_brightness(
        image, max_delta=0.5, seed=new_seed)
    image = tf.clip_by_value(image, 0, 1)

    # Dispatch table: only the transform selected by `im_x` is computed.
    # (The original built all six tensors per element — wasted work in the
    # input pipeline's hot path; the result for the chosen index is the same.)
    transforms = (
        tf.image.flip_left_right,
        lambda img: tf.squeeze(tf.image.rgb_to_grayscale(img)),
        lambda img: tf.image.adjust_saturation(img, 3),
        lambda img: tf.image.adjust_brightness(img, 0.4),
        lambda img: tf.image.central_crop(img, central_fraction=0.5),
        tf.image.rot90,
    )
    image = transforms[im_x](image)

    return image, label


# Counter yields an ever-increasing scalar; zipping it in twice gives each
# element a (counter, counter) pair that serves as the shape-(2,) stateless
# seed consumed by `augment`.
counter = tf.data.experimental.Counter()
train_ds = tf.data.Dataset.zip((train_ds, (counter, counter)))

# Training pipeline: shuffle -> per-element augmentation -> batch -> prefetch.
train_ds = (
    train_ds
        .shuffle(1000)
        .map(augment, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
        .prefetch(AUTOTUNE)
)
print(train_ds)
#
# val_ds = (
#     val_ds
#         .map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
#         .batch(batch_size)
#         .prefetch(AUTOTUNE)
# )
#
# test_ds = (
#     test_ds
#         .map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
#         .batch(batch_size)
#         .prefetch(AUTOTUNE)
# )
#
#
# model1 = tf.keras.Sequential([
#     layers.Conv2D(32, 3, padding='same', activation='relu'),
#     layers.MaxPooling2D(),
#     layers.Conv2D(64, 3, padding='same', activation='relu'),
#     layers.MaxPooling2D(),
#     layers.Conv2D(128, 3, padding='same', activation='relu'),
#     layers.MaxPooling2D(),
#     layers.Conv2D(64, 3, padding='same', activation='relu'),
#     layers.MaxPooling2D(),
#     layers.Conv2D(32, 3, padding='same', activation='relu'),
#     layers.MaxPooling2D(),
#     layers.Flatten(),
#     layers.Dropout(0.4),
#     layers.Dense(128, activation='relu'),
#     layers.Dense(64, activation='relu'),
#     layers.Dense(32, activation='relu'),
#     layers.Dense(num_classes)
# ])
#
# model1.compile(optimizer='adam',
#               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#               metrics=['accuracy'])
#
# epochs = 5
# history = model1.fit(
#     train_ds,
#     validation_data=val_ds,
#     epochs=epochs
# )
# model1.summary()
#
# loss, acc = model1.evaluate(test_ds)
# print("Accuracy:", acc)
#
# acc1 = history.history['accuracy']
# val_acc = history.history['val_accuracy']
#
# loss1 = history.history['loss']
# val_loss = history.history['val_loss']
#
# plt.figure(figsize=(8, 8))
# plt.subplot(2, 1, 1)
# plt.plot(acc1, label='Training Accuracy')
# plt.plot(val_acc, label='Validation Accuracy')
# plt.legend(loc='lower right')
# plt.ylabel('Accuracy')
# plt.ylim([min(plt.ylim()), 1])
# plt.title('Training and Validation Accuracy')
#
# plt.subplot(2, 1, 2)
# plt.plot(loss1, label='Training Loss')
# plt.plot(val_loss, label='Validation Loss')
# plt.legend(loc='upper right')
# plt.ylabel('Cross Entropy')
# plt.ylim([0, 1.0])
# plt.title('Training and Validation Loss')
# plt.xlabel('epoch')
# plt.show()
