import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential


def preprocess(x, y):
    """Scale images into [0, 1] as float32 and cast labels to int32."""
    return tf.cast(x, dtype=tf.float32) / 255., tf.cast(y, dtype=tf.int32)


# VGG13-style feature extractor: five units of (Conv-Conv-MaxPool),
# widening the channel count per unit: 64 -> 128 -> 256 -> 512 -> 512.
convolution_layers = []
for num_filters in (64, 128, 256, 512, 512):
    convolution_layers.extend([
        layers.Conv2D(num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
        layers.Conv2D(num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
        # Each max-pool halves the spatial resolution (32 -> 16 -> ... -> 1).
        layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
    ])

# Assemble the two sub-networks: the convolutional feature extractor and a
# fully connected classifier head emitting 100-way logits (CIFAR-100).
convolution_net = Sequential(convolution_layers)
full_connection_net = Sequential(
    [
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        # No activation here: raw logits; softmax happens in the loss / eval.
        layers.Dense(100, activation=None)
    ]
)

# Build with known input shapes so the trainable variables are created
# eagerly and summary() can report parameter counts.
convolution_net.build(input_shape=[None, 32, 32, 3])
full_connection_net.build(input_shape=[None, 512])
# Concatenate both variable lists so one optimizer updates the whole model.
vgg_trainable_variables = convolution_net.trainable_variables + full_connection_net.trainable_variables

# Fix: `lr` is a deprecated alias in TF2 Keras; use `learning_rate`.
optimizer = optimizers.Adam(learning_rate=1e-4)

# Fix: summary() prints its table and returns None, so wrapping it in
# print() emitted a spurious trailing "None" line.
convolution_net.summary()
full_connection_net.summary()

batch_size = 64

# CIFAR-100: train/test image tensors of shape (N, 32, 32, 3) with labels.
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
print('cifar 100: ', x.shape, y.shape, x_test.shape, y_test.shape)

# Labels arrive as [N, 1]; drop the singleton axis so they become [N].
y, y_test = tf.squeeze(y, axis=1), tf.squeeze(y_test, axis=1)
print('cifar 100(after squeeze): ', x.shape, y.shape, x_test.shape, y_test.shape)

train_ds = (tf.data.Dataset.from_tensor_slices((x, y))
            .shuffle(1000)
            .map(preprocess)
            .batch(batch_size=batch_size))

test_ds = (tf.data.Dataset.from_tensor_slices((x_test, y_test))
           .map(preprocess)
           .batch(batch_size=64))

for epoch in range(50):
    # --- training pass ---
    for step, (images, labels) in enumerate(train_ds):
        with tf.GradientTape() as tape:
            # [b, 32, 32, 3] -> [b, 1, 1, 512] -> [b, 512]
            features = tf.reshape(convolution_net(images), [-1, 512])

            # [b, 512] -> [b, 100] raw class logits
            logits_out = full_connection_net(features)

            # One-hot targets for the 100-way cross-entropy.
            targets = tf.one_hot(labels, depth=100)

            loss = tf.reduce_mean(
                tf.losses.categorical_crossentropy(targets, logits_out, from_logits=True))

        grads = tape.gradient(loss, vgg_trainable_variables)
        optimizer.apply_gradients(zip(grads, vgg_trainable_variables))

        if step % 10 == 0:
            print(epoch, step, 'loss: ', float(loss))

    # --- evaluation pass over the test set ---
    total_num = 0
    total_correct = 0

    for images, labels in test_ds:
        features = tf.reshape(convolution_net(images), [-1, 512])
        logits = full_connection_net(features)
        # argmax of the softmax == argmax of the logits; kept for parity.
        preds = tf.argmax(tf.nn.softmax(logits, axis=1), axis=1)
        preds = tf.cast(preds, dtype=tf.int32)

        hits = tf.reduce_sum(tf.cast(tf.equal(preds, labels), dtype=tf.int32))

        total_num += images.shape[0]
        total_correct += int(hits)

    accuracy = total_correct / total_num
    print(epoch, 'accuracy: ', accuracy)
