{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Lr9NyG4g-YlH"
      },
      "outputs": [],
      "source": [
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "# Single consolidated layer import (the original had a second, narrower import of\n",
        "# Dense/Flatten/Reshape that this line fully covers).\n",
        "from tensorflow.keras.layers import Dense, Flatten, Reshape, BatchNormalization, LeakyReLU, Conv2DTranspose, Conv2D\n",
        "from tensorflow.keras.models import Sequential\n",
        "\n",
        "# Load MNIST digits; labels are unused because the GAN is trained unsupervised.\n",
        "(train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()\n",
        "train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n",
        "train_images = (train_images - 127.5) / 127.5  # Scale pixels to [-1, 1] to match the generator's tanh output\n",
        "\n",
        "BUFFER_SIZE = 60000  # full training-set size, so the shuffle covers every example\n",
        "BATCH_SIZE = 256\n",
        "\n",
        "# Shuffle and batch the data\n",
        "train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n",
        "\n",
        "# Generator model\n",
        "def make_generator_model():\n",
        "    \"\"\"Build the DCGAN generator: 100-d noise vector -> 28x28x1 image in [-1, 1].\"\"\"\n",
        "    return Sequential([\n",
        "        # Project the latent vector up to a 7x7x256 feature map.\n",
        "        Dense(7*7*256, use_bias=False, input_shape=(100,)),\n",
        "        BatchNormalization(),\n",
        "        LeakyReLU(),\n",
        "        Reshape((7, 7, 256)),\n",
        "        # 7x7 -> 7x7 (stride 1), then upsample 7 -> 14 -> 28.\n",
        "        Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False),\n",
        "        BatchNormalization(),\n",
        "        LeakyReLU(),\n",
        "        Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False),\n",
        "        BatchNormalization(),\n",
        "        LeakyReLU(),\n",
        "        # tanh keeps outputs in [-1, 1], matching the normalized training images.\n",
        "        Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'),\n",
        "    ])\n",
        "\n",
        "# Discriminator model\n",
        "def make_discriminator_model():\n",
        "    \"\"\"Build the discriminator: 28x28x1 image -> single real/fake logit.\"\"\"\n",
        "    layers = [\n",
        "        Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]),\n",
        "        LeakyReLU(),\n",
        "        Flatten(),\n",
        "        Dense(1),  # raw logit; the loss below uses from_logits=True\n",
        "    ]\n",
        "    return Sequential(layers)\n",
        "\n",
        "generator = make_generator_model()\n",
        "discriminator = make_discriminator_model()\n",
        "\n",
        "# The discriminator emits raw logits, hence from_logits=True.\n",
        "cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n",
        "\n",
        "def generator_loss(fake_output):\n",
        "    \"\"\"Generator wants the discriminator to label its fakes as real (1).\"\"\"\n",
        "    return cross_entropy(tf.ones_like(fake_output), fake_output)\n",
        "\n",
        "def discriminator_loss(real_output, fake_output):\n",
        "    \"\"\"Discriminator wants real -> 1 and fake -> 0; total is the sum of both terms.\"\"\"\n",
        "    return (cross_entropy(tf.ones_like(real_output), real_output)\n",
        "            + cross_entropy(tf.zeros_like(fake_output), fake_output))\n",
        "\n",
        "# Separate optimizers: the two networks are trained adversarially, not jointly.\n",
        "generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
        "discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
        "\n",
        "EPOCHS = 1000\n",
        "noise_dim = 100\n",
        "num_examples_to_generate = 16\n",
        "\n",
        "# Fixed latent points so the progress snapshots are comparable across epochs.\n",
        "seed = tf.random.normal([num_examples_to_generate, noise_dim])\n",
        "\n",
        "def train_step(images):\n",
        "    \"\"\"Run one adversarial update on a batch; returns (gen_loss, disc_loss).\"\"\"\n",
        "    noise = tf.random.normal([BATCH_SIZE, noise_dim])\n",
        "\n",
        "    # The tapes should record only the forward pass and the losses.\n",
        "    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n",
        "        generated_images = generator(noise, training=True)\n",
        "        real_output = discriminator(images, training=True)\n",
        "        fake_output = discriminator(generated_images, training=True)\n",
        "\n",
        "        gen_loss = generator_loss(fake_output)\n",
        "        disc_loss = discriminator_loss(real_output, fake_output)\n",
        "\n",
        "    # Compute and apply gradients OUTSIDE the tape context. The original did this\n",
        "    # inside the `with` block, which records the backward pass on the tapes too,\n",
        "    # wasting memory and compute for no benefit.\n",
        "    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n",
        "    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n",
        "\n",
        "    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n",
        "    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n",
        "\n",
        "    return gen_loss, disc_loss\n",
        "\n",
        "def train(dataset, epochs):\n",
        "    \"\"\"Train the GAN; returns per-batch generator/discriminator losses for ALL epochs.\"\"\"\n",
        "    # Accumulate across epochs. The original re-initialized these lists inside the\n",
        "    # epoch loop, so it returned only the FINAL epoch's losses even though the\n",
        "    # downstream plot claims the full training history.\n",
        "    gen_loss_list = []\n",
        "    disc_loss_list = []\n",
        "\n",
        "    for epoch in range(epochs):\n",
        "        for image_batch in dataset:\n",
        "            gen_loss, disc_loss = train_step(image_batch)\n",
        "            gen_loss_list.append(gen_loss)\n",
        "            disc_loss_list.append(disc_loss)\n",
        "\n",
        "        # Produce progress images every 250 epochs.\n",
        "        if (epoch + 1) % 250 == 0:\n",
        "            generate_and_save_images(generator, epoch + 1, seed)\n",
        "\n",
        "        # Save the model every 50 epochs. NOTE(review): `checkpoint` and\n",
        "        # `checkpoint_prefix` are never defined in this notebook, so the original\n",
        "        # raised NameError at epoch 50; guarded until a tf.train.Checkpoint is set up.\n",
        "        if (epoch + 1) % 50 == 0 and 'checkpoint' in globals():\n",
        "            checkpoint.save(file_prefix=checkpoint_prefix)\n",
        "\n",
        "    return gen_loss_list, disc_loss_list\n",
        "\n",
        "# Generate and save a 4x4 grid of sample digits\n",
        "def generate_and_save_images(model, epoch, test_input):\n",
        "    \"\"\"Render the model's output for `test_input` and save it as a numbered PNG.\"\"\"\n",
        "    predictions = model(test_input, training=False)\n",
        "    fig = plt.figure(figsize=(4, 4))\n",
        "\n",
        "    for i, image in enumerate(predictions):\n",
        "        plt.subplot(4, 4, i + 1)\n",
        "        # Undo the [-1, 1] normalization for display.\n",
        "        plt.imshow(image[:, :, 0] * 127.5 + 127.5, cmap='gray')\n",
        "        plt.axis('off')\n",
        "\n",
        "    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n",
        "    plt.show()\n",
        "\n",
        "gen_loss_list, disc_loss_list = train(train_dataset, EPOCHS)\n",
        "\n",
        "# Plot both loss curves over training iterations (explicit Axes interface).\n",
        "fig, ax = plt.subplots(figsize=(10, 5))\n",
        "ax.set_title(\"Generator and Discriminator Loss During Training\")\n",
        "ax.plot(gen_loss_list, label=\"G\")\n",
        "ax.plot(disc_loss_list, label=\"D\")\n",
        "ax.set_xlabel(\"iterations\")\n",
        "ax.set_ylabel(\"Loss\")\n",
        "ax.legend()\n",
        "plt.show()\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "\n",
        "# NOTE(review): the original cell used typographic quotes (\u2019same\u2019, \u2018accuracy\u2018),\n",
        "# which is a Python SyntaxError; all quotes below are ASCII.\n",
        "\n",
        "# NOTE(review): `tf.keras.datasets.cranexd` is not a TensorFlow dataset, so the\n",
        "# original loader raised AttributeError. Replace these placeholders with a real\n",
        "# loader that yields float32 images of shape (N, 224, 224, 3) in [0, 1].\n",
        "x_train = tf.random.uniform((32, 224, 224, 3))  # TODO: real training images\n",
        "x_test = tf.random.uniform((8, 224, 224, 3))    # TODO: real test images\n",
        "\n",
        "# Image-to-image \"generator\": 224x224x3 in -> 224x224x3 out.\n",
        "# NOTE(review): the Dense(224*224*3) head is ~150M parameters; consider a\n",
        "# convolutional decoder instead.\n",
        "generator = tf.keras.Sequential([\n",
        "    tf.keras.layers.Input(shape=(224, 224, 3)),\n",
        "    tf.keras.layers.Conv2D(64, (3, 3), padding='same'),\n",
        "    tf.keras.layers.LeakyReLU(),\n",
        "    tf.keras.layers.MaxPooling2D((2, 2)),\n",
        "    tf.keras.layers.Conv2D(128, (3, 3), padding='same'),\n",
        "    tf.keras.layers.LeakyReLU(),\n",
        "    tf.keras.layers.MaxPooling2D((2, 2)),\n",
        "    tf.keras.layers.Flatten(),\n",
        "    tf.keras.layers.Dense(1024),\n",
        "    tf.keras.layers.LeakyReLU(),\n",
        "    tf.keras.layers.Dense(224 * 224 * 3),\n",
        "    tf.keras.layers.Reshape((224, 224, 3))\n",
        "])\n",
        "\n",
        "# Discriminator: image -> probability that the image is real (sigmoid output).\n",
        "discriminator = tf.keras.Sequential([\n",
        "    tf.keras.layers.Input(shape=(224, 224, 3)),\n",
        "    tf.keras.layers.Conv2D(64, (3, 3), padding='same'),\n",
        "    tf.keras.layers.LeakyReLU(),\n",
        "    tf.keras.layers.MaxPooling2D((2, 2)),\n",
        "    tf.keras.layers.Conv2D(128, (3, 3), padding='same'),\n",
        "    tf.keras.layers.LeakyReLU(),\n",
        "    tf.keras.layers.MaxPooling2D((2, 2)),\n",
        "    tf.keras.layers.Flatten(),\n",
        "    tf.keras.layers.Dense(1024),\n",
        "    tf.keras.layers.LeakyReLU(),\n",
        "    tf.keras.layers.Dense(1, activation='sigmoid')\n",
        "])\n",
        "\n",
        "# The sigmoid discriminator outputs probabilities, so from_logits stays False.\n",
        "gan_loss = tf.keras.losses.BinaryCrossentropy()\n",
        "generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
        "discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
        "\n",
        "# NOTE(review): the original called Keras `train_on_batch` inside @tf.function\n",
        "# with list inputs that did not match the single-input discriminator, and\n",
        "# \"trained\" the generator by merely computing a loss value (no gradients were\n",
        "# ever applied). Rewritten with explicit GradientTapes so both networks learn.\n",
        "@tf.function\n",
        "def train_step(x_batch):\n",
        "    \"\"\"One adversarial update on a batch; returns (discriminator_loss, generator_loss).\"\"\"\n",
        "    batch = tf.shape(x_batch)[0]\n",
        "    real_labels = tf.ones((batch, 1))\n",
        "    fake_labels = tf.zeros((batch, 1))\n",
        "\n",
        "    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n",
        "        fake_images = generator(x_batch, training=True)\n",
        "        real_output = discriminator(x_batch, training=True)\n",
        "        fake_output = discriminator(fake_images, training=True)\n",
        "        discriminator_loss = gan_loss(real_labels, real_output) + gan_loss(fake_labels, fake_output)\n",
        "        generator_loss = gan_loss(real_labels, fake_output)  # generator wants fakes judged real\n",
        "\n",
        "    disc_grads = disc_tape.gradient(discriminator_loss, discriminator.trainable_variables)\n",
        "    gen_grads = gen_tape.gradient(generator_loss, generator.trainable_variables)\n",
        "    discriminator_optimizer.apply_gradients(zip(disc_grads, discriminator.trainable_variables))\n",
        "    generator_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))\n",
        "    return discriminator_loss, generator_loss\n",
        "\n",
        "# Train the GAN\n",
        "epochs = 10\n",
        "for epoch in range(epochs):\n",
        "    discriminator_loss, generator_loss = train_step(x_train)\n",
        "    print(f'Epoch {epoch + 1}/{epochs}, Discriminator Loss: {discriminator_loss}, Generator Loss: {generator_loss}')\n",
        "\n",
        "# Generate images from the held-out inputs\n",
        "fake_images = generator.predict(x_test)\n",
        "\n",
        "# NOTE(review): save_img takes (path, img) in that order and expects a single\n",
        "# HWC image, not a batch; the original had both wrong.\n",
        "tf.keras.utils.save_img('generated_images.jpg', fake_images[0])\n"
      ],
      "metadata": {
        "id": "ZBpvqq-l-50g"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}