{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "tqcpGC_OD3wo"
      },
      "outputs": [],
      "source": [
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "from tensorflow import keras\n",
        "from tensorflow.keras.datasets import mnist\n",
        "from tensorflow.keras.models import Model\n",
        "from tensorflow.keras.layers import Input, Dense, Lambda\n",
        "from tensorflow.keras import backend as K\n",
        "\n",
        "# Load MNIST dataset (labels are unused: the VAE is unsupervised)\n",
        "(x_train, _), (x_test, _) = mnist.load_data()\n",
        "\n",
        "# Normalize to [0, 1] and flatten 28x28 images to 784-dim vectors\n",
        "x_train = x_train.astype('float32') / 255.\n",
        "x_test = x_test.astype('float32') / 255.\n",
        "x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\n",
        "x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n",
        "\n",
        "# Define VAE architecture\n",
        "input_dim = x_train.shape[1]\n",
        "latent_dim = 2  # 2-dimensional latent space\n",
        "\n",
        "# Encoder: one hidden layer, then linear heads for mean and log-variance\n",
        "input_img = Input(shape=(input_dim,))\n",
        "encoded = Dense(256, activation='relu')(input_img)\n",
        "z_mean = Dense(latent_dim)(encoded)\n",
        "z_log_var = Dense(latent_dim)(encoded)\n",
        "\n",
        "\n",
        "def sampling(args):\n",
        "    \"\"\"Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).\"\"\"\n",
        "    z_mean, z_log_var = args\n",
        "    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.0)\n",
        "    return z_mean + K.exp(0.5 * z_log_var) * epsilon\n",
        "\n",
        "\n",
        "z = Lambda(sampling)([z_mean, z_log_var])\n",
        "\n",
        "# Decoder: mirror of the encoder; sigmoid keeps pixel values in [0, 1]\n",
        "decoder_input = Input(shape=(latent_dim,))\n",
        "decoded = Dense(256, activation='relu')(decoder_input)\n",
        "output_img = Dense(input_dim, activation='sigmoid')(decoded)\n",
        "\n",
        "# Stand-alone encoder (mean only) and decoder models\n",
        "encoder = Model(input_img, z_mean)\n",
        "decoder = Model(decoder_input, output_img)\n",
        "\n",
        "# End-to-end VAE: encode, sample, decode\n",
        "vae_output = decoder(z)\n",
        "vae = Model(input_img, vae_output)\n",
        "\n",
        "# VAE loss. binary_crossentropy averages over the pixel axis, so scale by\n",
        "# input_dim to get a per-image sum; otherwise the KL term outweighs the\n",
        "# reconstruction term ~784x and the latent space collapses. (This matches\n",
        "# the scaling used by the convolutional VAE cell below.)\n",
        "reconstruction_loss = keras.losses.binary_crossentropy(input_img, vae_output)\n",
        "reconstruction_loss *= input_dim\n",
        "kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n",
        "vae_loss = K.mean(reconstruction_loss + kl_loss)\n",
        "\n",
        "vae.add_loss(vae_loss)\n",
        "vae.compile(optimizer='adam')\n",
        "vae.summary()\n",
        "\n",
        "# Train the VAE (no targets needed: the loss was attached via add_loss)\n",
        "epochs = 100\n",
        "batch_size = 128\n",
        "history = vae.fit(x_train, epochs=epochs, batch_size=batch_size, validation_data=(x_test, None))\n",
        "\n",
        "# Project the test set into the latent space (encoder returns z_mean)\n",
        "latent_points_vae = encoder.predict(x_test)\n",
        "\n",
        "# Generate latent samples from a Dirichlet distribution\n",
        "# NOTE(review): Dirichlet points lie on the probability simplex, while the\n",
        "# VAE prior is N(0, I); np.random.normal would match the prior -- confirm\n",
        "# the Dirichlet choice is intentional.\n",
        "samples = np.random.dirichlet(np.ones(latent_dim), size=10)\n",
        "\n",
        "# Visualize the latent space after training\n",
        "plt.figure(figsize=(6, 6))\n",
        "plt.scatter(latent_points_vae[:, 0], latent_points_vae[:, 1], c='b', cmap='rainbow')\n",
        "plt.title('Latent Space Visualization (VAE)')\n",
        "plt.xlabel('Latent Dimension 1')\n",
        "plt.ylabel('Latent Dimension 2')\n",
        "plt.show()\n",
        "\n",
        "# Decode and visualize the sampled latent points. Each sample gets its own\n",
        "# subplot: the previous version drew all 10 images into one axes (so only\n",
        "# the last was visible) and called tight_layout() inside the loop.\n",
        "decoded_samples = decoder.predict(samples)\n",
        "decoded_samples = decoded_samples.reshape(-1, 28, 28)\n",
        "\n",
        "plt.figure(figsize=(10, 2))\n",
        "for i in range(10):\n",
        "    plt.subplot(1, 10, i + 1)\n",
        "    plt.imshow(decoded_samples[i], cmap='gray')\n",
        "    plt.xticks([])\n",
        "    plt.yticks([])\n",
        "plt.suptitle('Generated Samples (VAE)')\n",
        "plt.tight_layout()\n",
        "plt.show()\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "from tensorflow import keras\n",
        "from tensorflow.keras.datasets import mnist\n",
        "from tensorflow.keras.models import Model\n",
        "from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, Lambda\n",
        "from tensorflow.keras import backend as K\n",
        "\n",
        "# Load MNIST dataset (labels are unused: the VAE is unsupervised)\n",
        "(x_train, _), (x_test, _) = mnist.load_data()\n",
        "\n",
        "# Normalize to [0, 1] and add a trailing channel axis for Conv2D\n",
        "x_train = x_train.astype('float32') / 255.\n",
        "x_test = x_test.astype('float32') / 255.\n",
        "x_train = np.expand_dims(x_train, axis=-1)\n",
        "x_test = np.expand_dims(x_test, axis=-1)\n",
        "\n",
        "# Define VAE architecture\n",
        "input_shape = x_train.shape[1:]\n",
        "latent_dim = 2  # 2-dimensional latent space\n",
        "\n",
        "# Encoder: two strided convolutions (28 -> 14 -> 7), then dense heads\n",
        "inputs = Input(shape=input_shape)\n",
        "x = Conv2D(32, 3, activation='relu', strides=2, padding='same')(inputs)\n",
        "x = Conv2D(64, 3, activation='relu', strides=2, padding='same')(x)\n",
        "x = Flatten()(x)\n",
        "z_mean = Dense(latent_dim)(x)\n",
        "z_log_var = Dense(latent_dim)(x)\n",
        "\n",
        "\n",
        "def sampling(args):\n",
        "    \"\"\"Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).\"\"\"\n",
        "    z_mean, z_log_var = args\n",
        "    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.0)\n",
        "    return z_mean + K.exp(0.5 * z_log_var) * epsilon\n",
        "\n",
        "\n",
        "z = Lambda(sampling)([z_mean, z_log_var])\n",
        "\n",
        "# Decoder: mirror of the encoder (7 -> 14 -> 28)\n",
        "decoder_inputs = Input(shape=(latent_dim,))\n",
        "x = Dense(7 * 7 * 64, activation='relu')(decoder_inputs)\n",
        "x = Reshape((7, 7, 64))(x)\n",
        "x = Conv2DTranspose(64, 3, activation='relu', strides=2, padding='same')(x)\n",
        "x = Conv2DTranspose(32, 3, activation='relu', strides=2, padding='same')(x)\n",
        "outputs = Conv2DTranspose(1, 3, activation='sigmoid', padding='same')(x)\n",
        "\n",
        "# Stand-alone encoder (mean only) and decoder models\n",
        "encoder = Model(inputs, z_mean)\n",
        "decoder = Model(decoder_inputs, outputs)\n",
        "\n",
        "# End-to-end VAE. Decode the SAMPLED z, not encoder(vae_inputs): the\n",
        "# encoder model outputs z_mean, so routing it into the decoder would\n",
        "# bypass the reparameterization trick and leave z_log_var trained only\n",
        "# by the KL term.\n",
        "vae_inputs = inputs\n",
        "vae_outputs = decoder(z)\n",
        "vae = Model(vae_inputs, vae_outputs)\n",
        "\n",
        "# VAE loss: per-image summed reconstruction error plus KL divergence\n",
        "reconstruction_loss = keras.losses.binary_crossentropy(K.flatten(vae_inputs), K.flatten(vae_outputs))\n",
        "reconstruction_loss *= input_shape[0] * input_shape[1]\n",
        "kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n",
        "vae_loss = K.mean(reconstruction_loss + kl_loss)\n",
        "\n",
        "vae.add_loss(vae_loss)\n",
        "vae.compile(optimizer='adam')\n",
        "vae.summary()\n",
        "\n",
        "# Train the VAE (no targets needed: the loss was attached via add_loss)\n",
        "epochs = 100\n",
        "batch_size = 128\n",
        "history = vae.fit(x_train, epochs=epochs, batch_size=batch_size, validation_data=(x_test, None))\n",
        "\n",
        "# Project the test set into the latent space (encoder returns z_mean)\n",
        "latent_points_vae = encoder.predict(x_test)\n",
        "\n",
        "# Generate latent samples from a Dirichlet distribution\n",
        "# NOTE(review): Dirichlet points lie on the probability simplex, while the\n",
        "# VAE prior is N(0, I); np.random.normal would match the prior.\n",
        "samples = np.random.dirichlet(np.ones(latent_dim), size=10)\n",
        "\n",
        "# Visualize the latent space after training\n",
        "plt.figure(figsize=(6, 6))\n",
        "plt.scatter(latent_points_vae[:, 0], latent_points_vae[:, 1], c='b', cmap='rainbow')\n",
        "plt.title('Latent Space Visualization (VAE)')\n",
        "plt.xlabel('Latent Dimension 1')\n",
        "plt.ylabel('Latent Dimension 2')\n",
        "plt.show()\n",
        "\n",
        "# Decode and visualize the sampled latent points, one subplot per sample\n",
        "# (drawing all 10 into a single axes would show only the last image)\n",
        "decoded_samples = decoder.predict(samples)\n",
        "decoded_samples = decoded_samples.reshape(-1, 28, 28)\n",
        "\n",
        "plt.figure(figsize=(10, 2))\n",
        "for i in range(10):\n",
        "    plt.subplot(1, 10, i + 1)\n",
        "    plt.imshow(decoded_samples[i], cmap='gray')\n",
        "    plt.xticks([])\n",
        "    plt.yticks([])\n",
        "plt.suptitle('Generated Samples (VAE)')\n",
        "plt.tight_layout()\n",
        "plt.show()\n"
      ],
      "metadata": {
        "id": "9pdMwLaJENHl"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Define VAE architecture (dense encoder/decoder for flattened MNIST)\n",
        "# NOTE(review): assumes x_train is the flattened (N, 784) array from the\n",
        "# first cell, not the (N, 28, 28, 1) tensor from the convolutional cell --\n",
        "# confirm execution order on a fresh kernel.\n",
        "input_dim = x_train.shape[1]\n",
        "latent_dim = 2  # 2-dimensional latent space\n",
        "\n",
        "# Encoder: one hidden layer, then linear heads for mean and log-variance\n",
        "input_img = Input(shape=(input_dim,))\n",
        "encoded = Dense(256, activation='relu')(input_img)\n",
        "z_mean = Dense(latent_dim)(encoded)\n",
        "z_log_var = Dense(latent_dim)(encoded)\n",
        "\n",
        "\n",
        "def sampling(args):\n",
        "    \"\"\"Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).\"\"\"\n",
        "    z_mean, z_log_var = args\n",
        "    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.0)\n",
        "    return z_mean + K.exp(0.5 * z_log_var) * epsilon\n",
        "\n",
        "\n",
        "z = Lambda(sampling)([z_mean, z_log_var])\n",
        "\n",
        "# Decoder: mirror of the encoder; sigmoid keeps pixel values in [0, 1]\n",
        "decoder_input = Input(shape=(latent_dim,))\n",
        "decoded = Dense(256, activation='relu')(decoder_input)\n",
        "output_img = Dense(input_dim, activation='sigmoid')(decoded)\n"
      ],
      "metadata": {
        "id": "MZ-AQaDCEUaD"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Define VAE architecture (convolutional encoder/decoder)\n",
        "# NOTE(review): assumes x_train has shape (N, 28, 28, 1) as produced by the\n",
        "# convolutional cell above -- confirm execution order on a fresh kernel.\n",
        "input_shape = x_train.shape[1:]\n",
        "latent_dim = 2  # 2-dimensional latent space\n",
        "\n",
        "# Encoder: two strided convolutions (28 -> 14 -> 7), then dense heads\n",
        "inputs = Input(shape=input_shape)\n",
        "x = Conv2D(32, 3, activation='relu', strides=2, padding='same')(inputs)\n",
        "x = Conv2D(64, 3, activation='relu', strides=2, padding='same')(x)\n",
        "x = Flatten()(x)\n",
        "z_mean = Dense(latent_dim)(x)\n",
        "z_log_var = Dense(latent_dim)(x)\n",
        "\n",
        "\n",
        "def sampling(args):\n",
        "    \"\"\"Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).\"\"\"\n",
        "    z_mean, z_log_var = args\n",
        "    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.0)\n",
        "    return z_mean + K.exp(0.5 * z_log_var) * epsilon\n",
        "\n",
        "\n",
        "z = Lambda(sampling)([z_mean, z_log_var])\n",
        "\n",
        "# Decoder: mirror of the encoder (7 -> 14 -> 28)\n",
        "decoder_inputs = Input(shape=(latent_dim,))\n",
        "x = Dense(7 * 7 * 64, activation='relu')(decoder_inputs)\n",
        "x = Reshape((7, 7, 64))(x)\n",
        "x = Conv2DTranspose(64, 3, activation='relu', strides=2, padding='same')(x)\n",
        "x = Conv2DTranspose(32, 3, activation='relu', strides=2, padding='same')(x)\n",
        "outputs = Conv2DTranspose(1, 3, activation='sigmoid', padding='same')(x)\n"
      ],
      "metadata": {
        "id": "ffY5V0INFTCh"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "\n",
        "# Load Fashion MNIST dataset (labels are unused: the VAE is unsupervised)\n",
        "(train_images, _), (test_images, _) = tf.keras.datasets.fashion_mnist.load_data()\n",
        "\n",
        "# Normalize to [0, 1] and add a channel axis for Conv2D\n",
        "train_images = train_images.astype('float32') / 255.0\n",
        "train_images = np.reshape(train_images, (-1, 28, 28, 1))\n",
        "\n",
        "# Define Conv2D VAE architecture\n",
        "latent_dim = 2\n",
        "\n",
        "\n",
        "class Sampling(tf.keras.layers.Layer):\n",
        "    \"\"\"Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).\"\"\"\n",
        "\n",
        "    def call(self, inputs):\n",
        "        z_mean, z_log_var = inputs\n",
        "        batch = tf.shape(z_mean)[0]\n",
        "        dim = tf.shape(z_mean)[1]\n",
        "        epsilon = tf.random.normal(shape=(batch, dim), mean=0.0, stddev=1.0)\n",
        "        return z_mean + tf.exp(0.5 * z_log_var) * epsilon\n",
        "\n",
        "\n",
        "# Encoder: two strided convolutions (28 -> 14 -> 7), then dense heads\n",
        "encoder_inputs = tf.keras.Input(shape=(28, 28, 1))\n",
        "x = tf.keras.layers.Conv2D(32, 3, activation='relu', strides=2, padding='same')(encoder_inputs)\n",
        "x = tf.keras.layers.Conv2D(64, 3, activation='relu', strides=2, padding='same')(x)\n",
        "x = tf.keras.layers.Flatten()(x)\n",
        "z_mean = tf.keras.layers.Dense(latent_dim)(x)\n",
        "z_log_var = tf.keras.layers.Dense(latent_dim)(x)\n",
        "z = Sampling()([z_mean, z_log_var])\n",
        "encoder = tf.keras.Model(encoder_inputs, [z_mean, z_log_var, z], name='encoder')\n",
        "\n",
        "# Decoder: mirror of the encoder (7 -> 14 -> 28)\n",
        "decoder_inputs = tf.keras.Input(shape=(latent_dim,))\n",
        "x = tf.keras.layers.Dense(7 * 7 * 64, activation='relu')(decoder_inputs)\n",
        "x = tf.keras.layers.Reshape((7, 7, 64))(x)\n",
        "x = tf.keras.layers.Conv2DTranspose(64, 3, activation='relu', strides=2, padding='same')(x)\n",
        "x = tf.keras.layers.Conv2DTranspose(32, 3, activation='relu', strides=2, padding='same')(x)\n",
        "decoder_outputs = tf.keras.layers.Conv2DTranspose(1, 3, activation='sigmoid', padding='same')(x)\n",
        "decoder = tf.keras.Model(decoder_inputs, decoder_outputs, name='decoder')\n",
        "\n",
        "# Create VAE model: decode the sampled z (index 2 of the encoder outputs)\n",
        "vae_inputs = encoder_inputs\n",
        "vae_outputs = decoder(encoder(encoder_inputs)[2])\n",
        "vae_model = tf.keras.Model(vae_inputs, vae_outputs, name='vae')\n",
        "\n",
        "\n",
        "def loss_func(encoder_mu, encoder_log_variance):\n",
        "    \"\"\"Build a VAE loss that closes over the encoder's mu / log-variance.\n",
        "\n",
        "    Returns a Keras-compatible loss(y_true, y_predict) combining a scaled\n",
        "    per-image MSE reconstruction term with the Gaussian KL divergence.\n",
        "    \"\"\"\n",
        "    def vae_reconstruction_loss(y_true, y_predict):\n",
        "        # Scale factor balances the per-pixel MSE against the KL term\n",
        "        reconstruction_loss_factor = 1000\n",
        "        reconstruction_loss = tf.keras.backend.mean(tf.keras.backend.square(y_true - y_predict), axis=[1, 2, 3])\n",
        "        return reconstruction_loss_factor * reconstruction_loss\n",
        "\n",
        "    def vae_kl_loss():\n",
        "        # KL(N(mu, sigma^2) || N(0, I)) summed over the latent dimensions\n",
        "        return -0.5 * tf.keras.backend.sum(1.0 + encoder_log_variance - tf.keras.backend.square(encoder_mu) - tf.keras.backend.exp(encoder_log_variance), axis=1)\n",
        "\n",
        "    def vae_loss(y_true, y_predict):\n",
        "        # BUG FIX: the KL term must come from the closed-over encoder_mu /\n",
        "        # encoder_log_variance; the previous version called\n",
        "        # vae_kl_loss(y_true, y_predict), which shadowed those tensors and\n",
        "        # silently computed the \"KL\" over the image batches instead.\n",
        "        return vae_reconstruction_loss(y_true, y_predict) + vae_kl_loss()\n",
        "\n",
        "    return vae_loss\n",
        "\n",
        "\n",
        "# Compile the model with the custom VAE loss\n",
        "vae_model.compile(optimizer='adam', loss=loss_func(z_mean, z_log_var))\n",
        "\n",
        "# Train the model (inputs double as the reconstruction targets)\n",
        "history = vae_model.fit(train_images, train_images, epochs=20, batch_size=32, shuffle=True)\n",
        "\n",
        "# Plot the loss curve\n",
        "plt.plot(history.history['loss'])\n",
        "plt.title('VAE Loss')\n",
        "plt.xlabel('Epoch')\n",
        "plt.ylabel('Loss')\n",
        "plt.show()\n",
        "\n",
        "# Generate samples labelled \"Trouser\" and \"Shirt\"\n",
        "# NOTE(review): both batches are unconditional draws from the N(0, I)\n",
        "# prior; nothing constrains them to a clothing class, so the titles below\n",
        "# are aspirational. Targeting a class would need a conditional VAE or\n",
        "# sampling near the latent means of encoded class examples.\n",
        "num_samples = 10\n",
        "\n",
        "z_samples_trouser = np.random.normal(size=(num_samples, latent_dim))\n",
        "generated_images_trouser = decoder.predict(z_samples_trouser)\n",
        "\n",
        "z_samples_shirt = np.random.normal(size=(num_samples, latent_dim))\n",
        "generated_images_shirt = decoder.predict(z_samples_shirt)\n",
        "\n",
        "# Plot generated samples: top row first batch, bottom row second batch\n",
        "plt.figure(figsize=(15, 4))\n",
        "for i in range(num_samples):\n",
        "    plt.subplot(2, num_samples, i + 1)\n",
        "    plt.imshow(generated_images_trouser[i].reshape(28, 28), cmap='gray')\n",
        "    plt.title('Trouser')\n",
        "    plt.axis('off')\n",
        "\n",
        "    plt.subplot(2, num_samples, num_samples + i + 1)\n",
        "    plt.imshow(generated_images_shirt[i].reshape(28, 28), cmap='gray')\n",
        "    plt.title('Shirt')\n",
        "    plt.axis('off')\n",
        "\n",
        "plt.show()\n"
      ],
      "metadata": {
        "id": "JRajulXyFYnM"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}