{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "G_mUFqHSMRKD"
      },
      "outputs": [],
      "source": [
        "import matplotlib.pyplot as plt\n",
        "import torch\n",
        "import torchvision\n",
        "from torchvision.transforms import ToTensor\n",
        "\n",
        "DATA_DIRECTORY = './data'  # Specify the directory where CIFAR-10 is stored\n",
        "\n",
        "# Load the CIFAR-10 training split (downloads on first run)\n",
        "cifar10_dataset = torchvision.datasets.CIFAR10(DATA_DIRECTORY, train=True, download=True)\n",
        "\n",
        "# Map each CIFAR-10 integer label to its human-readable class name\n",
        "class_metadata = {\n",
        "    0: 'airplane',\n",
        "    1: 'automobile',\n",
        "    2: 'bird',\n",
        "    3: 'cat',\n",
        "    4: 'deer',\n",
        "    5: 'dog',\n",
        "    6: 'frog',\n",
        "    7: 'horse',\n",
        "    8: 'ship',\n",
        "    9: 'truck'\n",
        "}\n",
        "\n",
        "def visualize_cifar10_images(dataset, start_index, num_images):\n",
        "    \"\"\"Plot num_images consecutive dataset images starting at start_index.\n",
        "\n",
        "    Args:\n",
        "        dataset: torchvision CIFAR-10 dataset yielding (PIL image, label).\n",
        "        start_index: index of the first image to display.\n",
        "        num_images: how many images to show side by side.\n",
        "    \"\"\"\n",
        "    fig, axes = plt.subplots(1, num_images, figsize=(12, 3))\n",
        "\n",
        "    for i in range(num_images):\n",
        "        index = start_index + i\n",
        "        image, _ = dataset[index]\n",
        "        class_label = class_metadata[dataset.targets[index]]\n",
        "\n",
        "        tensor_image = ToTensor()(image)  # Convert PIL image to Torch Tensor\n",
        "\n",
        "        axes[i].imshow(tensor_image.permute(1, 2, 0))  # CHW -> HWC for matplotlib\n",
        "        axes[i].set_title(f'ID: {index}, Class: {class_label}')\n",
        "        axes[i].axis('off')\n",
        "\n",
        "    plt.tight_layout()\n",
        "    plt.show()\n",
        "\n",
        "# Usage example:\n",
        "start_index = 0  # Start index of the images to visualize\n",
        "num_images = 5  # Number of images to display\n",
        "\n",
        "visualize_cifar10_images(cifar10_dataset, start_index, num_images)\n",
        "\n",
        "# `import torch` was added to this cell: previously torch was only imported in a\n",
        "# later cell, so this line raised NameError on a fresh Restart-&-Run-All.\n",
        "torch.cuda.is_available()\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "import requests\n",
        "\n",
        "import tqdm\n",
        "import httpimport\n",
        "import pinecone\n",
        "import numpy as np\n",
        "from PIL import Image\n",
        "\n",
        "import torch\n",
        "\n",
        "# NOTE(review): `h` is used below (h.preprocess, h.show_random_images_from_full_dataset)\n",
        "# but is never bound anywhere in this notebook -- presumably a helper module meant to\n",
        "# be loaded via httpimport; confirm and add that import, otherwise this cell raises\n",
        "# NameError on a fresh run.\n",
        "# NOTE(review): `torchvision` relies on the import in the first cell.\n",
        "DATA_DIRECTORY = 'tmp'  # download/cache directory for both datasets\n",
        "INDEX_NAME = 'image-search'  # Pinecone index name\n",
        "INDEX_DIMENSION = 1000  # embedding size -- presumably the model's 1000-way output; verify\n",
        "BATCH_SIZE=200  # rows embedded and upserted per batch\n",
        "# Load both datasets with the helper's preprocessing transform applied on access\n",
        "datasets = {\n",
        "    'CIFAR10': torchvision.datasets.CIFAR10(DATA_DIRECTORY, transform=h.preprocess, download=True),\n",
        "    'CIFAR100': torchvision.datasets.CIFAR100(DATA_DIRECTORY, transform=h.preprocess, download=True)\n",
        "}\n",
        "\n",
        "# Concatenate the two datasets end-to-end (CIFAR10 rows first, then CIFAR100)\n",
        "combined_dataset = torch.utils.data.ConcatDataset(list(datasets.values()))\n",
        "# Calculate the dimensionality of the combined dataset\n",
        "sample = combined_dataset[0][0]\n",
        "dimension = sample.numel()  # total element count of one preprocessed sample\n",
        "\n",
        "print(f\"Combined dataset dimension: {dimension}\")\n",
        "h.show_random_images_from_full_dataset(datasets['CIFAR100'])\n"
      ],
      "metadata": {
        "id": "HJgz3OuBN3HY"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "model = torchvision.models.squeezenet1_1(pretrained=True).eval()\n",
        "# authenticate with Pinecone API, keys and environment available at your project at https://app.pinecone.io\n",
        "pinecone.init(h.pinecone_api_key, environment='Your environment')  # fixed: opening quote was a non-ASCII character (SyntaxError)\n",
        "# if the index does not already exist, we create it\n",
        "# if INDEX_NAME not in pinecone.list_indexes():\n",
        "#     pinecone.create_index(name=INDEX_NAME, dimension=INDEX_DIMENSION)\n",
        "# # instantiate connection to your Pinecone index\n",
        "index = pinecone.Index(INDEX_NAME)  # fixed: connect to the configured index, not the literal 'index'\n",
        "\n",
        "def get_vector_ids(batch_number, batch_size, prefix):\n",
        "    \"\"\"Return an iterator of vector id strings for one batch.\n",
        "\n",
        "    Ids are global row positions batch_number * batch_size .. + batch_size,\n",
        "    rendered as '<prefix>.<row>'.\n",
        "    \"\"\"\n",
        "    start_index = batch_number * batch_size\n",
        "    end_index = start_index + batch_size\n",
        "    ids = np.arange(start_index, end_index)\n",
        "    # create id based on prefix\n",
        "    # eg. if id == 5, prefix == 'CIFAR10', then create 'CIFAR10.5' as vector id.\n",
        "    ids_with_prefix = map(lambda x: f'{prefix}.{str(x)}', ids)\n",
        "    return ids_with_prefix\n",
        "\n",
        "def get_vector_metadata(label_indices, class_list):\n",
        "    \"\"\"Return an iterator of {'label': <class name>} dicts, one per label index.\"\"\"\n",
        "    get_class_name = lambda index: {'label': class_list[index]}\n",
        "    return map(get_class_name, label_indices)\n",
        "\n",
        "def get_vectors_from_batch(preprocessed_data, label_indices, batch_number, dataset):\n",
        "    \"\"\"Return list of tuples like (vector_id, vector_values, vector_metadata).\"\"\"\n",
        "    num_records = len(preprocessed_data)\n",
        "    prefix = dataset.__class__.__name__\n",
        "    with torch.no_grad():\n",
        "        # generate image embeddings with PyTorch model\n",
        "        vector_values = model(preprocessed_data).tolist()\n",
        "    # return respective IDs/metadata for each image embedding\n",
        "    vector_metadata = get_vector_metadata(label_indices, dataset.classes)\n",
        "    # Use the fixed BATCH_SIZE (not num_records) so ids stay aligned with global row\n",
        "    # positions even if the final batch is smaller; zip() truncates to num_records.\n",
        "    vector_ids = get_vector_ids(batch_number, BATCH_SIZE, prefix)\n",
        "    return list(zip(vector_ids, vector_values, vector_metadata))\n",
        "\n",
        "# Demo: embed the first BATCH_SIZE CIFAR100 rows and inspect one vector\n",
        "dataset = datasets['CIFAR100']\n",
        "list_of_preprocessed_tensors, label_indices = list(zip(*[dataset[i] for i in range(BATCH_SIZE)]))\n",
        "preprocessed_data = torch.stack(list_of_preprocessed_tensors)\n",
        "vectors = get_vectors_from_batch(preprocessed_data, label_indices, 0, dataset)\n",
        "id_, embedding, metadata = vectors[123]\n",
        "print(id_, embedding[:3], metadata, sep=', ')\n",
        "\n",
        "def upsert_image_embeddings(dataset, pinecone_index, batch_size=BATCH_SIZE, num_rows=None):\n",
        "    \"\"\"Iterate through dataset, generate embeddings and upsert in batches to Pinecone index.\n",
        "\n",
        "    Args:\n",
        "     - dataset: a PyTorch Dataset\n",
        "     - pinecone_index: your Pinecone index\n",
        "     - batch_size: batch size\n",
        "     - num_rows: Number of initial rows to use of dataset, use all rows if None.\n",
        "    \"\"\"\n",
        "    # Guard the comparison: `None > int` raises TypeError, and None is the default.\n",
        "    if num_rows is not None and num_rows > len(dataset):\n",
        "        raise ValueError(f'`num_rows` should not exceed length of dataset: {len(dataset)}')\n",
        "    if num_rows:\n",
        "        sampler = range(num_rows)\n",
        "    else:\n",
        "        sampler = None\n",
        "    # fixed: honour the batch_size parameter instead of hardcoding BATCH_SIZE\n",
        "    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)\n",
        "    tqdm_kwargs = h.get_tqdm_kwargs(dataloader)\n",
        "    # fixed: unpack kwargs -- passing the dict positionally set tqdm's `desc` argument\n",
        "    for batch_number, (data, label_indices) in tqdm.notebook.tqdm(enumerate(dataloader), **tqdm_kwargs):\n",
        "        vectors = get_vectors_from_batch(\n",
        "            data,\n",
        "            label_indices,\n",
        "            batch_number,\n",
        "            dataloader.dataset)\n",
        "        pinecone_index.upsert(vectors)\n",
        "\n",
        "# Upsert both datasets, then fetch a sample query image\n",
        "for dataset in datasets.values():\n",
        "    upsert_image_embeddings(dataset, index, num_rows=50_000)\n",
        "url = 'https://cdn.britannica.com/40/109040-050-62EEDEA6/Male-white-tailed-deer.jpg'\n",
        "r = requests.get(url, stream=True)\n",
        "query_image = Image.open(r.raw)\n",
        "h.printmd(\"#### A sample image\")\n",
        "query_image.resize((125,125))\n"
      ],
      "metadata": {
        "id": "4e2q52nXN5-0"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Embed the query image with the same model and fetch the 4 nearest neighbours\n",
        "query_embedding = model(h.preprocess(query_image).unsqueeze(0)).tolist()\n",
        "response = index.query(query_embedding, top_k=4, include_metadata=True)\n",
        "h.printmd(f\"```python\\n{response}\\n```\")\n",
        "\n",
        "def visualize_images_with_ids(response, dataset):\n",
        "    \"\"\"Plot the dataset images referenced by a Pinecone query response.\n",
        "\n",
        "    Args:\n",
        "        response: Pinecone query result whose 'matches' carry ids of the\n",
        "            form '<prefix>.<row>'.\n",
        "        dataset: dataset used to resolve each row index back to an image.\n",
        "    \"\"\"\n",
        "    fig, axes = plt.subplots(1, len(response['matches']), figsize=(12, 3))\n",
        "\n",
        "    for i, match in enumerate(response['matches']):\n",
        "        image_id = int(match['id'].split('.')[1])\n",
        "        image, _ = dataset[image_id]\n",
        "        # NOTE(review): labels resolve via the CIFAR-10 mapping only; a match with a\n",
        "        # 'CIFAR100.' prefix would be mislabeled -- consider match['metadata']['label'].\n",
        "        class_label = class_metadata[dataset.targets[image_id]]\n",
        "\n",
        "        tensor_image = ToTensor()(image)  # Convert PIL image to Torch Tensor\n",
        "\n",
        "        axes[i].imshow(tensor_image.permute(1, 2, 0))  # CHW -> HWC for matplotlib\n",
        "        axes[i].set_title(f'ID: {image_id}, Class: {class_label}')\n",
        "        axes[i].axis('off')\n",
        "\n",
        "    plt.tight_layout()\n",
        "    plt.show()\n",
        "\n",
        "# Usage example:\n",
        "visualize_images_with_ids(response, cifar10_dataset)\n"
      ],
      "metadata": {
        "id": "GD2wEkTQN_Z3"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Fetch a second sample query image over HTTP; stream=True exposes the raw\n",
        "# byte stream so PIL can decode it directly from the response.\n",
        "url = 'https://t4.ftcdn.net/jpg/00/97/58/97/360_F_97589769_t45CqXyzjz0KXwoBZT9PRaWGHRk5hQqQ.jpg'\n",
        "r = requests.get(url, stream=True)\n",
        "query_image = Image.open(r.raw)\n",
        "h.printmd(\"#### A sample image\")\n",
        "query_image.resize((125,125))  # last expression: displays a 125x125 preview\n"
      ],
      "metadata": {
        "id": "khWa7_ESOIxc"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Embed the query image and retrieve the 4 nearest neighbours from Pinecone.\n",
        "# (Removed stray book text 'Figure 11.6: Image of a cat' -- it was a SyntaxError.)\n",
        "query_embedding = model(h.preprocess(query_image).unsqueeze(0)).tolist()\n",
        "response = index.query(query_embedding, top_k=4, include_metadata=True)\n",
        "h.printmd(f\"```python\\n{response}\\n```\")\n",
        "\n",
        "# Reuse visualize_images_with_ids defined earlier instead of redefining it here.\n",
        "visualize_images_with_ids(response, cifar10_dataset)\n"
      ],
      "metadata": {
        "id": "aDswvNXvOM57"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Fetch a third sample query image over HTTP; stream=True exposes the raw\n",
        "# byte stream so PIL can decode it directly from the response.\n",
        "url = 'https://t3.ftcdn.net/jpg/00/20/13/60/240_F_20136083_gk0ppzak6UdK9PcDRgPdLjcuAdo7o1LK.jpg'\n",
        "r = requests.get(url, stream=True)\n",
        "query_image = Image.open(r.raw)\n",
        "h.printmd(\"#### A sample image\")\n",
        "query_image.resize((125,125))  # last expression: displays a 125x125 preview\n"
      ],
      "metadata": {
        "id": "ejkZpdnIOQqk"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Embed the query image and retrieve the 4 nearest neighbours from Pinecone.\n",
        "query_embedding = model(h.preprocess(query_image).unsqueeze(0)).tolist()\n",
        "response = index.query(query_embedding, top_k=4, include_metadata=True)\n",
        "h.printmd(f\"```python\\n{response}\\n```\")\n",
        "\n",
        "# Reuse visualize_images_with_ids defined earlier instead of redefining it here.\n",
        "visualize_images_with_ids(response, cifar10_dataset)"
      ],
      "metadata": {
        "id": "A_mEsaX5OV91"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}