{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "8b5279f0",
      "metadata": {},
      "outputs": [],
      "source": [
        "import io\n",
        "\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import torch\n",
        "from PIL import Image\n",
        "from torchvision import transforms\n",
        "from torchvision.models import resnet18, ResNet18_Weights\n",
        "from pyspark.sql.functions import col, pandas_udf\n",
        "from pyspark.sql.types import StringType, ArrayType, FloatType"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "cdd90987",
      "metadata": {},
      "outputs": [],
      "source": [
        "%%configure -f\n",
        "{\n",
        "  \"conf\": {\n",
        "    \"spark.sql.execution.arrow.maxRecordsPerBatch\": \"100\"\n",
        "  }\n",
        "}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "eab2402f",
      "metadata": {},
      "outputs": [],
      "source": [
        "# Per-executor cache so the model is loaded at most once per Python worker\n",
        "# (module-level state survives across pandas UDF batch invocations).\n",
        "_model_cache = {\"model\": None, \"weights\": None, \"device\": None}\n",
        "\n",
        "\n",
        "def get_model():\n",
        "    \"\"\"Lazily load a pretrained ResNet-18 and cache it for reuse.\n",
        "\n",
        "    Returns:\n",
        "        tuple: (model, weights, device) -- model is in eval mode on\n",
        "        ``device``; weights carries the category metadata used to map\n",
        "        predicted class indices back to label strings.\n",
        "    \"\"\"\n",
        "    import os\n",
        "\n",
        "    # Redirect torch's weight-download cache to a writable executor path.\n",
        "    os.environ[\"TORCH_HOME\"] = \"/tmp/torch\"\n",
        "    os.environ[\"XDG_CACHE_HOME\"] = \"/tmp\"\n",
        "\n",
        "    if _model_cache[\"model\"] is None:\n",
        "        # Fall back to CPU when no GPU is present; the original hardcoded\n",
        "        # \"cuda\" and crashed on CPU-only executors.\n",
        "        device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
        "        weights = ResNet18_Weights.DEFAULT\n",
        "        model = resnet18(weights=weights).eval().to(device)\n",
        "        _model_cache[\"model\"] = model\n",
        "        _model_cache[\"weights\"] = weights\n",
        "        _model_cache[\"device\"] = device\n",
        "    return _model_cache[\"model\"], _model_cache[\"weights\"], _model_cache[\"device\"]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "2fbb3235",
      "metadata": {},
      "outputs": [],
      "source": [
        "# Preprocessing applied per image on the executors:\n",
        "# - ToTensor converts the HWC uint8 numpy array to a CHW float tensor in [0, 1]\n",
        "#   (required here because the decode UDF feeds numpy arrays, which the\n",
        "#   weights preset alone does not accept)\n",
        "# - the weights' preset then resizes/center-crops and normalizes\n",
        "#   (presumably to 3x224x224 for ResNet-18 -- confirm if weights change)\n",
        "transform = transforms.Compose([\n",
        "    transforms.ToTensor(),\n",
        "    ResNet18_Weights.DEFAULT.transforms()\n",
        "])\n",
        "\n",
        "@pandas_udf(ArrayType(FloatType()))\n",
        "def decode_and_preprocess_image_udf(image_data_series: pd.Series) -> pd.Series:\n",
        "    \"\"\"Decode raw image bytes into flattened, preprocessed float arrays.\n",
        "\n",
        "    Each element of ``image_data_series`` is the binary content of one\n",
        "    image file (the ``content`` column of a binaryFile read). Rows that\n",
        "    are null or fail to decode yield None so callers can filter them out\n",
        "    before inference.\n",
        "    \"\"\"\n",
        "    decoded_images = []\n",
        "    \n",
        "    for image_data in image_data_series:\n",
        "        if image_data is None:\n",
        "            decoded_images.append(None)\n",
        "            continue\n",
        "            \n",
        "        try:\n",
        "            # Force RGB so grayscale/RGBA inputs become 3-channel.\n",
        "            image = np.array(Image.open(io.BytesIO(image_data)).convert(\"RGB\"))\n",
        "            \n",
        "            if len(image.shape) != 3:\n",
        "                raise ValueError(f\"Invalid image shape: {image.shape}\")\n",
        "            \n",
        "            # Flatten to a plain float list so it fits ArrayType(FloatType()).\n",
        "            decoded_images.append(transform(image).flatten().tolist())\n",
        "            \n",
        "        except Exception as e:\n",
        "            # Best-effort: log to executor stdout and null out the bad row\n",
        "            # rather than failing the whole Arrow batch.\n",
        "            print(f\"Error decoding image: {e}\")\n",
        "            decoded_images.append(None)\n",
        "    \n",
        "    return pd.Series(decoded_images)\n",
        "\n",
        "@pandas_udf(StringType())\n",
        "def predict_batch_udf(norm_images: pd.Series) -> pd.Series:\n",
        "    \"\"\"Run batched ResNet-18 inference and return predicted class names.\n",
        "\n",
        "    ``norm_images`` holds flattened, preprocessed images as produced by\n",
        "    decode_and_preprocess_image_udf; null rows must be filtered out by\n",
        "    the caller first. If the batch cannot be stacked/reshaped, returns\n",
        "    None for every row in the batch.\n",
        "    \"\"\"\n",
        "    model, weights, device = get_model()\n",
        "    try:\n",
        "        np_batch = np.vstack(norm_images.tolist())\n",
        "        # Assumes each row is a flattened 3x224x224 image (the\n",
        "        # ResNet18_Weights preset's crop size) -- confirm if weights change.\n",
        "        np_batch_reshaped = np_batch.reshape(-1, 3, 224, 224).astype(np.float32)\n",
        "    except ValueError as e:\n",
        "        print(f\"Error reshaping tensor: {e}\")\n",
        "        return pd.Series([None] * len(norm_images))\n",
        "\n",
        "    torch_batch = torch.from_numpy(np_batch_reshaped).to(device)\n",
        "    # inference_mode disables autograd bookkeeping for the forward pass.\n",
        "    with torch.inference_mode():\n",
        "        prediction = model(torch_batch)\n",
        "        predicted_classes = prediction.argmax(dim=1).detach().cpu()\n",
        "        # Map class indices to human-readable ImageNet category names.\n",
        "        predicted_labels = [\n",
        "            weights.meta[\"categories\"][i] for i in predicted_classes\n",
        "        ]\n",
        "    \n",
        "    return pd.Series(predicted_labels)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "c653f3fa",
      "metadata": {},
      "outputs": [],
      "source": [
        "# Collect only the image_url column to the driver; collecting entire\n",
        "# rows would ship every column of the benchmark table for no reason.\n",
        "paths_df = spark.read.parquet(\"s3://daft-public-datasets/imagenet/benchmark\")\n",
        "paths = [row.image_url for row in paths_df.select(\"image_url\").collect()]\n",
        "\n",
        "# Read raw image bytes, decode/preprocess, drop undecodable rows, then\n",
        "# classify and keep only the file path and predicted label.\n",
        "df = spark.read.format(\"binaryFile\").load(paths)\n",
        "df = (\n",
        "    df.withColumn(\"processed_image\", decode_and_preprocess_image_udf(col(\"content\")))\n",
        "    .filter(col(\"processed_image\").isNotNull())\n",
        "    .withColumn(\"label\", predict_batch_udf(col(\"processed_image\")))\n",
        "    .select(\"path\", \"label\")\n",
        ")\n",
        "\n",
        "# Append so repeated benchmark runs accumulate into the results dataset.\n",
        "df.write.mode(\"append\").parquet(\n",
        "    \"s3://eventual-dev-benchmarking-results/ai-benchmark-results/image-classification-results\"\n",
        ")"
      ]
    }
  ],
  "metadata": {
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 5
}
