{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "d3fb3274",
      "metadata": {},
      "outputs": [],
      "source": [
        "from __future__ import annotations\n",
        "\n",
        "import pymupdf\n",
        "import pandas as pd\n",
        "import torch\n",
        "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
        "from pyspark.sql.functions import udf, explode, col, pandas_udf\n",
        "from pyspark.sql.types import (\n",
        "    ArrayType,\n",
        "    StructType,\n",
        "    StructField,\n",
        "    StringType,\n",
        "    IntegerType,\n",
        "    FloatType,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "30b4fb0d",
      "metadata": {},
      "outputs": [],
      "source": [
        "%%configure -f\n",
        "{\n",
        "  \"executorCores\": 1,\n",
        "  \"conf\": {\n",
        "    \"spark.sql.execution.arrow.maxRecordsPerBatch\": \"10\"\n",
        "  }\n",
        "}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "49940514",
      "metadata": {},
      "outputs": [],
      "source": [
        "def extract_text_from_parsed_pdf(pdf_bytes: bytes, max_pages: int = 100):\n",
        "    \"\"\"Extract per-page text from a PDF given as raw bytes.\n",
        "\n",
        "    Returns a list of {\"text\", \"page_number\"} dicts (page_number is\n",
        "    0-based, as reported by pymupdf), or None when the document has more\n",
        "    than ``max_pages`` pages or cannot be parsed at all.\n",
        "    \"\"\"\n",
        "    try:\n",
        "        # Context manager releases the MuPDF document handle even if\n",
        "        # text extraction raises mid-way (the original leaked it).\n",
        "        with pymupdf.Document(stream=pdf_bytes, filetype=\"pdf\") as doc:\n",
        "            if len(doc) > max_pages:\n",
        "                return None\n",
        "            return [\n",
        "                {\"text\": page.get_text(), \"page_number\": page.number}\n",
        "                for page in doc\n",
        "            ]\n",
        "    except Exception:\n",
        "        # Corrupt/encrypted PDFs are skipped rather than failing the job;\n",
        "        # the None rows are filtered out downstream.\n",
        "        return None\n",
        "\n",
        "\n",
        "# Spark return schema for extract_text_from_parsed_pdf:\n",
        "# an array of per-page structs.\n",
        "extract_schema = ArrayType(\n",
        "    StructType()\n",
        "    .add(\"text\", StringType(), True)\n",
        "    .add(\"page_number\", IntegerType(), True)\n",
        ")\n",
        "extract_udf = udf(extract_text_from_parsed_pdf, extract_schema)\n",
        "\n",
        "\n",
        "def chunk(text: str):\n",
        "    \"\"\"Split ``text`` into overlapping chunks.\n",
        "\n",
        "    Returns a list of {\"text\", \"chunk_id\"} dicts; chunk_id is the\n",
        "    0-based position of the chunk within this text.\n",
        "    \"\"\"\n",
        "    # This runs once per row inside a Spark UDF, and the splitter's\n",
        "    # configuration never changes, so build it once and reuse it\n",
        "    # (cached on the function object) instead of per call.\n",
        "    splitter = getattr(chunk, \"_splitter\", None)\n",
        "    if splitter is None:\n",
        "        splitter = RecursiveCharacterTextSplitter(chunk_size=2048, chunk_overlap=200)\n",
        "        chunk._splitter = splitter\n",
        "    return [\n",
        "        {\"text\": t, \"chunk_id\": idx}\n",
        "        for idx, t in enumerate(splitter.split_text(text))\n",
        "    ]\n",
        "\n",
        "\n",
        "# Spark return schema for chunk: an array of per-chunk structs.\n",
        "chunk_schema = ArrayType(\n",
        "    StructType()\n",
        "    .add(\"text\", StringType(), True)\n",
        "    .add(\"chunk_id\", IntegerType(), True)\n",
        ")\n",
        "chunk_udf = udf(chunk, chunk_schema)\n",
        "\n",
        "# Per-process cache so each Spark executor loads the embedding model\n",
        "# once, not once per UDF batch.\n",
        "_model_cache = {\"model\": None}\n",
        "\n",
        "\n",
        "def get_model():\n",
        "    \"\"\"Return a process-wide SentenceTransformer, loading it on first use.\n",
        "\n",
        "    Cache/env directories are pointed at /tmp because Spark executors\n",
        "    typically cannot write to the default home-directory cache locations.\n",
        "    \"\"\"\n",
        "    import os\n",
        "\n",
        "    os.environ[\"TORCH_HOME\"] = \"/tmp/torch\"\n",
        "    os.environ[\"XDG_CACHE_HOME\"] = \"/tmp\"\n",
        "    os.environ[\"HF_HOME\"] = \"/tmp/huggingface\"\n",
        "    os.environ[\"TRANSFORMERS_CACHE\"] = \"/tmp/huggingface\"\n",
        "\n",
        "    if _model_cache[\"model\"] is None:\n",
        "        # Import lazily so only executors need sentence_transformers.\n",
        "        from sentence_transformers import SentenceTransformer\n",
        "\n",
        "        # Fall back to CPU so the UDF still works on non-GPU executors;\n",
        "        # the original hard-coded \"cuda\" crashes on CPU-only machines.\n",
        "        device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
        "        model = SentenceTransformer(\n",
        "            \"sentence-transformers/all-MiniLM-L6-v2\", device=device\n",
        "        )\n",
        "        model.compile()\n",
        "        _model_cache[\"model\"] = model\n",
        "    return _model_cache[\"model\"]\n",
        "\n",
        "\n",
        "@pandas_udf(ArrayType(FloatType()))\n",
        "def embed_udf(texts: pd.Series) -> pd.Series:\n",
        "    \"\"\"Embed a batch of chunk texts; returns one float vector per row.\"\"\"\n",
        "    # Short-circuit before touching the model so an empty Arrow batch\n",
        "    # never triggers the (slow) model load.\n",
        "    if texts.empty:\n",
        "        return pd.Series([[]] * len(texts))\n",
        "\n",
        "    model = get_model()\n",
        "    # NOTE(review): SentenceTransformer.encode() does not accept a\n",
        "    # torch_dtype keyword (the original call raised TypeError), and numpy\n",
        "    # cannot represent bfloat16 anyway. Encode in the model's native\n",
        "    # dtype and cast to float32 before the tensor -> numpy hop.\n",
        "    embeddings = model.encode(texts.tolist(), convert_to_tensor=True)\n",
        "    return pd.Series([row.tolist() for row in embeddings.float().cpu().numpy()])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "1b08c912",
      "metadata": {},
      "outputs": [],
      "source": [
        "# Resolve the list of PDF object paths from the public metadata table.\n",
        "pdf_metadata = spark.read.parquet(\n",
        "    \"s3://daft-public-datasets/digitalcorpora_metadata\"\n",
        ").filter(col(\"file_name\").endswith(\".pdf\"))\n",
        "pdf_paths = [row.uploaded_pdf_path for row in pdf_metadata.collect()]\n",
        "\n",
        "# One chained pipeline: raw bytes -> pages -> chunks -> embeddings.\n",
        "embedded = (\n",
        "    spark.read.format(\"binaryFile\")\n",
        "    .load(pdf_paths)\n",
        "    .withColumnRenamed(\"path\", \"uploaded_pdf_path\")\n",
        "    .withColumn(\"pages\", extract_udf(col(\"content\")))\n",
        "    .withColumn(\"page\", explode(\"pages\"))\n",
        "    .withColumn(\"page_text\", col(\"page.text\"))\n",
        "    .withColumn(\"page_number\", col(\"page.page_number\"))\n",
        "    .filter(col(\"page_text\").isNotNull())\n",
        "    .withColumn(\"chunks\", chunk_udf(col(\"page_text\")))\n",
        "    .withColumn(\"chunk\", explode(\"chunks\"))\n",
        "    .withColumn(\"chunk_text\", col(\"chunk.text\"))\n",
        "    .withColumn(\"chunk_id\", col(\"chunk.chunk_id\"))\n",
        "    .filter(col(\"chunk_text\").isNotNull())\n",
        "    .withColumn(\"embedding\", embed_udf(col(\"chunk_text\")))\n",
        "    .select(\n",
        "        \"uploaded_pdf_path\", \"page_number\", \"chunk_id\", \"chunk_text\", \"embedding\"\n",
        "    )\n",
        ")\n",
        "embedded.write.mode(\"append\").parquet(\n",
        "    \"s3://eventual-dev-benchmarking-results/ai-benchmark-results/document-embedding-results\"\n",
        ")"
      ]
    }
  ],
  "metadata": {
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 5
}
