{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "8b6ba98b",
      "metadata": {},
      "outputs": [],
      "source": [
        "import io\n",
        "\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import torch\n",
        "import torchaudio\n",
        "import torchaudio.transforms as T\n",
        "\n",
        "from pyspark.sql.functions import col, length, pandas_udf, udf\n",
        "from pyspark.sql.types import ArrayType, FloatType, IntegerType, StringType"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "d9fa130f",
      "metadata": {},
      "outputs": [],
      "source": [
        "%%configure -f\n",
        "{\n",
        "  \"executorCores\": 1,\n",
        "  \"conf\": {\n",
        "    \"spark.sql.execution.arrow.maxRecordsPerBatch\": \"64\",\n",
        "    \"spark.executorEnv.HF_HOME\": \"/tmp/huggingface\"\n",
        "  }\n",
        "}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "fcf78b68",
      "metadata": {},
      "outputs": [],
      "source": [
        "TRANSCRIPTION_MODEL = \"openai/whisper-tiny\"\n",
        "NEW_SAMPLING_RATE = 16000\n",
        "\n",
        "# Per-process singleton so each Spark executor loads the processor once.\n",
        "_processor_cache = {\"processor\": None}\n",
        "\n",
        "\n",
        "def get_processor():\n",
        "    \"\"\"Return the Whisper processor, loading and caching it on first call.\"\"\"\n",
        "    cached = _processor_cache[\"processor\"]\n",
        "    if cached is not None:\n",
        "        return cached\n",
        "    # Lazy import: transformers is only required where this function runs.\n",
        "    from transformers import AutoProcessor\n",
        "\n",
        "    processor = AutoProcessor.from_pretrained(TRANSCRIPTION_MODEL)\n",
        "    _processor_cache[\"processor\"] = processor\n",
        "    return processor\n",
        "\n",
        "\n",
        "# Per-process singleton holding the model plus its device/dtype placement.\n",
        "_model_cache = {\"model\": None, \"device\": None, \"dtype\": None}\n",
        "\n",
        "\n",
        "def get_model():\n",
        "    \"\"\"Return (model, device, dtype), loading the model once per process.\n",
        "\n",
        "    Uses CUDA + float16 when a GPU is visible, otherwise CPU + float32.\n",
        "    \"\"\"\n",
        "    if _model_cache[\"model\"] is not None:\n",
        "        return _model_cache[\"model\"], _model_cache[\"device\"], _model_cache[\"dtype\"]\n",
        "\n",
        "    # Lazy import keeps transformers off the module import path.\n",
        "    from transformers import AutoModelForSpeechSeq2Seq\n",
        "\n",
        "    use_gpu = torch.cuda.is_available()\n",
        "    device = \"cuda\" if use_gpu else \"cpu\"\n",
        "    dtype = torch.float16 if use_gpu else torch.float32\n",
        "    model = AutoModelForSpeechSeq2Seq.from_pretrained(\n",
        "        TRANSCRIPTION_MODEL,\n",
        "        torch_dtype=dtype,\n",
        "        low_cpu_mem_usage=True,\n",
        "        use_safetensors=True,\n",
        "    ).to(device)\n",
        "    _model_cache.update(model=model, device=device, dtype=dtype)\n",
        "    return model, device, dtype\n",
        "\n",
        "\n",
        "@pandas_udf(ArrayType(FloatType()))\n",
        "def resample_udf(audio_bytes: pd.Series) -> pd.Series:\n",
        "    \"\"\"Decode raw audio bytes and resample each clip to NEW_SAMPLING_RATE.\n",
        "\n",
        "    Returns one flat float32 waveform per row. Multi-channel audio is\n",
        "    downmixed to mono by averaging channels; the previous ``.squeeze()``\n",
        "    left stereo clips 2-D, violating the ArrayType(FloatType()) schema.\n",
        "    \"\"\"\n",
        "    resamplers = {}  # one Resample transform per source rate, reused across rows\n",
        "    results = []\n",
        "    for bytes_arr in audio_bytes:\n",
        "        waveform, sampling_rate = torchaudio.load(io.BytesIO(bytes_arr))\n",
        "        # torchaudio yields (channels, time); average channels to get mono.\n",
        "        waveform = waveform.mean(dim=0)\n",
        "        if sampling_rate not in resamplers:\n",
        "            resamplers[sampling_rate] = T.Resample(sampling_rate, NEW_SAMPLING_RATE)\n",
        "        resampled = resamplers[sampling_rate](waveform)\n",
        "        results.append(resampled.numpy().astype(np.float32).tolist())\n",
        "    return pd.Series(results)\n",
        "\n",
        "\n",
        "@pandas_udf(ArrayType(ArrayType(FloatType())))\n",
        "def whisper_preprocess_udf(resampled: pd.Series) -> pd.Series:\n",
        "    \"\"\"Turn resampled waveforms into Whisper log-mel input feature matrices.\"\"\"\n",
        "    processor = get_processor()\n",
        "    waveforms = resampled.tolist()\n",
        "    extracted = processor(\n",
        "        waveforms, sampling_rate=NEW_SAMPLING_RATE, return_tensors=\"np\"\n",
        "    ).input_features\n",
        "    rows = [matrix.astype(np.float32).tolist() for matrix in extracted]\n",
        "    return pd.Series(rows)\n",
        "\n",
        "\n",
        "@pandas_udf(ArrayType(IntegerType()))\n",
        "def transcriber_udf(extracted_features: pd.Series) -> pd.Series:\n",
        "    \"\"\"Run Whisper generation over a batch of log-mel feature matrices.\n",
        "\n",
        "    The batch is stacked into one contiguous ndarray before the tensor\n",
        "    conversion: ``torch.tensor`` on a list of ndarrays copies element by\n",
        "    element and is far slower than ``torch.from_numpy(np.stack(...))``.\n",
        "    \"\"\"\n",
        "    if len(extracted_features) == 0:  # np.stack rejects an empty sequence\n",
        "        return pd.Series([], dtype=object)\n",
        "    model, device, dtype = get_model()\n",
        "    batch = np.stack([np.asarray(feat, dtype=np.float32) for feat in extracted_features])\n",
        "    spectrograms = torch.from_numpy(batch).to(device=device, dtype=dtype)\n",
        "    with torch.no_grad():\n",
        "        token_ids = model.generate(spectrograms)\n",
        "    return pd.Series([toks.cpu().numpy().tolist() for toks in token_ids])\n",
        "\n",
        "\n",
        "@pandas_udf(StringType())\n",
        "def decode_udf(token_ids: pd.Series) -> pd.Series:\n",
        "    \"\"\"Decode generated token-id sequences into plain-text transcriptions.\"\"\"\n",
        "    processor = get_processor()\n",
        "    sequences = token_ids.tolist()\n",
        "    texts = processor.batch_decode(sequences, skip_special_tokens=True)\n",
        "    return pd.Series(texts)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "34928725",
      "metadata": {},
      "outputs": [],
      "source": [
        "# Chain the four UDF stages; every withColumn stays lazy until the write.\n",
        "df = spark.read.parquet(\"s3://daft-public-datasets/common_voice_17\")\n",
        "df = df.withColumn(\"resampled\", resample_udf(col(\"audio.bytes\")))\n",
        "df = df.withColumn(\"extracted_features\", whisper_preprocess_udf(col(\"resampled\")))\n",
        "df = df.withColumn(\"token_ids\", transcriber_udf(col(\"extracted_features\")))\n",
        "df = df.withColumn(\"transcription\", decode_udf(col(\"token_ids\")))\n",
        "\n",
        "# Built-in length() replaces the Python len() UDF: no serialization round\n",
        "# trip, and it is null-safe where the lambda raised on null transcriptions.\n",
        "df = df.withColumn(\"transcription_length\", length(col(\"transcription\")))\n",
        "\n",
        "final_df = df.drop(\"token_ids\", \"extracted_features\", \"resampled\")\n",
        "final_df.write.mode(\"append\").parquet(\n",
        "    \"s3://eventual-dev-benchmarking-results/ai-benchmark-results/audio-transcription\"\n",
        ")"
      ]
    }
  ],
  "metadata": {
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 5
}
