{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SageMaker V3 Custom Distributed Training Example\n",
    "\n",
    "This notebook demonstrates how to create and use custom distributed training drivers with SageMaker V3 ModelTrainer."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import tempfile\n",
    "import shutil\n",
    "\n",
    "from sagemaker.train.model_trainer import ModelTrainer\n",
    "from sagemaker.train.configs import SourceCode\n",
    "from sagemaker.train.distributed import DistributedConfig\n",
    "from sagemaker.core.helper.session_helper import Session, get_execution_role"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 1: Setup Session and Create Test Files\n",
    "\n",
    "Initialize the SageMaker session and create the custom distributed driver files."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Set up the SageMaker session and resolve the IAM role the job runs under.\n",
     "sagemaker_session = Session()\n",
     "role = get_execution_role()\n",
     "\n",
     "# NOTE(review): region- and account-specific ECR image URI (us-west-2) — adjust for other regions.\n",
     "DEFAULT_CPU_IMAGE = \"763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:2.0.0-cpu-py310\"\n",
     "\n",
     "# Create temporary directories: one for the custom driver, one for the training script.\n",
     "temp_dir = tempfile.mkdtemp()\n",
     "custom_drivers_dir = os.path.join(temp_dir, \"custom_drivers\")  # will hold driver.py\n",
     "scripts_dir = os.path.join(temp_dir, \"scripts\")  # will hold entry_script.py\n",
     "\n",
     "os.makedirs(custom_drivers_dir, exist_ok=True)\n",
     "os.makedirs(scripts_dir, exist_ok=True)\n",
     "\n",
     "print(f\"Created temporary directories in: {temp_dir}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 2: Create Custom Driver and Entry Script\n",
    "\n",
    "Create the custom driver script and entry script for training."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Create the custom driver script that runs inside the training container.\n",
     "# It validates the SM_* environment-variable contract set up by the training\n",
     "# toolkit, then launches the user's entry script in a subprocess.\n",
     "# NOTE(review): the generated script body is kept verbatim; `!= None` would\n",
     "# idiomatically be `is not None` if the generated code is ever revised.\n",
     "driver_script = '''\n",
     "import json\n",
     "import os\n",
     "import subprocess\n",
     "import sys\n",
     "\n",
     "def main():\n",
     "    driver_config = json.loads(os.environ[\"SM_DISTRIBUTED_CONFIG\"])\n",
     "    process_count_per_node = driver_config[\"process_count_per_node\"]\n",
     "    assert process_count_per_node != None\n",
     "\n",
     "    hps = json.loads(os.environ[\"SM_HPS\"])\n",
     "    assert hps != None\n",
     "    assert isinstance(hps, dict)\n",
     "\n",
     "    source_dir = os.environ[\"SM_SOURCE_DIR\"]\n",
     "    assert source_dir == \"/opt/ml/input/data/code\"\n",
     "    sm_drivers_dir = os.environ[\"SM_DISTRIBUTED_DRIVER_DIR\"]\n",
     "    assert sm_drivers_dir == \"/opt/ml/input/data/sm_drivers/distributed_drivers\"\n",
     "\n",
     "    entry_script = os.environ[\"SM_ENTRY_SCRIPT\"]\n",
     "    assert entry_script != None\n",
     "\n",
     "    python = sys.executable\n",
     "\n",
     "    command = [python, entry_script]\n",
     "    print(f\"Running command: {command}\")\n",
     "    subprocess.run(command, check=True)\n",
     "\n",
     "if __name__ == \"__main__\":\n",
     "    print(\"Running custom driver script\")\n",
     "    main()\n",
     "    print(\"Finished running custom driver script\")\n",
     "'''\n",
     "\n",
     "# Persist the driver so ModelTrainer can upload it with the job inputs.\n",
     "with open(os.path.join(custom_drivers_dir, \"driver.py\"), 'w') as f:\n",
     "    f.write(driver_script)\n",
     "\n",
     "print(\"Created custom driver script\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Create the entry script the driver will launch. It reads hyperparameters\n",
     "# from SM_HPS, simulates training (one second per epoch), and writes a small\n",
     "# results file into the model directory so it ends up in the model artifacts.\n",
     "entry_script = '''\n",
     "import json\n",
     "import os\n",
     "import time\n",
     "\n",
     "def main():\n",
     "    hps = json.loads(os.environ[\"SM_HPS\"])\n",
     "    assert hps != None\n",
     "    print(f\"Hyperparameters: {hps}\")\n",
     "\n",
     "    print(\"Running pseudo training script\")\n",
     "    for epochs in range(hps[\"epochs\"]):\n",
     "        print(f\"Epoch: {epochs}\")\n",
     "        time.sleep(1)\n",
     "    print(\"Finished running pseudo training script\")\n",
     "    \n",
     "    # Save results\n",
     "    model_dir = os.environ.get(\"SM_MODEL_DIR\", \"/opt/ml/model\")\n",
     "    os.makedirs(model_dir, exist_ok=True)\n",
     "    \n",
     "    results = {\"status\": \"success\", \"epochs_completed\": hps[\"epochs\"]}\n",
     "    with open(os.path.join(model_dir, \"results.json\"), \"w\") as f:\n",
     "        json.dump(results, f, indent=2)\n",
     "\n",
     "if __name__ == \"__main__\":\n",
     "    main()\n",
     "'''\n",
     "\n",
     "# Persist the entry script into the source directory packaged by SourceCode.\n",
     "with open(os.path.join(scripts_dir, \"entry_script.py\"), 'w') as f:\n",
     "    f.write(entry_script)\n",
     "\n",
     "print(\"Created entry script\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 3: Define Custom Distributed Driver\n",
    "\n",
    "Create the custom distributed driver class."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CustomDriver(DistributedConfig):\n",
    "    process_count_per_node: int = None\n",
    "\n",
    "    @property\n",
    "    def driver_dir(self) -> str:\n",
    "        return custom_drivers_dir\n",
    "\n",
    "    @property\n",
    "    def driver_script(self) -> str:\n",
    "        return \"driver.py\"\n",
    "\n",
    "print(\"Custom distributed driver class defined!\")\n",
    "print(f\"Driver directory: {custom_drivers_dir}\")\n",
    "print(f\"Driver script: driver.py\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 4: Configure Source Code and Hyperparameters\n",
    "\n",
    "Set up the source code and hyperparameters for training."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "source_code = SourceCode(\n",
    "    source_dir=scripts_dir,\n",
    "    entry_script=\"entry_script.py\",\n",
    ")\n",
    "\n",
    "hyperparameters = {\"epochs\": 10}\n",
    "\n",
    "custom_driver = CustomDriver(process_count_per_node=2)\n",
    "\n",
    "print(f\"Source directory: {scripts_dir}\")\n",
    "print(f\"Entry script: entry_script.py\")\n",
    "print(f\"Hyperparameters: {hyperparameters}\")\n",
    "print(f\"Custom driver: {custom_driver}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 5: Create ModelTrainer with Custom Driver\n",
    "\n",
    "Initialize ModelTrainer with the custom distributed configuration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_trainer = ModelTrainer(\n",
    "    sagemaker_session=sagemaker_session,\n",
    "    training_image=DEFAULT_CPU_IMAGE,\n",
    "    hyperparameters=hyperparameters,\n",
    "    source_code=source_code,\n",
    "    distributed=custom_driver,\n",
    "    base_job_name=\"custom-distributed-driver\",\n",
    ")\n",
    "\n",
    "print(\"ModelTrainer created with custom distributed driver!\")\n",
    "print(f\"Job name: custom-distributed-driver\")\n",
    "print(f\"Distributed configuration: {model_trainer.distributed}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 6: Run Custom Distributed Training\n",
    "\n",
    "Start the distributed training job using the custom driver."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"Starting custom distributed training...\")\n",
    "\n",
    "try:\n",
    "    model_trainer.train()\n",
    "    print(f\"Custom distributed training completed successfully!\")\n",
    "    print(f\"Job name: {model_trainer._latest_training_job.training_job_name}\")\n",
    "    training_successful = True\n",
    "except Exception as e:\n",
    "    print(f\"Training failed with error: {e}\")\n",
    "    training_successful = False"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 7: Analyze Training Results\n",
    "\n",
    "Examine the results from the custom distributed training."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Summarize the finished job; guarded so this cell is a no-op on failure.\n",
     "if training_successful:\n",
     "    # NOTE(review): _latest_training_job is a private SDK attribute — verify\n",
     "    # against the installed sagemaker version.\n",
     "    job_name = model_trainer._latest_training_job.training_job_name\n",
     "    model_artifacts = model_trainer._latest_training_job.model_artifacts\n",
     "    \n",
     "    print(\"Custom Distributed Training Results:\")\n",
     "    print(\"=\" * 40)\n",
     "    print(f\"Job Name: {job_name}\")\n",
     "    print(f\"Model Artifacts: {model_artifacts}\")\n",
     "    print(f\"Training Image: {DEFAULT_CPU_IMAGE}\")\n",
     "    \n",
     "    print(\"\\nCustom Driver Configuration:\")\n",
     "    print(f\"Driver Class: {custom_driver.__class__.__name__}\")\n",
     "    print(f\"Process Count Per Node: {custom_driver.process_count_per_node}\")\n",
     "    print(f\"Driver Directory: {custom_driver.driver_dir}\")\n",
     "    print(f\"Driver Script: {custom_driver.driver_script}\")\n",
     "    \n",
     "    print(\"\\nHyperparameters Used:\")\n",
     "    for key, value in hyperparameters.items():\n",
     "        print(f\"  {key}: {value}\")\n",
     "    \n",
     "    print(\"\\n✓ Custom distributed training completed successfully!\")\n",
     "    \n",
     "else:\n",
     "    print(\"Training was not successful.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 8: Clean Up\n",
    "\n",
    "Clean up temporary files."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "try:\n",
    "    shutil.rmtree(temp_dir)\n",
    "    print(f\"Cleaned up temporary directory: {temp_dir}\")\n",
    "except Exception as e:\n",
    "    print(f\"Could not clean up temp directory: {e}\")\n",
    "\n",
    "print(\"Cleanup completed!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary\n",
    "\n",
    "This notebook demonstrated:\n",
    "1. **Custom distributed driver creation**: Extending DistributedConfig for specialized needs\n",
    "2. **Driver coordination**: How custom drivers manage training processes\n",
    "3. **ModelTrainer integration**: Seamless integration with SageMaker V3 training\n",
    "4. **Custom training logic**: Implementing specialized training patterns\n",
    "\n",
    "Custom distributed drivers provide flexibility for implementing specialized coordination logic, framework integration, and advanced debugging capabilities for distributed training scenarios."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv-test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
