{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SageMaker V3 Local Container Mode Example\n",
    "\n",
    "This notebook demonstrates how to use SageMaker V3 ModelBuilder in Local Container mode for testing models in Docker containers locally."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import required libraries\n",
    "import json\n",
    "import uuid\n",
    "import tempfile\n",
    "import os\n",
    "import shutil\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "from sagemaker.serve.model_builder import ModelBuilder\n",
    "from sagemaker.serve.spec.inference_spec import InferenceSpec\n",
    "from sagemaker.serve.builder.schema_builder import SchemaBuilder\n",
    "from sagemaker.serve.utils.types import ModelServer\n",
    "from sagemaker.serve.mode.function_pointers import Mode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE: Local mode requires Docker to be installed and running. \n",
    "# If Docker is not in your system PATH, you may need to define the Docker path in one of the top cells.\n",
    "# Here is an example:\n",
    "import os\n",
    "os.environ['PATH'] = '/usr/local/bin:/Applications/Docker.app/Contents/Resources/bin:' + os.environ['PATH']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 1: Create a PyTorch Model\n",
    "\n",
    "Create and save a simple PyTorch model for local container testing."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SimpleModel(nn.Module):\n",
    "    \"\"\"Simple PyTorch model for testing.\"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.linear = nn.Linear(4, 2)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        return torch.softmax(self.linear(x), dim=1)\n",
    "\n",
    "def save_pytorch_model(model_path: str):\n",
    "    \"\"\"Save PyTorch model for testing.\"\"\"\n",
    "    model = SimpleModel()\n",
    "    sample_input = torch.tensor([[1.0, 2.0, 3.0, 4.0]], dtype=torch.float32)\n",
    "    traced_model = torch.jit.trace(model, sample_input)\n",
    "    model_file = os.path.join(model_path, \"model.pt\")\n",
    "    torch.jit.save(traced_model, model_file)\n",
    "    return model_file\n",
    "\n",
    "# Create temporary model directory and save model\n",
    "temp_model_path = tempfile.mkdtemp()\n",
    "model_file = save_pytorch_model(temp_model_path)\n",
    "print(f\"Model saved to: {model_file}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 2: Define PyTorch InferenceSpec\n",
    "\n",
    "Create an InferenceSpec that can load and run our PyTorch model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PyTorchInferenceSpec(InferenceSpec):\n",
    "    \"\"\"PyTorch InferenceSpec for local container mode.\"\"\"\n",
    "    \n",
    "    def __init__(self, model_path=None):\n",
    "        self.model_path = model_path\n",
    "    \n",
    "    def prepare(self, model_dir: str):\n",
    "        \"\"\"Prepare PyTorch model artifacts.\"\"\"\n",
    "        if self.model_path:\n",
    "            src_model = os.path.join(self.model_path, \"model.pt\")\n",
    "            dst_model = os.path.join(model_dir, \"model.pt\")\n",
    "            if os.path.exists(src_model) and src_model != dst_model:\n",
    "                shutil.copy2(src_model, dst_model)\n",
    "    \n",
    "    def load(self, model_dir: str):\n",
    "        \"\"\"Load PyTorch model.\"\"\"\n",
    "        model_path = os.path.join(model_dir, \"model.pt\")\n",
    "        \n",
    "        if os.path.exists(model_path):\n",
    "            model = torch.jit.load(model_path, map_location='cpu')\n",
    "        else:\n",
    "            model = SimpleModel()\n",
    "        \n",
    "        model.eval()\n",
    "        return model\n",
    "    \n",
    "    def invoke(self, input_object, model):\n",
    "        \"\"\"PyTorch inference.\"\"\"\n",
    "        if isinstance(input_object, dict) and \"data\" in input_object:\n",
    "            input_data = input_object[\"data\"]\n",
    "        else:\n",
    "            input_data = input_object\n",
    "        \n",
    "        if isinstance(input_data, list):\n",
    "            input_tensor = torch.tensor(input_data, dtype=torch.float32)\n",
    "        else:\n",
    "            input_tensor = torch.tensor(input_data.tolist() if hasattr(input_data, 'tolist') else input_data, dtype=torch.float32)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            output = model(input_tensor)\n",
    "            return output.tolist()\n",
    "\n",
    "print(\"PyTorch InferenceSpec defined successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 3: Create Schema Builder\n",
    "\n",
    "Define the input/output schema for our PyTorch model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create PyTorch schema builder\n",
    "sample_input = [[1.0, 2.0, 3.0, 4.0]]\n",
    "sample_output = [[0.6, 0.4]]\n",
    "schema_builder = SchemaBuilder(sample_input, sample_output)\n",
    "\n",
    "print(\"Schema builder created successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 4: Configure ModelBuilder for Local Container Mode\n",
    "\n",
    "Set up ModelBuilder to run in LOCAL_CONTAINER mode with Docker."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configuration\n",
    "MODEL_NAME_PREFIX = \"pytorch-local\"\n",
    "ENDPOINT_NAME_PREFIX = \"pytorch-local\"\n",
    "\n",
    "# Generate unique identifiers\n",
    "unique_id = str(uuid.uuid4())[:8]\n",
    "model_name = f\"{MODEL_NAME_PREFIX}-{unique_id}\"\n",
    "endpoint_name = f\"{ENDPOINT_NAME_PREFIX}-{unique_id}\"\n",
    "\n",
    "# Create ModelBuilder in LOCAL_CONTAINER mode\n",
    "inference_spec = PyTorchInferenceSpec(model_path=temp_model_path)\n",
    "model_builder = ModelBuilder(\n",
    "    inference_spec=inference_spec,\n",
    "    model_server=ModelServer.TORCHSERVE,\n",
    "    schema_builder=schema_builder,\n",
    "    mode=Mode.LOCAL_CONTAINER  # This enables Docker container mode\n",
    ")\n",
    "\n",
    "print(f\"ModelBuilder configured for local container model: {model_name}\")\n",
    "print(f\"Target endpoint: {endpoint_name}\")\n",
    "print(\"Note: This will use Docker containers locally!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 5: Build the Model\n",
    "\n",
    "Build the model artifacts for containerized deployment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the model\n",
    "local_model = model_builder.build(model_name=model_name)\n",
    "print(f\"Model Successfully Created: {local_model.model_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 6: Deploy in Local Container\n",
    "\n",
    "Deploy the model in a local Docker container. This may take a few minutes to pull the container image and ping the container until it is live. This is a normal part of the deployment process."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deploy locally in container mode\n",
    "print(\"Starting local container deployment...\")\n",
    "print(\"Note: This may take a few minutes to pull the Docker image on first run.\")\n",
    "\n",
    "local_endpoint = model_builder.deploy_local(\n",
    "    endpoint_name=endpoint_name,\n",
    "    wait=True,\n",
    "    container_timeout_in_seconds=1200  # 20 minutes timeout\n",
    ")\n",
    "\n",
    "print(f\"Local Container Endpoint Successfully Created: {endpoint_name}\")\n",
    "print(\"Container is now running and ready for inference!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 7: Test the Containerized Model\n",
    "\n",
    "Send test requests to the model running in the local container."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def invoke_and_parse(endpoint, payload, label):\n",
    "    \"\"\"Send a JSON payload to the local endpoint, print and return the parsed reply.\"\"\"\n",
    "    response = endpoint.invoke(\n",
    "        body=json.dumps(payload),\n",
    "        content_type=\"application/json\"\n",
    "    )\n",
    "    parsed = json.loads(response.body.read().decode('utf-8'))\n",
    "    print(f\"{label}: {parsed}\")\n",
    "    return parsed\n",
    "\n",
    "# Test 1: Single prediction\n",
    "parsed_response_1 = invoke_and_parse(local_endpoint, [[1.0, 2.0, 3.0, 4.0]], \"Test 1 - Single prediction\")\n",
    "\n",
    "# Test 2: Batch prediction\n",
    "parsed_response_2 = invoke_and_parse(\n",
    "    local_endpoint,\n",
    "    [[1.0, 2.0, 3.0, 4.0], [0.5, 1.5, 2.5, 3.5], [2.0, 3.0, 4.0, 5.0]],\n",
    "    \"Test 2 - Batch prediction\"\n",
    ")\n",
    "\n",
    "# Test 3: Edge case - different input ranges\n",
    "parsed_response_3 = invoke_and_parse(local_endpoint, [[0.1, 0.2, 0.3, 0.4]], \"Test 3 - Edge case\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 8: Container Information\n",
    "\n",
    "Get information about the running container."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display container information\n",
    "print(\"Container Information:\")\n",
    "print(f\"- Endpoint Name: {local_endpoint.endpoint_name}\")\n",
    "print(f\"- Model Server: TorchServe\")\n",
    "print(f\"- Container Mode: LOCAL_CONTAINER\")\n",
    "print(f\"- Model Path: {temp_model_path}\")\n",
    "\n",
    "# You can also check Docker containers running\n",
    "print(\"\\nTo see the running container, you can run:\")\n",
    "print(\"docker ps\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 9: Clean Up\n",
    "\n",
    "Clean up the local container and temporary files."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Clean up temporary model files\n",
    "shutil.rmtree(temp_model_path)\n",
    "print(\"Temporary model files cleaned up!\")\n",
    "\n",
    "# Note: Local container will be automatically cleaned up when the process ends\n",
    "print(\"Local container will be automatically stopped when this notebook session ends.\")\n",
    "print(\"No AWS resources were created, so no cloud cleanup needed.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary\n",
    "\n",
    "This notebook demonstrated:\n",
    "1. Creating and saving a PyTorch model\n",
    "2. Defining a PyTorch InferenceSpec with prepare(), load(), and invoke() methods\n",
    "3. Configuring ModelBuilder for LOCAL_CONTAINER mode\n",
    "4. Building and deploying models in local Docker containers\n",
    "5. Testing containerized models with various inputs\n",
    "6. Proper cleanup of local resources\n",
    "\n",
    "## Benefits of Local Container Mode:\n",
    "- **Container parity**: Same environment as SageMaker endpoints\n",
    "- **No AWS costs**: Runs entirely locally\n",
    "- **Realistic testing**: Uses actual model serving containers\n",
    "- **Debugging friendly**: Can inspect container logs and behavior\n",
    "- **Dependency isolation**: Container handles all dependencies\n",
    "\n",
    "## When to Use Local Container Mode:\n",
    "- Testing models before deploying to SageMaker\n",
    "- Debugging inference issues\n",
    "- Validating custom inference code\n",
    "- Development with realistic serving environment\n",
    "- CI/CD pipeline testing\n",
    "\n",
    "Local container mode provides the perfect balance between local development speed and production environment fidelity!"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv-test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
