{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SageMaker V3 Custom InferenceSpec Example\n",
    "\n",
    "This notebook demonstrates how to create and deploy custom models using InferenceSpec with SageMaker V3 ModelBuilder."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Prerequisites\n",
    "Note: Ensure you have sagemaker and ipywidgets installed in your environment. The ipywidgets package is required to monitor endpoint deployment progress in Jupyter notebooks.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import required libraries\n",
    "import json\n",
    "import uuid\n",
    "import tempfile\n",
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "from sagemaker.serve.model_builder import ModelBuilder\n",
    "from sagemaker.serve.spec.inference_spec import InferenceSpec\n",
    "from sagemaker.serve.builder.schema_builder import SchemaBuilder\n",
    "from sagemaker.serve.utils.types import ModelServer\n",
    "from sagemaker.core.resources import EndpointConfig"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 1: Create a Simple PyTorch Model\n",
    "\n",
    "First, let's create a simple neural network model for demonstration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SimpleModel(nn.Module):\n",
    "    \"\"\"Tiny 4-feature, 2-class classifier used for the deployment demo.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.linear = nn.Linear(4, 2)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Softmax over the class dimension turns logits into probabilities.\n",
    "        return torch.softmax(self.linear(x), dim=1)\n",
    "\n",
    "# Instantiate the network and pick a temp directory for its artifact\n",
    "net = SimpleModel()\n",
    "model_path = tempfile.mkdtemp()\n",
    "\n",
    "# Trace with TorchScript so the saved artifact is self-contained at serve time\n",
    "trace_example = torch.tensor([[0.1, 0.2, 0.3, 0.4]], dtype=torch.float32)\n",
    "scripted = torch.jit.trace(net, trace_example)\n",
    "model_file = os.path.join(model_path, \"model.pth\")\n",
    "torch.jit.save(scripted, model_file)\n",
    "\n",
    "print(f\"Model saved to: {model_file}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 2: Define Custom InferenceSpec\n",
    "\n",
    "Create a custom InferenceSpec that defines how to load and run inference with our model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SimpleModelSpec(InferenceSpec):\n",
    "    \"\"\"Custom InferenceSpec for our simple PyTorch model.\n",
    "\n",
    "    ModelBuilder serializes this object; the hosting container calls\n",
    "    load() once at startup and invoke() for every request.\n",
    "    \"\"\"\n",
    "\n",
    "    def load(self, model_dir: str):\n",
    "        \"\"\"Load the TorchScript artifact saved under model_dir.\n",
    "\n",
    "        Raises:\n",
    "            FileNotFoundError: if the artifact is missing, instead of\n",
    "                silently serving an untrained model.\n",
    "        \"\"\"\n",
    "        artifact_path = os.path.join(model_dir, \"model.pth\")\n",
    "        if not os.path.exists(artifact_path):\n",
    "            raise FileNotFoundError(f\"Model artifact not found: {artifact_path}\")\n",
    "\n",
    "        model = torch.jit.load(artifact_path, map_location='cpu')\n",
    "        model.eval()  # inference mode: freeze dropout/batch-norm behavior\n",
    "        return model\n",
    "\n",
    "    def invoke(self, input_object: object, model: object):\n",
    "        \"\"\"Run inference; expects a nested list of floats (batch x 4).\"\"\"\n",
    "        if isinstance(input_object, list):\n",
    "            input_tensor = torch.tensor(input_object, dtype=torch.float32)\n",
    "        else:\n",
    "            # Non-list payloads fall back to a fixed sample input.\n",
    "            # NOTE(review): this silently ignores the caller's data -- confirm\n",
    "            # it is only meant for probe/health-check style requests.\n",
    "            input_tensor = torch.tensor([[0.1, 0.2, 0.3, 0.4]], dtype=torch.float32)\n",
    "\n",
    "        with torch.no_grad():  # no gradient bookkeeping needed at inference\n",
    "            predictions = model(input_tensor)\n",
    "\n",
    "        return predictions.tolist()\n",
    "\n",
    "print(\"Custom InferenceSpec defined successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 3: Create Schema Builder\n",
    "\n",
    "Define the input/output schema for our model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SchemaBuilder infers (de)serialization from representative payloads\n",
    "example_request = [[0.1, 0.2, 0.3, 0.4]]  # one row of 4 features, JSON-friendly\n",
    "example_response = [[0.9, 0.1]]  # matching class-probability output\n",
    "\n",
    "schema_builder = SchemaBuilder(example_request, example_response)\n",
    "print(\"Schema builder created successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 4: Configure ModelBuilder\n",
    "\n",
    "Set up the ModelBuilder with our custom InferenceSpec."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Naming configuration\n",
    "MODEL_NAME_PREFIX = \"custom-spec-model\"\n",
    "ENDPOINT_NAME_PREFIX = \"custom-spec-endpoint\"\n",
    "\n",
    "# Suffix resource names with a short random id to avoid name collisions\n",
    "unique_id = uuid.uuid4().hex[:8]\n",
    "model_name = f\"{MODEL_NAME_PREFIX}-{unique_id}\"\n",
    "endpoint_name = f\"{ENDPOINT_NAME_PREFIX}-{unique_id}\"\n",
    "\n",
    "# Wire the custom spec, local artifacts, and schema into ModelBuilder\n",
    "model_builder = ModelBuilder(\n",
    "    inference_spec=SimpleModelSpec(),\n",
    "    model_path=model_path,\n",
    "    model_server=ModelServer.TORCHSERVE,\n",
    "    schema_builder=schema_builder,\n",
    ")\n",
    "\n",
    "print(f\"ModelBuilder configured for model: {model_name}\")\n",
    "print(f\"Target endpoint: {endpoint_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 5: Build the Model\n",
    "\n",
    "Build the model artifacts for deployment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the model: package the InferenceSpec, schema builder, and the\n",
    "# artifacts under model_path into a deployable SageMaker model.\n",
    "core_model = model_builder.build(model_name=model_name)\n",
    "print(f\"Model Successfully Created: {core_model.model_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 6: Deploy the Model\n",
    "\n",
    "Deploy the model to a SageMaker endpoint."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deploy the built model to a real-time endpoint; this provisions\n",
    "# infrastructure and may take several minutes to complete.\n",
    "core_endpoint = model_builder.deploy(endpoint_name=endpoint_name)\n",
    "print(f\"Endpoint Successfully Created: {core_endpoint.endpoint_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 7: Test the Model\n",
    "\n",
    "Send test requests to verify the model works correctly."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 1: Single prediction\n",
    "single_row = [[0.1, 0.2, 0.3, 0.4]]\n",
    "\n",
    "response = core_endpoint.invoke(\n",
    "    body=json.dumps(single_row),\n",
    "    content_type=\"application/json\",\n",
    ")\n",
    "\n",
    "prediction_1 = json.loads(response.body.read().decode(\"utf-8\"))\n",
    "print(f\"Single Prediction: {prediction_1}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 2: Batch prediction\n",
    "batch_rows = [\n",
    "    [0.1, 0.2, 0.3, 0.4],\n",
    "    [0.5, 0.6, 0.7, 0.8],\n",
    "    [0.2, 0.3, 0.4, 0.5],\n",
    "]\n",
    "\n",
    "batch_response = core_endpoint.invoke(\n",
    "    body=json.dumps(batch_rows),\n",
    "    content_type=\"application/json\",\n",
    ")\n",
    "\n",
    "prediction_2 = json.loads(batch_response.body.read().decode(\"utf-8\"))\n",
    "print(f\"Batch Prediction: {prediction_2}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 8: Clean Up Resources\n",
    "\n",
    "Clean up all created resources and temporary files."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Clean up AWS resources.\n",
    "# Fetch the endpoint config before deleting the endpoint.\n",
    "# NOTE(review): this assumes deploy() names the config after the endpoint\n",
    "# (default one-config-per-endpoint naming) -- confirm if deploy() is customized.\n",
    "core_endpoint_config = EndpointConfig.get(endpoint_config_name=core_endpoint.endpoint_name)\n",
    "\n",
    "core_model.delete()\n",
    "core_endpoint.delete()\n",
    "core_endpoint_config.delete()\n",
    "\n",
    "# Clean up temporary files\n",
    "import shutil\n",
    "shutil.rmtree(model_path)\n",
    "\n",
    "print(\"All resources and temporary files successfully deleted!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary\n",
    "\n",
    "This notebook demonstrated:\n",
    "1. Creating a simple PyTorch model\n",
    "2. Defining a custom InferenceSpec with load() and invoke() methods\n",
    "3. Setting up schema builders for input/output validation\n",
    "4. Configuring ModelBuilder with TorchServe\n",
    "5. Building and deploying the model\n",
    "6. Testing both single and batch predictions\n",
    "7. Proper cleanup of resources\n",
    "\n",
    "Custom InferenceSpecs provide maximum flexibility for deploying any model with custom preprocessing, postprocessing, and inference logic!"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv-test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
