{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SageMaker V3 HuggingFace Model Example\n",
    "\n",
    "This notebook demonstrates how to deploy HuggingFace models using SageMaker V3 ModelBuilder for text generation tasks."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Prerequisites\n",
    "Note: Ensure you have sagemaker and ipywidgets installed in your environment. The ipywidgets package is required to monitor endpoint deployment progress in Jupyter notebooks."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import required libraries\n",
    "import json\n",
    "import uuid\n",
    "\n",
    "from sagemaker.serve.model_builder import ModelBuilder\n",
    "from sagemaker.serve.spec.inference_spec import InferenceSpec\n",
    "from sagemaker.serve.builder.schema_builder import SchemaBuilder\n",
    "from sagemaker.serve.utils.types import ModelServer\n",
    "from sagemaker.core.resources import EndpointConfig"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 1: Define HuggingFace InferenceSpec\n",
    "\n",
    "Create a custom InferenceSpec for HuggingFace text generation models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class HuggingFaceInferenceSpec(InferenceSpec):\n",
    "    \"\"\"Custom InferenceSpec for HuggingFace text generation models.\n",
    "\n",
    "    Implements the three hooks ModelBuilder needs: get_model (the model\n",
    "    ID), load (instantiate model + tokenizer), and invoke (generation).\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        # Small HF Hub model keeps the demo quick to download and load.\n",
    "        self.model_name = \"microsoft/DialoGPT-small\"\n",
    "    \n",
    "    def get_model(self):\n",
    "        \"\"\"Return the HuggingFace model ID for auto-detection.\"\"\"\n",
    "        return self.model_name\n",
    "    \n",
    "    def load(self, model_dir: str):\n",
    "        \"\"\"Load HuggingFace model and tokenizer.\n",
    "\n",
    "        Falls back to a lightweight mock payload when transformers is\n",
    "        not installed, so the rest of the notebook can still be run.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            from transformers import AutoTokenizer, AutoModelForCausalLM\n",
    "            \n",
    "            tokenizer = AutoTokenizer.from_pretrained(self.model_name)\n",
    "            model = AutoModelForCausalLM.from_pretrained(self.model_name)\n",
    "            \n",
    "            # GPT-style models ship without a pad token; reuse EOS so\n",
    "            # generate() does not complain about missing padding.\n",
    "            if tokenizer.pad_token is None:\n",
    "                tokenizer.pad_token = tokenizer.eos_token\n",
    "            \n",
    "            return {\"model\": model, \"tokenizer\": tokenizer}\n",
    "        except ImportError:\n",
    "            return {\"model_type\": \"huggingface_mock\"}\n",
    "    \n",
    "    def invoke(self, input_object, model):\n",
    "        \"\"\"Generate text using the HuggingFace model.\n",
    "\n",
    "        Accepts either {\\\"inputs\\\": text} or any object coercible to str;\n",
    "        returns [{\\\"generated_text\\\": ...}] to match the schema builder.\n",
    "        \"\"\"\n",
    "        if isinstance(model, dict) and \"model_type\" in model:\n",
    "            # Mock behavior for demo\n",
    "            if isinstance(input_object, dict) and \"inputs\" in input_object:\n",
    "                text = input_object[\"inputs\"]\n",
    "                return [{\"generated_text\": f\"Mock response for: {text}\"}]\n",
    "            return [{\"generated_text\": \"Mock response\"}]\n",
    "        \n",
    "        # Real HuggingFace inference\n",
    "        if isinstance(input_object, dict) and \"inputs\" in input_object:\n",
    "            text = input_object[\"inputs\"]\n",
    "        else:\n",
    "            text = str(input_object)\n",
    "        \n",
    "        tokenizer = model[\"tokenizer\"]\n",
    "        hf_model = model[\"model\"]\n",
    "        \n",
    "        # Tokenize via __call__ so we also get an attention mask; passing\n",
    "        # it to generate() avoids the transformers warning about\n",
    "        # ambiguous padding when pad_token == eos_token.\n",
    "        encoded = tokenizer(text, return_tensors=\"pt\")\n",
    "        \n",
    "        import torch\n",
    "        with torch.no_grad():\n",
    "            outputs = hf_model.generate(\n",
    "                encoded[\"input_ids\"],\n",
    "                attention_mask=encoded[\"attention_mask\"],\n",
    "                max_new_tokens=20,  # same budget as max_length=input_len+20\n",
    "                num_return_sequences=1,\n",
    "                pad_token_id=tokenizer.eos_token_id,\n",
    "                do_sample=True,\n",
    "                temperature=0.7\n",
    "            )\n",
    "        \n",
    "        response = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
    "        return [{\"generated_text\": response}]\n",
    "\n",
    "print(\"HuggingFace InferenceSpec defined successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 2: Create Schema Builder\n",
    "\n",
    "Define the input/output schema for the model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the request/response contract from one concrete example pair;\n",
    "# SchemaBuilder infers serialization from these samples.\n",
    "sample_input = {\"inputs\": \"Hello, how are you?\"}\n",
    "sample_output = [{\"generated_text\": \"Hello, how are you? I'm doing well!\"}]\n",
    "\n",
    "schema_builder = SchemaBuilder(\n",
    "    sample_input,\n",
    "    sample_output,\n",
    ")\n",
    "\n",
    "print(\"Schema builder created successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 3: Configure ModelBuilder\n",
    "\n",
    "Set up the ModelBuilder with HuggingFace configuration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Naming configuration\n",
    "MODEL_NAME_PREFIX = \"hf-v3-example-model\"\n",
    "ENDPOINT_NAME_PREFIX = \"hf-v3-example-endpoint\"\n",
    "\n",
    "# Suffix every resource with a short random ID so repeated runs of\n",
    "# this notebook never collide on model/endpoint names.\n",
    "unique_id = uuid.uuid4().hex[:8]\n",
    "model_name = f\"{MODEL_NAME_PREFIX}-{unique_id}\"\n",
    "endpoint_name = f\"{ENDPOINT_NAME_PREFIX}-{unique_id}\"\n",
    "\n",
    "# Wire the custom inference spec and schema into ModelBuilder.\n",
    "inference_spec = HuggingFaceInferenceSpec()\n",
    "model_builder = ModelBuilder(\n",
    "    schema_builder=schema_builder,\n",
    "    inference_spec=inference_spec,\n",
    "    model_server=ModelServer.MMS,  # Multi Model Server for HuggingFace\n",
    ")\n",
    "\n",
    "print(f\"ModelBuilder configured for model: {model_name}\")\n",
    "print(f\"Target endpoint: {endpoint_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 4: Build the Model\n",
    "\n",
    "Build the model artifacts."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the model\n",
    "# Packages the inference spec + schema into a SageMaker model resource\n",
    "# named `model_name` (presumably uploading artifacts — may take a moment).\n",
    "core_model = model_builder.build(model_name=model_name)\n",
    "print(f\"Model Successfully Created: {core_model.model_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 5: Deploy the Model\n",
    "\n",
    "Deploy to a SageMaker endpoint."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deploy the model\n",
    "# NOTE: endpoint creation typically takes several minutes; deployment\n",
    "# progress is rendered via ipywidgets (see Prerequisites above).\n",
    "core_endpoint = model_builder.deploy(endpoint_name=endpoint_name)\n",
    "print(f\"Endpoint Successfully Created: {core_endpoint.endpoint_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 6: Test Text Generation\n",
    "\n",
    "Test the deployed model with various text generation tasks."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 1: Simple conversation\n",
    "conversation_payload = {\"inputs\": \"Hello, how are you today?\"}\n",
    "\n",
    "raw_result_1 = core_endpoint.invoke(\n",
    "    body=json.dumps(conversation_payload),\n",
    "    content_type=\"application/json\",\n",
    ")\n",
    "\n",
    "# The response body is a byte stream: read and decode before parsing.\n",
    "response_1 = json.loads(raw_result_1.body.read().decode(\"utf-8\"))\n",
    "print(f\"Conversation Test: {response_1}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 2: Creative writing\n",
    "story_payload = {\"inputs\": \"Once upon a time in a magical forest\"}\n",
    "\n",
    "raw_result_2 = core_endpoint.invoke(\n",
    "    body=json.dumps(story_payload),\n",
    "    content_type=\"application/json\",\n",
    ")\n",
    "\n",
    "# The response body is a byte stream: read and decode before parsing.\n",
    "response_2 = json.loads(raw_result_2.body.read().decode(\"utf-8\"))\n",
    "print(f\"Creative Writing Test: {response_2}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step 7: Clean Up Resources\n",
    "\n",
    "Clean up all created resources."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Clean up resources\n",
    "# Fetch the endpoint config before deleting the endpoint; this assumes\n",
    "# deploy() named the config after the endpoint — TODO confirm for your\n",
    "# SDK version.\n",
    "core_endpoint_config = EndpointConfig.get(endpoint_config_name=core_endpoint.endpoint_name)\n",
    "\n",
    "# Delete in order: model, endpoint, then the now-unused endpoint config.\n",
    "core_model.delete()\n",
    "core_endpoint.delete()\n",
    "core_endpoint_config.delete()\n",
    "\n",
    "print(\"All resources successfully deleted!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary\n",
    "\n",
    "This notebook demonstrated:\n",
    "1. Creating a custom InferenceSpec for HuggingFace models\n",
    "2. Setting up schema builders for text generation\n",
    "3. Configuring ModelBuilder with Multi Model Server\n",
    "4. Deploying HuggingFace models to SageMaker endpoints\n",
    "5. Testing text generation capabilities\n",
    "6. Proper resource cleanup\n",
    "\n",
    "The V3 ModelBuilder provides a flexible way to deploy any HuggingFace model with custom inference logic!"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv-test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
