{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "1eed4a21",
   "metadata": {},
   "source": [
    "## Example: Using OnnxRuntime-GenAI model\n",
    "\n",
    "Requirements:\n",
    "```\n",
    "pip install -e .[onnxruntime_genai]\n",
    "pip install huggingface_hub\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0b7f7a7c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from huggingface_hub import snapshot_download\n",
    "from pydantic import BaseModel\n",
    "from transformers import AutoTokenizer\n",
    "\n",
    "import guidance\n",
    "from guidance.chat import Phi4MiniChatTemplate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f3f99a3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download Phi4 model\n",
    "model_sub_dir = \"gpu/gpu-int4-rtn-block-32\"\n",
    "base_model_path = snapshot_download(\n",
    "    repo_id=\"microsoft/Phi-4-mini-instruct-onnx\",\n",
    "    allow_patterns=f\"{model_sub_dir}/*\",\n",
    ")\n",
    "model_path = os.path.join(base_model_path, model_sub_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d1870f40",
   "metadata": {},
   "outputs": [],
   "source": [
    "base_lm = guidance.models.OnnxRuntimeGenAI(\n",
    "    model=model_path,\n",
    "    # transformers_tokenizer=AutoTokenizer.from_pretrained(\"microsoft/Phi-4-mini-instruct\"), # you can specify tokenizer explicitly if needed\n",
    "    chat_template=Phi4MiniChatTemplate(),\n",
    "    # execution_provider=\"cuda\", # Uncomment to use GPU\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fac0f5dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_gen_test(lm):\n",
    "    with guidance.user():\n",
    "        lm += \"What is the capital of France? and its population?\"\n",
    "        lm += \"Format your answer as follows: Capital: <capital>, Population: <population>\"\n",
    "\n",
    "    with guidance.assistant():\n",
    "        lm += guidance.gen(max_tokens=1024, temperature=0.7, name=\"answer\")\n",
    "        print(lm[\"answer\"])\n",
    "\n",
    "run_gen_test(base_lm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7364284d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_gen_stop_test(lm):\n",
    "    with guidance.user():\n",
    "        lm += \"What is the capital of France? and its population?\"\n",
    "        lm += \"Format your answer as follows: Capital: <capital>, Population: <population>\"\n",
    "        lm += \"Say 'STOP RIGHT THERE' when you are done.\"\n",
    "\n",
    "    with guidance.assistant():\n",
    "        lm += guidance.gen(max_tokens=1024, temperature=0.7, name=\"answer\", stop=[\"STOP\"])\n",
    "        print(lm[\"answer\"])\n",
    "\n",
    "run_gen_stop_test(base_lm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "57b8a30d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_json_test(lm):\n",
    "    class CityInfo(BaseModel):\n",
    "        capital: str\n",
    "        population: int\n",
    "\n",
    "    with guidance.user():\n",
    "        lm += \"What is the capital of France? and its population? Output as JSON.\"\n",
    "\n",
    "    with guidance.assistant():\n",
    "        lm += guidance.json(schema=CityInfo, name=\"answer\")\n",
    "        print(lm[\"answer\"])\n",
    "\n",
    "run_json_test(base_lm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5fc08f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_json_object_test(lm):\n",
    "    class CityInfo(BaseModel):\n",
    "        capital: str\n",
    "        population: int\n",
    "\n",
    "    with guidance.user():\n",
    "        lm += \"What is the capital of France? and its population? output json\"\n",
    "\n",
    "    with guidance.assistant():\n",
    "        lm += guidance.json(schema=None, name=\"answer\")  # No schema, just output JSON\n",
    "        print(lm[\"answer\"])\n",
    "\n",
    "run_json_object_test(base_lm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b5c2792d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_lark_grammar(lm):\n",
    "    lark_grammar = \"\"\"\n",
    "start: \"Capital: \" CAPITAL \", Population: \" INT\n",
    "CAPITAL: /[A-Z][a-z]+/\n",
    "INT: /[0-9]+/\n",
    "\"\"\"\n",
    "\n",
    "    with guidance.user():\n",
    "        lm += \"What is the capital of France? and its population?\"\n",
    "\n",
    "    with guidance.assistant():\n",
    "        lm += guidance.lark(lark_grammar=lark_grammar, name=\"answer\")\n",
    "        print(lm[\"answer\"])\n",
    "\n",
    "run_lark_grammar(base_lm)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "guidance",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
