{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
    "! pip install langchain-mcp-adapters langgraph \"langchain[anthropic]\" \"langchain[openai]\" langgraph-swarm httpx markdownify"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## Swarm Researcher Example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Local helper modules: agent prompt templates, plus doc-fetching and stream-printing utilities\n",
     "from prompts import planner_prompt, researcher_prompt\n",
     "from utils import fetch_doc, print_stream"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports\n",
    "from langchain.chat_models import init_chat_model\n",
    "from langchain.agents import create_agent\n",
    "from langgraph_swarm import create_handoff_tool, create_swarm\n",
    "\n",
    "# Chat model (OpenAI)\n",
    "model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")\n",
    "\n",
    "# Reasoning model (Anthropic)\n",
    "model = init_chat_model(\n",
    "    model=\"claude-3-7-sonnet-latest\",\n",
    "    model_provider=\"anthropic\",\n",
    "    max_tokens=20_000,\n",
    "    thinking={\"type\": \"enabled\", \"budget_tokens\": 16_000},\n",
    ")\n",
    "\n",
    "# Handoff tools\n",
    "transfer_to_planner_agent = create_handoff_tool(\n",
    "    agent_name=\"planner_agent\",\n",
    "    description=\"Transfer the user to the planner_agent for clarifying questions related to the user's request.\",\n",
    ")\n",
    "transfer_to_researcher_agent = create_handoff_tool(\n",
    "    agent_name=\"researcher_agent\",\n",
    "    description=\"Transfer the user to the researcher_agent to perform research and implement the solution to the user's request.\",\n",
    ")\n",
    "\n",
    "# LLMS.txt\n",
    "llms_txt = \"LangGraph:https://langchain-ai.github.io/langgraph/llms.txt\"\n",
    "num_urls = 3\n",
    "planner_prompt_formatted = planner_prompt.format(llms_txt=llms_txt, num_urls=num_urls)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Namespace '('planner_agent:eb54a055-5558-8173-bdfe-1354f4af9d59',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: planner_agent\n",
      "\n",
      "[{'signature': 'ErUBCkYIARgCIkC6WpscEupmQ2damyQ0uywIS1Q8cT2Xz3ItLScIVCcwBDDWDn06f2B3dje0yfotuN7ASTQcVF6J73il2P7D+qsVEgyzWzL2LuUcj55xblYaDH+THpsc+4/oaq1cuCIwagfMjqcspGi8154nyX9f6suawXEePc+Rnbj5ie9IuVoMSEU+sx8kLI+l0xN9CMUXKh095N9npEQOXSPmpohSjQA+UuMY7xi1/nYdo7hiTA==', 'thinking': 'The user wants to create a LangGraph application that:\\n1. Takes a topic from a user\\n2. Generates a joke based on that topic\\n3. Checks if the joke has a punchline\\n\\nTo help implement this, I should first understand LangGraph better by fetching the documentation provided in the llms.txt file. Then I can formulate a plan for implementation.\\n\\nLet me first fetch the documentation:', 'type': 'thinking'}, {'text': \"I'll help you create a LangGraph application for generating and validating jokes. First, let me review the LangGraph documentation to give you the best guidance.\", 'type': 'text'}, {'id': 'toolu_013s8tvjmkA3H4PjHo1rw73p', 'input': {'url': 'https://langchain-ai.github.io/langgraph/llms.txt'}, 'name': 'fetch_doc', 'type': 'tool_use'}]\n",
      "Tool Calls:\n",
      "  fetch_doc (toolu_013s8tvjmkA3H4PjHo1rw73p)\n",
      " Call ID: toolu_013s8tvjmkA3H4PjHo1rw73p\n",
      "  Args:\n",
      "    url: https://langchain-ai.github.io/langgraph/llms.txt\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('planner_agent:eb54a055-5558-8173-bdfe-1354f4af9d59',)'\n",
      "Update from node 'tools'\n",
      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
      "Name: fetch_doc\n",
      "\n",
      "# LangGraph\n",
      "## Tutorials\n",
      "[Learn the basics](https://langchain-ai.github.io/langgraph/tutorials/introduction/): LLM should read this page when needing to build a LangGraph chatbot or when learning about chat agents with memory, human-in-the-loop functionality, and state management. This page provides a comprehensive LangGraph quickstart tutorial covering building a support chatbot with web search capability, conversation memory, human review routing, custom state management, and time travel functionality to explore alternative conversation paths.\n",
      "[Local Deploy](https://langchain-ai.github.io/langgraph/tutorials/langgraph-platform/local-server/): LLM should read this page when setting up a LangGraph app locally using `langgraph dev` and troubleshooting LangGraph server deployment. This page contains a quickstart guide for launching a LangGraph server locally, including installation steps, app creation from templates, environment setup, API testing with Python/JS SDKs, and links to deployment options and further documentation.\n",
      "[Workflows and Agents](https://langchain-ai.github.io/langgraph/tutorials/workflows/): LLM should read this page when implementing agent systems, designing workflow architectures, or troubleshooting LLM orchestration strategies. The page covers patterns for LLM system design, comparing workflows (predefined paths) vs agents (dynamic control), with implementations of prompt chaining, parallelization, routing, orchestrator-worker, evaluator-optimizer, and agent patterns using both graph and functional APIs in LangGraph.\n",
      "## Concepts\n",
      "[Concepts](https://langchain-ai.github.io/langgraph/concepts/): LLM should read this page when needing to understand LangGraph's key concepts or when planning to deploy LangGraph applications. Comprehensive guide covering LangGraph fundamentals (graph primitives, agents, multi-agent systems, breakpoints, persistence), features (time travel, memory, streaming), and LangGraph Platform deployment options (self-hosted, cloud, enterprise).\n",
      "[Agent architectures](https://langchain-ai.github.io/langgraph/concepts/agentic\\_concepts/): LLM should read this page when designing agent architectures, implementing control flows for LLM applications, or customizing agent behavior patterns. This page covers different LLM agent architectures including routers, tool calling agents (ReAct), structured outputs, memory systems, planning capabilities, and advanced customization options like human-in-the-loop, parallelization, subgraphs, and reflection mechanisms.\n",
      "[Application Structure](https://langchain-ai.github.io/langgraph/concepts/application\\_structure/): LLM should read this page when needing to understand LangGraph application structure, preparing to deploy a LangGraph application, or troubleshooting configuration issues. This page details the structure of LangGraph applications, including required components (graphs, langgraph.json config file, dependency files, optional .env), file organization patterns for Python/JavaScript projects, configuration file format with all supported fields, and how to specify dependencies, graphs, and environment variables.\n",
      "[Assistants](https://langchain-ai.github.io/langgraph/concepts/assistants/): LLM should read this page when looking for information about LangGraph assistants, understanding assistant configuration in LangGraph Platform, or learning about versioning agent configurations. This page explains LangGraph assistants, which allow developers to modify agent configurations (prompts, models, etc.) without changing graph logic, supports versioning for tracking changes, and is available only in LangGraph Platform (not open source).\n",
      "[Authentication & Access Control](https://langchain-ai.github.io/langgraph/concepts/auth/): LLM should read this page when implementing authentication in LangGraph Platform, designing access control for LangGraph applications, or troubleshooting security issues in LangGraph deployments. This page explains LangGraph's authentication and authorization system, covering the difference between authentication and authorization, system architecture, implementing custom auth handlers, common access patterns, and supported resources/actions for access control.\n",
      "[Bring Your Own Cloud (BYOC)](https://langchain-ai.github.io/langgraph/concepts/bring\\_your\\_own\\_cloud/): LLM should read this page when learning about LangGraph Platform deployment options, understanding Bring Your Own Cloud architecture, or managing deployments in AWS. This page explains LangGraph's BYOC deployment model, detailing how it separates control plane (managed by LangChain) from data plane (in customer's AWS account), outlines AWS requirements, infrastructure setup via Terraform, required permissions, and explains the deployment workflow.\n",
      "[Deployment Options](https://langchain-ai.github.io/langgraph/concepts/deployment\\_options/): LLM should read this page when needing information about LangGraph deployment options, comparing different deployment methods, or understanding LangGraph Platform plans. This page outlines four deployment options for LangGraph Platform: Self-Hosted Lite (available for all plans), Self-Hosted Enterprise (Enterprise plan only), Cloud SaaS (Plus and Enterprise plans), and Bring Your Own Cloud (Enterprise plan only, AWS-only).\n",
      "[Double Texting](https://langchain-ai.github.io/langgraph/concepts/double\\_texting/): LLM should read this page when handling concurrent user interactions in LangGraph Platform, implementing double-texting safeguards, or designing stateful conversation systems. This page explains four approaches to handling \"double texting\" in LangGraph (when users send a second message before the first completes): Reject, Enqueue, Interrupt, and Rollback, noting these features are currently only available in LangGraph Platform.\n",
      "[Durable Execution](https://langchain-ai.github.io/langgraph/concepts/durable\\_execution/): LLM should read this page when needing to understand durable execution in LangGraph, implementing workflow persistence, or troubleshooting workflow resumption. This page explains durable execution in LangGraph: how workflows save progress to resume later, requirements (checkpointers and thread IDs), determinism guidelines for consistent replay, using tasks to encapsulate non-deterministic operations, and approaches for pausing/resuming workflows.\n",
      "[FAQ](https://langchain-ai.github.io/langgraph/concepts/faq/): LLM should read this page when needing to understand differences between LangGraph and LangChain, exploring deployment options for LangGraph Platform, or determining compatibility with various LLMs. FAQ covering LangGraph basics, comparisons with other frameworks, deployment options (free self-hosted, Cloud SaaS, BYOC, Enterprise), compatibility with different LLMs including OSS models, and feature differences between open-source LangGraph and proprietary LangGraph Platform.\n",
      "[Functional API](https://langchain-ai.github.io/langgraph/concepts/functional\\_api/): LLM should read this page when implementing workflows with persistent state, adding human-in-the-loop features, or converting existing code to use LangGraph. The page documents LangGraph's Functional API, which allows adding persistence, memory, and human-in-the-loop capabilities with minimal code changes using @entrypoint and @task decorators, handling serialization requirements, state management, and common patterns for parallel execution and error handling.\n",
      "[Why LangGraph?](https://langchain-ai.github.io/langgraph/concepts/high\\_level/): LLM should read this page when understanding LangGraph's core capabilities, exploring LLM application infrastructure, or evaluating agent/workflow persistence options. LangGraph provides infrastructure for LLM applications with three key benefits: persistence for memory and human-in-the-loop capabilities, streaming of workflow events and LLM outputs, and tools for debugging and deployment via LangGraph Platform.\n",
      "[Human-in-the-loop](https://langchain-ai.github.io/langgraph/concepts/human\\_in\\_the\\_loop/): LLM should read this page when implementing human-in-the-loop workflows in LangGraph, designing approval systems with LLMs, or creating interactive multi-turn conversation agents. This page explains human-in-the-loop patterns in LangGraph using the interrupt function, showing how to pause graph execution for human review/input and resume with Command. Includes design patterns for approval workflows, state editing, tool call reviews, and multi-turn conversations, with code examples and warnings about execution flow and common pitfalls.\n",
      "[LangGraph CLI](https://langchain-ai.github.io/langgraph/concepts/langgraph\\_cli/): LLM should read this page when looking for information about LangGraph CLI installation or when needing to deploy a LangGraph API server locally. The page covers LangGraph CLI installation methods (Homebrew, pip), key commands (build, dev, up, dockerfile), and features like hot reloading, debugger support, and database management for running LangGraph servers.\n",
      "[Cloud SaaS](https://langchain-ai.github.io/langgraph/concepts/langgraph\\_cloud/): LLM should read this page when learning about LangGraph's Cloud SaaS offering, understanding deployment options for LangGraph Servers, or planning autoscaling infrastructure for LangGraph applications. This page describes LangGraph Cloud SaaS, a managed deployment service for LangGraph Servers with details on deployment types (Development/Production), revisions, persistence, autoscaling capabilities (up to 10 containers), LangSmith integration, IP whitelisting, and automatic deletion policies after 28 days of non-use.\n",
      "[LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/langgraph\\_platform/): LLM should read this page when seeking information about LangGraph Platform's components or evaluating production deployment options for agentic applications. The page details the LangGraph Platform, a commercial solution for deploying agentic applications, including its components (Server, Studio, CLI, SDK, Remote Graph) and key benefits like streaming support, background runs, long run handling, burstiness management, and human-in-the-loop capabilities.\n",
      "[LangGraph Server](https://langchain-ai.github.io/langgraph/concepts/langgraph\\_server/): LLM should read this page when developing applications with LangGraph Server, deploying agent-based applications, or integrating persistent state management in agent workflows. LangGraph Server provides an API for creating and managing agent applications with key features like streaming endpoints, background runs, task queues, persistence, webhooks, cron jobs, and monitoring capabilities through a structured system of assistants, threads, runs, and stores.\n",
      "[LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph\\_studio/): LLM should read this page when looking for information about LangGraph Studio features, needing to troubleshoot LangGraph Studio issues, or learning how to connect a LangGraph application to the Studio. LangGraph Studio is a specialized agent IDE for visualizing, interacting with, and debugging LLM applications, offering features such as graph visualization, state editing, assistant management, and integration with LangSmith, with instructions for connecting via deployed applications or local development servers, plus troubleshooting FAQs.\n",
      "[LangGraph Glossary](https://langchain-ai.github.io/langgraph/concepts/low\\_level/): LLM should read this page when needing to understand LangGraph terminology, implementing agent workflows as graphs, or developing modular multi-step AI systems. The page covers core LangGraph concepts including StateGraph, nodes, edges, state management, messaging, persistence, configuration, human-in-the-loop features, subgraphs, and visualization capabilities.\n",
      "[Memory](https://langchain-ai.github.io/langgraph/concepts/memory/): LLM should read this page when implementing memory systems for AI agents, managing conversation context across sessions, or designing systems that require both short-term and long-term information retention. This page explains memory systems in LangGraph, covering short-term (thread-scoped) memory for managing conversation history and long-term memory across threads, with techniques for handling long conversations, summarizing past interactions, and organizing persistent memories in namespaces.\n",
      "[Multi-agent Systems](https://langchain-ai.github.io/langgraph/concepts/multi\\_agent/): LLM should read this page when implementing multi-agent systems, troubleshooting complex agent architectures, or designing agent communication patterns. Multi-agent systems organize LLMs into modular architectures (network, supervisor, hierarchical, custom) with different communication patterns, using Command objects for handoffs between agents, and supporting various state management approaches.\n",
      "[Persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/): LLM should read this page when needing to understand LangGraph persistence mechanisms, implementing stateful workflows, or managing conversation history across interactions. This page covers LangGraph's persistence features including checkpointers, threads, state snapshots, replay functionality, forking state, cross-thread memory via InMemoryStore, and semantic search capabilities for stored memories.\n",
      "[LangGraph Platform Plans](https://langchain-ai.github.io/langgraph/concepts/plans/): LLM should read this page when determining LangGraph Platform pricing tiers, comparing deployment options, or researching features available across different plans. This page outlines LangGraph Platform plans (Developer, Plus, Enterprise), detailing deployment options, usage limitations, feature availability, and pricing structure for agentic application deployment.\n",
      "[LangGraph Platform Architecture](https://langchain-ai.github.io/langgraph/concepts/platform\\_architecture/): LLM should read this page when needing to understand LangGraph Platform's technical architecture or troubleshooting deployment issues. The page details how LangGraph Platform uses Postgres for persistent storage of user/run data and Redis for worker communication (run cancellation, output streaming) and ephemeral metadata storage (retry attempts).\n",
      "[LangGraph's Runtime (Pregel)](https://langchain-ai.github.io/langgraph/concepts/pregel/): LLM should read this page when learning about LangGraph's runtime, implementing applications with Pregel directly, or understanding how LangGraph executes graph applications. Explains LangGraph's Pregel runtime which manages graph application execution through a three-phase process (Plan, Execution, Update), describes different channel types (LastValue, Topic, Context, BinaryOperatorAggregate), provides direct implementation examples, and contrasts the StateGraph API with the Functional API.\n",
      "[LangGraph Platform: Scalability & Resilience](https://langchain-ai.github.io/langgraph/concepts/scalability\\_and\\_resilience/): LLM should read this page when needing to understand LangGraph Platform's scaling capabilities, designing high-availability LangGraph deployments, or troubleshooting resilience issues. This page details LangGraph Platform's horizontal scaling features including stateless server instances, queue worker scaling, resilience mechanisms for handling crashes, and database failover strategies in Postgres and Redis.\n",
      "[LangGraph SDK](https://langchain-ai.github.io/langgraph/concepts/sdk/): LLM should read this page when looking for installation instructions for LangGraph SDK, needing to choose between sync and async Python clients, or requiring SDK API references. The page covers LangGraph SDK installation for Python and JS, provides API reference links, explains the difference between synchronous and asynchronous Python clients, and includes code examples for both client types.\n",
      "[Self-Hosted](https://langchain-ai.github.io/langgraph/concepts/self\\_hosted/): LLM should read this page when looking for LangGraph deployment options, understanding self-hosted versions, or seeking requirements for self-hosting LangGraph. This page details two self-hosted deployment options for LangGraph Platform: Self-Hosted Lite (limited to 1M nodes/year) and Self-Hosted Enterprise (full version requiring license). Includes requirements, deployment process using Redis/Postgres, Docker, and optional Kubernetes deployment via Helm chart.\n",
      "[Streaming](https://langchain-ai.github.io/langgraph/concepts/streaming/): LLM should read this page when implementing streaming features in LangGraph applications, understanding different streaming modes, or building responsive LLM applications. This page explains streaming in LangGraph, covering the main types (workflow progress, LLM tokens, custom updates) and streaming modes (values, updates, custom, messages, debug, events), with details on how to use multiple modes simultaneously and differences between LangGraph library and Platform implementations.\n",
      "[Template Applications](https://langchain-ai.github.io/langgraph/concepts/template\\_applications/): LLM should read this page when looking for LangGraph template applications, setting up a new LangGraph project, or finding reference implementations for agentic workflows. This page presents LangGraph template applications with installation requirements, available templates (including ReAct Agent, Memory Agent, Retrieval Agent, etc.), instructions for creating new apps using the CLI, deployment options, and links to further learning resources.\n",
      "[Time Travel ⏱️](https://langchain-ai.github.io/langgraph/concepts/time-travel/): LLM should read this page when debugging LLM-based agent behavior, analyzing decision-making paths, or exploring alternative execution branches in LangGraph. This page explains LangGraph's Time Travel debugging features: Replaying (reproducing past actions up to specific checkpoints) and Forking (creating alternative execution paths from specific points), with code examples for retrieving checkpoints, configuring replay, and creating forked states.\n",
      "## How Tos\n",
      "[How-to Guides](https://langchain-ai.github.io/langgraph/how-tos/): LLM should read this page when looking for specific implementation techniques in LangGraph or when trying to deploy LangGraph applications to production environments. This page contains an extensive collection of how-to guides for LangGraph, covering graph fundamentals, persistence, memory management, human-in-the-loop features, tool calling, multi-agent systems, streaming, and deployment options through LangGraph Platform.\n",
      "[How to implement handoffs between agents](https://langchain-ai.github.io/langgraph/how-tos/agent-handoffs/): LLM should read this page when implementing multi-agent systems that require agent coordination, when building systems with specialized agents that need to work together, or when needing to implement handoffs between agents. This page explains how to implement handoffs between agents in LangGraph using Command objects, both directly from agent nodes and through specialized handoff tools, with code examples for creating multi-agent systems.\n",
      "[How to run a graph asynchronously](https://langchain-ai.github.io/langgraph/how-tos/async/): LLM should read this page when needing to implement asynchronous graph execution in LangGraph or when optimizing IO-bound LLM applications. This page explains how to convert synchronous graphs to asynchronous in LangGraph, including updating node definitions with async/await, using StateGraph with TypedDict, implementing conditional edges, and streaming results.\n",
      "[How to integrate LangGraph with AutoGen, CrewAI, and other frameworks](https://langchain-ai.github.io/langgraph/how-tos/autogen-integration/): LLM should read this page when integrating LangGraph with other agent frameworks, building multi-agent systems, or adding persistence features to agents. The page demonstrates how to combine LangGraph with AutoGen by calling AutoGen agents inside LangGraph nodes, showing code examples for setting up the integration with memory and conversation persistence.\n",
      "[How to integrate LangGraph (functional API) with AutoGen, CrewAI, and other frameworks](https://langchain-ai.github.io/langgraph/how-tos/autogen-integration-functional/): LLM should read this page when integrating LangGraph with other agent frameworks, building multi-agent systems with different frameworks, or adding LangGraph features to existing agent systems. This page demonstrates how to integrate LangGraph's functional API with AutoGen, including code examples for creating a workflow that calls AutoGen agents, leveraging LangGraph's memory and persistence features.\n",
      "[How to create branches for parallel node execution](https://langchain-ai.github.io/langgraph/how-tos/branching/): LLM should read this page when needing to implement parallel node execution in LangGraph, optimizing graph performance, or handling conditional branching in workflows. This page explains how to create branches for parallel execution in LangGraph using fan-out/fan-in mechanisms, reducer functions for state accumulation, handling exceptions during parallel execution, and implementing conditional branching logic between nodes.\n",
      "[How to combine control flow and state updates with Command](https://langchain-ai.github.io/langgraph/how-tos/command): LLM should read this page when learning how to combine control flow with state updates in LangGraph, understanding Command objects, or navigating between parent graphs and subgraphs. This page explains how to use Command objects to simultaneously update state and control flow between nodes, demonstrates using Command.PARENT to navigate from subgraphs to parent graphs, and includes examples of implementing reducers for state updates across graph hierarchies.\n",
      "[How to add runtime configuration to your graph](https://langchain-ai.github.io/langgraph/how-tos/configuration/): LLM should read this page when implementing runtime configuration for LangGraph, adding model selection options to agents, or enabling dynamic system messages. This page demonstrates how to configure LangGraph at runtime, including selecting different LLMs dynamically and adding custom configuration options like system messages through the configurable dictionary.\n",
      "[How to use the pre-built ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/): LLM should read this page when implementing a ReAct agent, needing pre-built agent solutions, or learning how to integrate tools with LLM agents. This page covers how to use the pre-built ReAct agent in LangGraph, including setup instructions, creating a weather checking tool, implementing the agent architecture, and examples of running the agent with and without tool calls.\n",
      "[How to add human-in-the-loop processes to the prebuilt ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent-hitl/): LLM should read this page when implementing human-in-the-loop processes for ReAct agents, debugging tool calls, or learning about interrupts in LangGraph. This guide demonstrates how to add human-in-the-loop functionality to prebuilt ReAct agents using interrupt\\_before=[\"tools\"], working with MemorySaver checkpoints, and showing how to approve or edit tool calls before they execute.\n",
      "[How to add thread-level memory to a ReAct Agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent-memory/): LLM should read this page when adding memory to ReAct agents, implementing thread-level persistence in LangGraph, or building stateful conversational agents. This guide demonstrates how to add memory to a ReAct agent using LangGraph's checkpointer interface, with code examples showing MemorySaver implementation, thread\\_id configuration, and persistent chat context across multiple interactions.\n",
      "[How to return structured output from the prebuilt ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent-structured-output/): LLM should read this page when implementing structured output with ReAct agents, customizing agent response formats, or working with LangGraph agents. This page explains how to return structured output from prebuilt ReAct agents by providing a response\\_format parameter with a Pydantic schema, including examples with weather data and options for customizing the prompt.\n",
      "[How to add a custom system prompt to the prebuilt ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent-system-prompt/): LLM should read this page when learning to customize ReAct agents, needing to add system prompts to agents, or working with LangGraph's prebuilt agents. This tutorial demonstrates how to add a custom system prompt to a prebuilt ReAct agent, with code examples showing model setup, tool creation, and using the prompt parameter in the create\\_react\\_agent function.\n",
      "[How to add cross-thread persistence to your graph](https://langchain-ai.github.io/langgraph/how-tos/cross-thread-persistence): LLM should read this page when needing to implement persistence across multiple threads in LangGraph, when storing user data between conversations, or when implementing shared memory in graph-based LLM applications. This page demonstrates how to use LangGraph's Store API to persist data across threads, including creating an InMemoryStore with embedding search capabilities, passing stores to graph nodes, and accessing user-specific memories in different conversation threads.\n",
      "[How to add cross-thread persistence (functional API)](https://langchain-ai.github.io/langgraph/how-tos/cross-thread-persistence-functional): LLM should read this page when needing to implement cross-thread persistence in LangGraph functional API, storing user data across different conversation threads, or creating shared memory between workflows. This page explains how to add cross-thread persistence to LangGraph using the Store interface, including defining a store, configuring the entrypoint decorator, and implementing a workflow that can store and retrieve user information across different conversation threads.\n",
      "[How to do a Self-hosted deployment of LangGraph](https://langchain-ai.github.io/langgraph/how-tos/deploy-self-hosted/): LLM should read this page when implementing a self-hosted deployment of LangGraph, configuring required environment variables, or building Docker images for LangGraph applications. This page explains how to deploy LangGraph applications using Docker, covering environment requirements (Redis, Postgres), how to build Docker images with the LangGraph CLI, configuration using environment variables, and deployment options using Docker or Docker Compose.\n",
      "[How to disable streaming for models that don't support it](https://langchain-ai.github.io/langgraph/how-tos/disable-streaming/): LLM should read this page when handling models that don't support streaming, implementing LangGraph with non-streaming models, or troubleshooting streaming errors with OpenAI's O1 models. This page explains how to use the disable\\_streaming=True parameter with ChatOpenAI to make non-streaming models work with LangGraph's astream\\_events API, with code examples showing the error case and proper implementation.\n",
      "[How to edit graph state](https://langchain-ai.github.io/langgraph/how-tos/human\\_in\\_the\\_loop/edit-graph-state/): LLM should read this page when needing to implement human intervention in LangGraph workflows, wanting to edit graph state during execution, or implementing breakpoints in agent systems. This page explains how to edit graph state in LangGraph using breakpoints, including implementing human-in-the-loop interactions, setting up interruptions before specific nodes, and updating state during agent execution.\n",
      "[How to Review Tool Calls](https://langchain-ai.github.io/langgraph/how-tos/human\\_in\\_the\\_loop/review-tool-calls/): LLM should read this page when implementing human review of tool calls, creating interactive agent workflows, or building approval systems for AI actions. This page explains how to implement human-in-the-loop review for tool calls in LangGraph, including approving tool calls, modifying tool calls manually, and providing natural language feedback to agents with complete code examples and explanations.\n",
      "[How to view and update past graph state](https://langchain-ai.github.io/langgraph/how-tos/human\\_in\\_the\\_loop/time-travel/): LLM should read this page when needing to access or modify past states in LangGraph, when debugging agent execution, or when implementing user interventions in agent workflows. This page demonstrates how to view and update past graph states in LangGraph using get\\_state and update\\_state methods, with examples of replaying execution from checkpoints and branching workflows.\n",
      "[How to wait for user input using interrupt](https://langchain-ai.github.io/langgraph/how-tos/human\\_in\\_the\\_loop/wait-user-input/): LLM should read this page when implementing wait-for-user functions in LangGraph, implementing human-in-the-loop interactions, or learning how to use the interrupt() function. This page explains how to pause graph execution to collect user input using LangGraph's interrupt() function, with examples of simple feedback collection and more complex agent interactions that ask clarifying questions.\n",
      "[How to define input/output schema for your graph](https://langchain-ai.github.io/langgraph/how-tos/input\\_output\\_schema/): LLM should read this page when needing to define separate input/output schemas for LangGraph, implementing schema-based data filtering, or understanding schema definitions in StateGraph. This page explains how to define distinct input and output schemas for a StateGraph, showing how input schema validates the provided data structure while output schema filters internal data to return only relevant information, with code examples demonstrating implementation.\n",
      "[How to handle large numbers of tools](https://langchain-ai.github.io/langgraph/how-tos/many-tools/): LLM should read this page when handling large tool collections, implementing dynamic tool selection, or creating retrieval-based tool management in LangGraph. This page demonstrates how to manage large numbers of tools by using vector search to dynamically select relevant tools based on user queries, implementing tool selection nodes in LangGraph, and handling tool selection errors with retry mechanisms.\n",
      "[How to create map-reduce branches for parallel execution](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/): LLM should read this page when learning to implement parallel execution in LangGraph, creating map-reduce operations, or handling dynamic task decomposition. This guide explains how to use LangGraph's Send API to create map-reduce workflows, breaking tasks into parallel sub-tasks and recombining results, with examples showing joke generation across multiple subjects.\n",
      "[How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/): LLM should read this page when implementing conversation summarization, managing context windows, or building chatbots with memory management. This page demonstrates how to add summary functionality to conversation history using LangGraph, including checking conversation length, creating summaries, and removing old messages while maintaining context.\n",
      "[How to delete messages](https://langchain-ai.github.io/langgraph/how-tos/memory/delete-messages): LLM should read this page when attempting to manage message history in LangGraph, needing to delete specific messages from conversational state, or implementing memory management in LLM applications. This page explains how to delete messages from a LangGraph application using RemoveMessage modifiers, covering both manual deletion with message IDs and programmatic deletion within graph logic to maintain conversation history limits.\n",
      "[How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/): LLM should read this page when managing conversation history in LangGraph, preventing context window issues, or implementing custom message filtering. This page explains how to manage conversation history in LangGraph to prevent context window overflow by implementing message filtering functions that control which messages are sent to the LLM.\n",
      "[How to add semantic search to your agent's memory](https://langchain-ai.github.io/langgraph/how-tos/memory/semantic-search/): LLM should read this page when implementing semantic search in agent memory, enabling memory-aware AI assistants, or configuring advanced memory retrieval systems. This page demonstrates how to add semantic search to LangGraph agent memory stores, covering basic setup with embeddings, storing memories, searching by semantic similarity, integrating memory in agents and ReAct agents, and advanced usage like multi-vector indexing and selective memory indexing.\n",
      "[How to add multi-turn conversation in a multi-agent application](https://langchain-ai.github.io/langgraph/how-tos/multi-agent-multi-turn-convo/): LLM should read this page when implementing multi-turn conversations between agents, creating interactive agent systems with human input, or learning about langgraph interrupts and agent handoffs. This page demonstrates how to build a multi-agent system with multi-turn conversations, including human-in-the-loop interactions, agent handoffs, and state management using LangGraph, Command objects, and interrupts.\n",
      "[How to add multi-turn conversation in a multi-agent application (functional API)](https://langchain-ai.github.io/langgraph/how-tos/multi-agent-multi-turn-convo-functional/): LLM should read this page when building multi-turn conversational agents, implementing agent-to-agent handoffs, or using interrupts to collect user input in LangGraph. This guide demonstrates how to create a multi-agent system with multi-turn conversations using LangGraph's functional API, featuring agent handoffs, interrupt mechanics for user input, and a complete example of travel and hotel advisor agents that can transfer control between each other.\n",
      "[How to build a multi-agent network](https://langchain-ai.github.io/langgraph/how-tos/multi-agent-network/): LLM should read this page when implementing multi-agent networks, setting up agent communication via handoffs, or building travel assistance agents. This page explains how to create a fully-connected multi-agent network with LangGraph where agents can communicate with each other via handoffs, including custom agent implementation and using prebuilt ReAct agents with tools.\n",
      "[How to build a multi-agent network (functional API)](https://langchain-ai.github.io/langgraph/how-tos/multi-agent-network-functional/): LLM should read this page when building multi-agent systems, implementing agent handoffs between specialists, or creating fully-connected agent networks. This guide demonstrates how to create a multi-agent network using LangGraph's functional API, with tasks for individual agents and entrypoint functions to manage agent handoffs based on tool calls.\n",
      "[How to add node retry policies](https://langchain-ai.github.io/langgraph/how-tos/node-retries/): LLM should read this page when implementing error handling in LangGraph nodes, configuring API retry mechanisms, or troubleshooting node failures in graph workflows. Shows how to add custom retry policies to LangGraph nodes, including specifying which exceptions to retry on, setting max attempts, intervals, backoff factors, and implementing different retry behaviors for different node types.\n",
      "[How to pass config to tools](https://langchain-ai.github.io/langgraph/how-tos/pass-config-to-tools/): LLM should read this page when implementing secure tool configuration in LangChain, passing user-specific parameters to tools, or configuring tools with runtime values. This page explains how to pass configuration to LangChain tools using RunnableConfig, allowing application-controlled values (like user IDs) to be securely passed to tools without LLM control, with examples of implementing tools that access user-specific data.\n",
      "[How to pass private state between nodes](https://langchain-ai.github.io/langgraph/how-tos/pass\\_private\\_state/): LLM should read this page when implementing data sharing between specific nodes in LangGraph, handling private state in graph workflows, or designing multi-node sequential processes with selective data visibility. This page demonstrates how to pass private data between specific nodes in a LangGraph without making it part of the main schema, using typed dictionaries to define both public and private states, and showing a three-node example where private data flows only between the first two nodes.\n",
      "[How to add thread-level persistence to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/): LLM should read this page when implementing persistence in LangGraph, needing to preserve context across user interactions, or learning about thread-level state management. This page explains how to add thread-level persistence to LangGraph applications using MemorySaver, including code examples for creating stateful conversations where context is maintained across multiple interactions.\n",
      "[How to add thread-level persistence (functional API)](https://langchain-ai.github.io/langgraph/how-tos/persistence-functional/): LLM should read this page when implementing thread-level persistence in LangGraph, creating conversational agents with memory, or using functional API with state management. This page explains how to add thread-level persistence to LangGraph functional API workflows using checkpointers, including code examples for creating a simple chatbot with memory across conversation turns.\n",
      "[How to use MongoDB checkpointer for persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence\\_mongodb/): LLM should read this page when implementing persistence in LangGraph agents, setting up MongoDB for state checkpointing, or working with MongoDB connections in LangGraph applications. This page explains how to use the MongoDB checkpointer for LangGraph persistence, covering connection methods (direct, client-based, async), basic setup requirements, and practical examples of saving and retrieving agent state between interactions.\n",
      "[How to use Postgres checkpointer for persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence\\_postgres/): LLM should read this page when setting up persistence for LangGraph agents, implementing PostgreSQL as a checkpoint storage backend, or working with either synchronous or asynchronous database connections. This page details how to use PostgreSQL for persisting LangGraph agent state, covering setup and configuration of PostgresSaver and AsyncPostgresSaver with different connection methods (pool, direct connection, connection string).\n",
      "[How to create a custom checkpointer using Redis](https://langchain-ai.github.io/langgraph/how-tos/persistence\\_redis/): LLM should read this page when implementing persistence in LangGraph applications, creating custom checkpoint mechanisms for agents, or working with Redis as a storage backend. This page demonstrates how to create custom checkpointers for LangGraph agents using Redis, including implementations for both synchronous and asynchronous interfaces that save and retrieve agent state.\n",
      "[How to create a ReAct agent from scratch](https://langchain-ai.github.io/langgraph/how-tos/react-agent-from-scratch/): LLM should read this page when needing to create a custom ReAct agent, wanting more control than prebuilt agents, or implementing ReAct from scratch with LangGraph. This guide shows how to build a custom ReAct agent using LangGraph, covering state definition, model/tool setup, node/edge configuration, graph creation, and testing the implementation with a weather query example.\n",
      "[How to create a ReAct agent from scratch (Functional API)](https://langchain-ai.github.io/langgraph/how-tos/react-agent-from-scratch-functional): LLM should read this page when creating a ReAct agent using LangGraph's Functional API, implementing tool-calling workflows, or building conversational agents with thread persistence. This page explains how to build a ReAct agent from scratch using LangGraph's Functional API, including model and tool setup, defining tasks for model/tool calling, creating an entrypoint for orchestration, and adding thread-level persistence for conversational experiences.\n",
      "[How to force tool-calling agent to structure output](https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output): LLM should read this page when needing to force tool-calling agents to produce structured output, implementing consistent output formats for downstream software, or choosing between single-LLM vs two-LLM structured output approaches. The page explains two methods for implementing structured output with tool-calling agents: binding output as a tool (single LLM approach) and using two LLMs with structured output conversion, with code examples for both approaches using LangGraph.\n",
      "[How to create and control loops](https://langchain-ai.github.io/langgraph/how-tos/recursion-limit/): LLM should read this page when building loops in computational graphs, needing to implement termination conditions, or handling recursion limits in LangGraph. The page explains how to create graphs with loops using conditional edges for termination, set recursion limits, handle GraphRecursionError, and implement complex loops with branches.\n",
      "[How to review tool calls (Functional API)](https://langchain-ai.github.io/langgraph/how-tos/review-tool-calls-functional/): LLM should read this page when implementing human review of tool calls, creating ReAct agents with Functional API, or adding human-in-the-loop workflows. This page demonstrates how to review tool calls before execution in a ReAct agent using LangGraph's Functional API, including accepting, revising, or generating custom tool messages with the interrupt function.\n",
      "[How to pass custom run ID or set tags and metadata for graph runs in LangSmith](https://langchain-ai.github.io/langgraph/how-tos/run-id-langsmith/): LLM should read this page when needing to customize trace information in LangSmith for LangGraph runs or when debugging graph runs with custom identifiers. The page explains how to pass custom run\\_id, set tags, add metadata, and customize run names for LangGraph traces in LangSmith using RunnableConfig, with examples showing implementation with a ReAct agent.\n",
      "[How to create a sequence of steps](https://langchain-ai.github.io/langgraph/how-tos/sequence/): LLM should read this page when implementing sequential workflows in LangGraph, creating multi-step processes in applications, or learning about state management in graph-based systems. This page explains how to create sequences in LangGraph, covering methods for building sequential graphs using .add\\_node/.add\\_edge or the shorthand .add\\_sequence, defining state with TypedDict, creating nodes as functions that update state, and compiling/invoking graphs with examples.\n",
      "[How to use Pydantic model as graph state](https://langchain-ai.github.io/langgraph/how-tos/state-model): LLM should read this page when implementing Pydantic models for state validation in LangGraph, handling complex state schema definitions, or troubleshooting validation errors in graph nodes. This guide explains how to use Pydantic BaseModel as a state schema in LangGraph for runtime validation, covering basic implementation, limitations, validation behavior across multiple nodes, serialization patterns, type coercion, and working with message models.\n",
      "[How to update graph state from nodes](https://langchain-ai.github.io/langgraph/how-tos/state-reducers/): LLM should read this page when needing to update state in LangGraph, designing graphs with nodes that modify state, or implementing reducers for state management. This page explains how to define state schemas in LangGraph using TypedDict, how nodes can update state, and how to use reducers to control state updates, with specific examples using message handling.\n",
      "[How to stream](https://langchain-ai.github.io/langgraph/how-tos/streaming/): LLM should read this page when needing to implement streaming in LangGraph applications, understanding different streaming modes, or troubleshooting LLM response delivery. This page explains how to stream LLM outputs using LangGraph, covering different streaming modes (values, updates, custom, messages, debug), with code examples for each mode and how to combine multiple streaming modes.\n",
      "[How to stream data from within a tool](https://langchain-ai.github.io/langgraph/how-tos/streaming-events-from-within-tools/): LLM should read this page when implementing streaming functionality in tools, integrating LLM outputs with custom data streams, or developing LangGraph applications with real-time feedback. This page explains how to stream data from within tools using LangGraph, covering custom data streaming with stream\\_mode=\"custom\", LLM token streaming with stream\\_mode=\"messages\", and implementation approaches both with and without LangChain.\n",
      "[How to stream LLM tokens from specific nodes](https://langchain-ai.github.io/langgraph/how-tos/streaming-specific-nodes/): LLM should read this page when needing to filter token streaming from specific nodes in LangGraph, implementing selective streaming in multi-node workflows, or controlling which node outputs are displayed. Guide explains how to stream LLM tokens from specific nodes using stream\\_mode=\"messages\" and filtering by the langgraph\\_node metadata field, with complete code examples for implementing this in StateGraph applications.\n",
      "[How to stream from subgraphs](https://langchain-ai.github.io/langgraph/how-tos/streaming-subgraphs/): LLM should read this page when needing to stream outputs from subgraphs in LangGraph, implementing nested graph streaming, or debugging hierarchical graph execution. This page explains how to stream outputs from subgraphs in LangGraph by using the subgraphs=True parameter in the parent graph's stream() method, with a complete code example showing the difference between regular streaming and subgraph streaming.\n",
      "[How to stream LLM tokens from your graph](https://langchain-ai.github.io/langgraph/how-tos/streaming-tokens): LLM should read this page when needing to stream LLM tokens from a LangGraph application, implementing custom token streaming, or filtering streamed outputs. This page explains how to stream individual LLM tokens from LangGraph nodes using graph.stream() with different stream\\_mode options, including examples with and without LangChain, async implementations, and how to filter streamed tokens using metadata.\n",
      "[How to use subgraphs](https://langchain-ai.github.io/langgraph/how-tos/subgraph/): LLM should read this page when building complex systems with subgraphs, implementing multi-agent systems, or needing to share state between parent graphs and subgraphs. The page explains two methods for using subgraphs: adding compiled subgraphs when schemas share keys, and invoking subgraphs via node functions when schemas differ, with code examples for both approaches.\n",
      "[How to add thread-level persistence to a subgraph](https://langchain-ai.github.io/langgraph/how-tos/subgraph-persistence/): LLM should read this page when implementing persistence in nested LangGraph architectures, adding thread-level storage to subgraphs, or debugging state propagation in LangGraph applications. This guide demonstrates how to add thread-level persistence to subgraphs by passing a checkpointer only to the parent graph during compilation, accessing persisted states from both parent and child graphs, and retrieving subgraph state using the proper configuration parameters.\n",
      "[How to transform inputs and outputs of a subgraph](https://langchain-ai.github.io/langgraph/how-tos/subgraph-transform-state/): LLM should read this page when needing to work with nested subgraphs, transforming state between parent and child graphs, or integrating independent state components in LangGraph. This page demonstrates how to transform inputs and outputs between parent graphs and subgraphs with different state structures, showing implementation of three nested graphs (parent, child, grandchild) with separate state dictionaries and transformation functions.\n",
      "[How to view and update state in subgraphs](https://langchain-ai.github.io/langgraph/how-tos/subgraphs-manage-state/): LLM should read this page when working with state management in nested subgraphs, implementing human-in-the-loop patterns, or debugging complex graph flows. This guide covers viewing and updating state in LangGraph subgraphs, including how to resume execution from breakpoints, modify subgraph state, act as specific nodes, and work with multi-level nested subgraphs.\n",
      "[How to call tools using ToolNode](https://langchain-ai.github.io/langgraph/how-tos/tool-calling/): LLM should read this page when learning how to implement tool calling with LangGraph, when working with the ToolNode component, or when building ReAct agents. This page covers using LangGraph's ToolNode for tool calling, including setup, manual invocation, working with chat models, building a ReAct agent, handling single and parallel tool calls, and error handling.\n",
      "[How to handle tool calling errors](https://langchain-ai.github.io/langgraph/how-tos/tool-calling-errors/): LLM should read this page when handling tool call errors, implementing error handling for LLM-tool interactions, or creating fallback strategies for failed tool calls. This page covers strategies for handling tool calling errors in LangGraph, including using the prebuilt ToolNode with built-in error handling, implementing custom error handling patterns, and fallback mechanisms with model upgrades when tools fail.\n",
      "[How to update graph state from tools](https://langchain-ai.github.io/langgraph/how-tos/update-state-from-tools/): LLM should read this page when needing to update graph state from tools in LangGraph, implementing personalized responses based on tool updates, or using Command objects to modify state. This page details how to update graph state from tools using Command objects, creating personalized agents with state tracking, and implementing dynamic prompt construction based on updated state values.\n",
      "[How to interact with the deployment using RemoteGraph](https://langchain-ai.github.io/langgraph/how-tos/use-remote-graph/): LLM should read this page when needing to interact with LangGraph Platform deployments remotely, when implementing RemoteGraph interfaces, or when using deployed graphs as subgraphs. This page explains how to use RemoteGraph to interact with LangGraph Platform deployments, covering initialization methods (URL-based or client-based), synchronous/asynchronous invocation, thread-level persistence, and using RemoteGraph as a subgraph in larger applications.\n",
      "[How to visualize your graph](https://langchain-ai.github.io/langgraph/how-tos/visualization): LLM should read this page when needing to visualize LangGraph graphs, looking for graph visualization methods, or working with graph visualization in Python. Comprehensive guide for visualizing graphs in LangGraph with multiple methods: Mermaid syntax, Mermaid.ink API for PNG rendering, Pyppeteer-based visualization, and Graphviz, with customization options for colors, styles, and layout.\n",
      "[How to wait for user input (Functional API)](https://langchain-ai.github.io/langgraph/how-tos/wait-user-input-functional/): LLM should read this page when implementing human-in-the-loop workflows, integrating user input into agent systems, or adding interruption capabilities to LangGraph applications. The page explains how to use the `interrupt()` function in LangGraph's Functional API to pause execution for human input, with examples for both simple workflows and ReAct agents, including code implementations with checkpointing.\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('planner_agent:eb54a055-5558-8173-bdfe-1354f4af9d59',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: planner_agent\n",
      "\n",
      "Thank you for providing details about your request. Let me ask a few clarifying questions to better understand the scope of your LangGraph application:\n",
      "\n",
      "1. Do you want the joke generator to be able to handle any topic, or are there specific topics you'd like to focus on?\n",
      "2. How do you want the system to respond if a joke doesn't have a punchline?\n",
      "3. Would you like any additional features beyond the basic prompt chain (topic → joke → punchline check)?\n",
      "4. Do you have a specific LLM provider in mind (OpenAI, Anthropic, etc.) or should the solution be model-agnostic?\n",
      "5. Are there any specific requirements for the output format of the jokes?\n",
      "\n",
      "This information will help me create a more tailored plan for implementing your LangGraph application.\n",
      "\n",
      "\n",
      "\n",
      "Namespace '()'\n",
      "Update from node 'planner_agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: planner_agent\n",
      "\n",
      "Thank you for providing details about your request. Let me ask a few clarifying questions to better understand the scope of your LangGraph application:\n",
      "\n",
      "1. Do you want the joke generator to be able to handle any topic, or are there specific topics you'd like to focus on?\n",
      "2. How do you want the system to respond if a joke doesn't have a punchline?\n",
      "3. Would you like any additional features beyond the basic prompt chain (topic → joke → punchline check)?\n",
      "4. Do you have a specific LLM provider in mind (OpenAI, Anthropic, etc.) or should the solution be model-agnostic?\n",
      "5. Are there any specific requirements for the output format of the jokes?\n",
      "\n",
      "This information will help me create a more tailored plan for implementing your LangGraph application.\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "===\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from langgraph.checkpoint.memory import InMemorySaver\n",
    "\n",
    "# Planner agent\n",
    "planner_agent = create_agent(\n",
    "    model,\n",
    "    system_prompt=planner_prompt_formatted,\n",
    "    tools=[fetch_doc, transfer_to_researcher_agent],\n",
    "    name=\"planner_agent\",\n",
    ")\n",
    "\n",
    "# Researcher agent\n",
    "researcher_agent = create_agent(\n",
    "    model,\n",
    "    system_prompt=researcher_prompt,\n",
    "    tools=[fetch_doc, transfer_to_planner_agent],\n",
    "    name=\"researcher_agent\",\n",
    ")\n",
    "\n",
    "# Swarm\n",
    "checkpointer = InMemorySaver()\n",
    "agent_swarm = create_swarm(\n",
    "    [planner_agent, researcher_agent], default_active_agent=\"planner_agent\"\n",
    ")\n",
    "app = agent_swarm.compile(checkpointer=checkpointer)\n",
    "\n",
    "# Input\n",
    "request = \"Create a LangGraph application that is a prompt chain: it takes a topic from a user, generates a joke, and checks if the joke has a punchline.\"\n",
    "config = {\"configurable\": {\"thread_id\": \"1\"}}\n",
    "print_stream(\n",
    "    app.stream(\n",
    "        {\"messages\": [{\"role\": \"user\", \"content\": request}]}, config, subgraphs=True\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Namespace '('planner_agent:07c7e919-0b27-f012-f663-adbf90464fae',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: planner_agent\n",
      "\n",
      "[{'signature': 'ErUBCkYIARgCIkCzcDG8omno4rS3DhnIJLYUf/AUqY3IBoYv+oeAsNMXH+d2GuTHDqYSQJH6AahN5cxPENVzkbx4Ge7PmOeTAFFXEgxAkOH3vUQTThx9a7AaDFXn+zw8r4Ex3ChCgSIwkyZzH+IlvmIgWUUUmE0n2zDmNuWmFLr9XOB1XM9Q79FpHrA+04q6qGnURqMbvItLKh24kfCLQpu68yWeL1SCqiRhE4+G8UZ3YGWKXQnlBg==', 'thinking': 'Thanks for the clarifications. Here\\'s my understanding of the project scope:\\n\\n1. User will provide a topic as input\\n2. The application will generate a joke with a clear \"setup:\" and \"punchline:\" format\\n3. This will be a simple Python script without UI or deployment\\n4. The application will use OpenAI\\'s models\\n5. The final output can be a formatted string\\n\\nNow, I\\'ll identify the most relevant URLs from the LangGraph documentation that will help with implementing this project:\\n\\n1. **[How to create a sequence of steps](https://langchain-ai.github.io/langgraph/how-tos/sequence/)** - This is fundamental since we\\'re building a simple sequence: topic -> joke generation -> punchline check.\\n\\n2. **[How to update graph state from nodes](https://langchain-ai.github.io/langgraph/how-tos/state-reducers/)** - This will help us understand how to pass information between the nodes in our graph (topic -> joke -> validation).\\n\\n3. **[How to stream LLM tokens from your graph](https://langchain-ai.github.io/langgraph/how-tos/streaming-tokens)** - This could be useful if we want to see the joke being generated in real-time, though it\\'s not a strict requirement.\\n\\nHowever, after reviewing the docs more carefully, I think these URLs would be more relevant:\\n\\n1. **[How to create a sequence of steps](https://langchain-ai.github.io/langgraph/how-tos/sequence/)** - This is still fundamental for our basic chain.\\n\\n2. **[How to update graph state from nodes](https://langchain-ai.github.io/langgraph/how-tos/state-reducers/)** - Still important for state management.\\n\\n3. 
**[How to define input/output schema for your graph](https://langchain-ai.github.io/langgraph/how-tos/input_output_schema/)** - This will help us properly define our input (topic) and output (joke with punchline validation) schemas.\\n\\nLet me prepare a concise scope and links to share with the user.', 'type': 'thinking'}, {'text': 'Based on your requirements, I\\'ve defined the scope and identified the relevant documentation for implementing your LangGraph joke generator application.\\n\\n## Scope:\\n- Create a Python script that takes a topic as input from the user\\n- Implement a three-node LangGraph application (topic input → joke generation → punchline validation)\\n- Use OpenAI\\'s models to generate jokes with a clear \"setup:\" and \"punchline:\" structure\\n- Validate that generated jokes include both parts (setup and punchline)\\n- Return the joke as a formatted string\\n\\n## URLs:\\n1. https://langchain-ai.github.io/langgraph/how-tos/sequence/ - For creating the basic sequence flow\\n2. https://langchain-ai.github.io/langgraph/how-tos/state-reducers/ - For implementing state management between nodes\\n3. https://langchain-ai.github.io/langgraph/how-tos/input_output_schema/ - For defining proper input/output schemas for the graph\\n\\nNow I\\'ll transfer you to the researcher agent who will implement this solution for you.', 'type': 'text'}, {'id': 'toolu_01JdLYx7YGK47wMtQdYzZQ9h', 'input': {}, 'name': 'transfer_to_researcher_agent', 'type': 'tool_use'}]\n",
      "Tool Calls:\n",
      "  transfer_to_researcher_agent (toolu_01JdLYx7YGK47wMtQdYzZQ9h)\n",
      " Call ID: toolu_01JdLYx7YGK47wMtQdYzZQ9h\n",
      "  Args:\n",
      "\n",
      "\n",
      "\n",
      "Namespace '()'\n",
      "Update from node 'planner_agent'\n",
      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
      "Name: transfer_to_researcher_agent\n",
      "\n",
      "Successfully transferred to researcher_agent\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: researcher_agent\n",
      "\n",
      "[{'text': \"Let me implement your LangGraph application for generating jokes with punchline validation. I'll start by gathering the necessary information from the provided documentation links.\", 'type': 'text'}, {'id': 'toolu_011nxKksF1wzjay1wQW9UTde', 'input': {'url': 'https://langchain-ai.github.io/langgraph/how-tos/sequence/'}, 'name': 'fetch_doc', 'type': 'tool_use'}]\n",
      "Tool Calls:\n",
      "  fetch_doc (toolu_011nxKksF1wzjay1wQW9UTde)\n",
      " Call ID: toolu_011nxKksF1wzjay1wQW9UTde\n",
      "  Args:\n",
      "    url: https://langchain-ai.github.io/langgraph/how-tos/sequence/\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'tools'\n",
      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
      "Name: fetch_doc\n",
      "\n",
      "How to create a sequence of steps\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "[Skip to content](#how-to-create-a-sequence-of-steps)\n",
      "\n",
      "**Join us at  [Interrupt: The Agent AI Conference by LangChain](https://interrupt.langchain.com/) on May 13 & 14 in San Francisco!**\n",
      "\n",
      "[![logo](../../static/wordmark_dark.svg)\n",
      "![logo](../../static/wordmark_light.svg)](../..)\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "How to create a sequence of steps\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "Initializing search\n",
      "\n",
      "[GitHub](https://github.com/langchain-ai/langgraph \"Go to repository\")\n",
      "\n",
      "* [Home](../..)\n",
      "* [API reference](../../reference/graphs/)\n",
      "\n",
      "\n",
      "\n",
      "[![logo](../../static/wordmark_dark.svg)\n",
      "![logo](../../static/wordmark_light.svg)](../..)\n",
      "\n",
      "[GitHub](https://github.com/langchain-ai/langgraph \"Go to repository\")\n",
      "\n",
      "* [Home](../..)\n",
      "\n",
      "  Home\n",
      "  + Get started\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Get started\n",
      "    - [Learn the basics](../../tutorials/introduction/)\n",
      "    - [Deployment](../../tutorials/deployment/)\n",
      "  + Guides\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Guides\n",
      "    - [How-to Guides](../)\n",
      "\n",
      "      How-to Guides\n",
      "      * LangGraph\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "        LangGraph\n",
      "        + [LangGraph](../../how-tos#langgraph)\n",
      "        + Graph API Basics\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "          Graph API Basics\n",
      "          - [Graph API Basics](../../how-tos#graph-api-basics)\n",
      "          - [How to update graph state from nodes](../state-reducers/)\n",
      "          - [How to create a sequence of steps](./)\n",
      "          - [How to create branches for parallel node execution](../branching/)\n",
      "          - [How to create and control loops](../recursion-limit/)\n",
      "          - [How to visualize your graph](../visualization/)\n",
      "        + [Controllability](../../how-tos#controllability)\n",
      "        + [Persistence](../../how-tos#persistence)\n",
      "        + [Memory](../../how-tos#memory)\n",
      "        + [Human-in-the-loop](../../how-tos#human-in-the-loop)\n",
      "        + [Streaming](../../how-tos#streaming)\n",
      "        + [Tool calling](../../how-tos#tool-calling)\n",
      "        + [Subgraphs](../../how-tos#subgraphs)\n",
      "        + [Multi-agent](../../how-tos#multi-agent)\n",
      "        + [State Management](../../how-tos#state-management)\n",
      "        + [Other](../../how-tos#other)\n",
      "        + [Prebuilt ReAct Agent](../../how-tos#prebuilt-react-agent)\n",
      "      * [LangGraph Platform](../../how-tos#langgraph-platform)\n",
      "    - [Concepts](../../concepts/)\n",
      "    - [Tutorials](../../tutorials/)\n",
      "  + Resources\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Resources\n",
      "    - [Prebuilt Agents](../../prebuilt/)\n",
      "    - [Companies using LangGraph](../../adopters/)\n",
      "    - [LLMS-txt](../../llms-txt-overview/)\n",
      "    - [FAQ](../../concepts/faq/)\n",
      "    - [Troubleshooting](../../troubleshooting/errors/)\n",
      "    - [LangGraph Academy Course](https://academy.langchain.com/courses/intro-to-langgraph)\n",
      "* [API reference](../../reference/graphs/)\n",
      "\n",
      "1. [Home](../..)\n",
      "2. [Guides](../)\n",
      "3. [How-to Guides](../)\n",
      "4. [LangGraph](../../how-tos#langgraph)\n",
      "5. [Graph API Basics](../../how-tos#graph-api-basics)\n",
      "\n",
      "How to create a sequence of steps[¶](#how-to-create-a-sequence-of-steps \"Permanent link\")\n",
      "=========================================================================================\n",
      "\n",
      "Prerequisites\n",
      "\n",
      "This guide assumes familiarity with the following:\n",
      "\n",
      "* [How to define and update graph state](../../how-tos/state-reducers)\n",
      "\n",
      "This guide demonstrates how to construct a simple sequence of steps. We will demonstrate:\n",
      "\n",
      "1. How to build a sequential graph\n",
      "2. Built-in short-hand for constructing similar graphs.\n",
      "\n",
      "Summary[¶](#summary \"Permanent link\")\n",
      "=====================================\n",
      "\n",
      "To add a sequence of nodes, we use the `.add_node` and `.add_edge` methods of our [graph](../../concepts/low_level/#stategraph):\n",
      "\n",
      "```\n",
      "from langgraph.graph import START, StateGraph\n",
      "\n",
      "graph_builder = StateGraph(State)\n",
      "\n",
      "# Add nodes\n",
      "graph_builder.add_node(step_1)\n",
      "graph_builder.add_node(step_2)\n",
      "graph_builder.add_node(step_3)\n",
      "\n",
      "# Add edges\n",
      "graph_builder.add_edge(START, \"step_1\")\n",
      "graph_builder.add_edge(\"step_1\", \"step_2\")\n",
      "graph_builder.add_edge(\"step_2\", \"step_3\")\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [START](https://langchain-ai.github.io/langgraph/reference/constants/#langgraph.constants.START) | [StateGraph](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.state.StateGraph)\n",
      "\n",
      "We can also use the built-in shorthand `.add_sequence`:\n",
      "\n",
      "```\n",
      "graph_builder = StateGraph(State).add_sequence([step_1, step_2, step_3])\n",
      "graph_builder.add_edge(START, \"step_1\")\n",
      "\n",
      "```\n",
      "\n",
      "Why split application steps into a sequence with LangGraph?\n",
      "LangGraph makes it easy to add an underlying persistence layer to your application.\n",
      "This allows state to be checkpointed in between the execution of nodes, so your LangGraph nodes govern:\n",
      "\n",
      "* How state updates are [checkpointed](../../concepts/persistence/)\n",
      "* How interruptions are resumed in [human-in-the-loop](../../concepts/human_in_the_loop/) workflows\n",
      "* How we can \"rewind\" and branch-off executions using LangGraph's [time travel](../../concepts/time-travel/) features\n",
      "\n",
      "They also determine how execution steps are [streamed](../../concepts/streaming/), and how your application is visualized\n",
      "and debugged using [LangGraph Studio](../../concepts/langgraph_studio/).\n",
      "\n",
      "Setup[¶](#setup \"Permanent link\")\n",
      "---------------------------------\n",
      "\n",
      "First, let's install langgraph:\n",
      "\n",
      "```\n",
      "%%capture --no-stderr\n",
      "%pip install -U langgraph\n",
      "\n",
      "```\n",
      "\n",
      "Set up [LangSmith](https://smith.langchain.com) for better debugging\n",
      "\n",
      "Sign up for LangSmith to quickly spot issues and improve the performance of your LangGraph projects. LangSmith lets you use trace data to debug, test, and monitor your LLM apps built with LangGraph — read more about how to get started in the [docs](https://docs.smith.langchain.com).\n",
      "\n",
      "Build the graph[¶](#build-the-graph \"Permanent link\")\n",
      "-----------------------------------------------------\n",
      "\n",
      "Let's demonstrate a simple usage example. We will create a sequence of three steps:\n",
      "\n",
      "1. Populate a value in a key of the state\n",
      "2. Update the same value\n",
      "3. Populate a different value\n",
      "\n",
      "### Define state[¶](#define-state \"Permanent link\")\n",
      "\n",
      "Let's first define our [state](../../concepts/low_level/#state). This governs the [schema of the graph](../../concepts/low_level/#schema), and can also specify how to apply updates. See [this guide](../../how-tos/state-reducers) for more detail.\n",
      "\n",
      "In our case, we will just keep track of two values:\n",
      "\n",
      "```\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "\n",
      "class State(TypedDict):\n",
      "    value_1: str\n",
      "    value_2: int\n",
      "\n",
      "```\n",
      "\n",
      "### Define nodes[¶](#define-nodes \"Permanent link\")\n",
      "\n",
      "Our [nodes](../../concepts/low_level/#nodes) are just Python functions that read our graph's state and make updates to it. The first argument to this function will always be the state:\n",
      "\n",
      "```\n",
      "def step_1(state: State):\n",
      "    return {\"value_1\": \"a\"}\n",
      "\n",
      "\n",
      "def step_2(state: State):\n",
      "    current_value_1 = state[\"value_1\"]\n",
      "    return {\"value_1\": f\"{current_value_1} b\"}\n",
      "\n",
      "\n",
      "def step_3(state: State):\n",
      "    return {\"value_2\": 10}\n",
      "\n",
      "```\n",
      "\n",
      "Note\n",
      "\n",
      "Note that when issuing updates to the state, each node can just specify the value of the key it wishes to update.\n",
      "\n",
      "By default, this will **overwrite** the value of the corresponding key. You can also use [reducers](../../concepts/low_level/#reducers) to control how updates are processed— for example, you can append successive updates to a key instead. See [this guide](../../how-tos/state-reducers) for more detail.\n",
      "\n",
      "### Define graph[¶](#define-graph \"Permanent link\")\n",
      "\n",
      "We use [StateGraph](../../concepts/low_level/#stategraph) to define a graph that operates on this state.\n",
      "\n",
      "We will then use [add\\_node](../../concepts/low_level/#messagesstate) and [add\\_edge](../../concepts/low_level/#edges) to populate our graph and define its control flow.\n",
      "\n",
      "```\n",
      "from langgraph.graph import START, StateGraph\n",
      "\n",
      "graph_builder = StateGraph(State)\n",
      "\n",
      "# Add nodes\n",
      "graph_builder.add_node(step_1)\n",
      "graph_builder.add_node(step_2)\n",
      "graph_builder.add_node(step_3)\n",
      "\n",
      "# Add edges\n",
      "graph_builder.add_edge(START, \"step_1\")\n",
      "graph_builder.add_edge(\"step_1\", \"step_2\")\n",
      "graph_builder.add_edge(\"step_2\", \"step_3\")\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [START](https://langchain-ai.github.io/langgraph/reference/constants/#langgraph.constants.START) | [StateGraph](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.state.StateGraph)\n",
      "\n",
      "Specifying custom names\n",
      "\n",
      "You can specify custom names for nodes using `.add_node`:\n",
      "\n",
      "```\n",
      "graph_builder.add_node(\"my_node\", step_1)\n",
      "\n",
      "```\n",
      "\n",
      "Note that:\n",
      "\n",
      "* `.add_edge` takes the names of nodes, which for functions defaults to `node.__name__`.\n",
      "* We must specify the entry point of the graph. For this we add an edge with the [START node](../../concepts/low_level/#start-node).\n",
      "* The graph halts when there are no more nodes to execute.\n",
      "\n",
      "We next [compile](../../concepts/low_level/#compiling-your-graph) our graph. This provides a few basic checks on the structure of the graph (e.g., identifying orphaned nodes). If we were adding persistence to our application via a [checkpointer](../../concepts/persistence/), it would also be passed in here.\n",
      "\n",
      "```\n",
      "graph = graph_builder.compile()\n",
      "\n",
      "```\n",
      "\n",
      "LangGraph provides built-in utilities for visualizing your graph. Let's inspect our sequence. See [this guide](../../how-tos/visualization) for detail on visualization.\n",
      "\n",
      "```\n",
      "from IPython.display import Image, display\n",
      "\n",
      "display(Image(graph.get_graph().draw_mermaid_png()))\n",
      "\n",
      "```\n",
      "\n",
      "![]()\n",
      "\n",
      "### Usage[¶](#usage \"Permanent link\")\n",
      "\n",
      "Let's proceed with a simple invocation:\n",
      "\n",
      "```\n",
      "graph.invoke({\"value_1\": \"c\"})\n",
      "\n",
      "```\n",
      "\n",
      "```\n",
      "{'value_1': 'a b', 'value_2': 10}\n",
      "\n",
      "```\n",
      "\n",
      "Note that:\n",
      "\n",
      "* We kicked off invocation by providing a value for a single state key. We must always provide a value for at least one key.\n",
      "* The value we passed in was overwritten by the first node.\n",
      "* The second node updated the value.\n",
      "* The third node populated a different value.\n",
      "\n",
      "Built-in shorthand[¶](#built-in-shorthand \"Permanent link\")\n",
      "-----------------------------------------------------------\n",
      "\n",
      "Prerequisites\n",
      "\n",
      "`.add_sequence` requires `langgraph>=0.2.46`\n",
      "\n",
      "LangGraph includes a built-in shorthand `.add_sequence` for convenience:\n",
      "\n",
      "```\n",
      "graph_builder = StateGraph(State).add_sequence([step_1, step_2, step_3])\n",
      "graph_builder.add_edge(START, \"step_1\")\n",
      "\n",
      "graph = graph_builder.compile()\n",
      "\n",
      "graph.invoke({\"value_1\": \"c\"})\n",
      "\n",
      "```\n",
      "\n",
      "```\n",
      "{'value_1': 'a b', 'value_2': 10}\n",
      "\n",
      "```\n",
      "\n",
      "Was this page helpful?\n",
      "\n",
      "Thanks for your feedback!\n",
      "\n",
      "Thanks for your feedback! Please help us improve this page by adding to the discussion below.\n",
      "\n",
      "Comments\n",
      "--------\n",
      "\n",
      "Back to top\n",
      "\n",
      "\n",
      "[Previous\n",
      "\n",
      "How to update graph state from nodes](../state-reducers/)\n",
      "[Next\n",
      "\n",
      "How to create branches for parallel node execution](../branching/)\n",
      "\n",
      "Copyright © 2025 LangChain, Inc | [Consent Preferences](#__consent)\n",
      "\n",
      "Made with\n",
      "[Material for MkDocs Insiders](https://squidfunk.github.io/mkdocs-material/)\n",
      "\n",
      "#### Cookie consent\n",
      "\n",
      "We use cookies to recognize your repeated visits and preferences, as well as to measure the effectiveness of our documentation and whether users find what they're searching for. **Clicking \"Accept\" makes our documentation better. Thank you!** ❤️\n",
      "\n",
      "* Google Analytics\n",
      "* GitHub\n",
      "\n",
      "Accept\n",
      "Reject\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: researcher_agent\n",
      "\n",
      "[{'id': 'toolu_01Ko5xMHHUGRrzam4LoqpDxd', 'input': {'url': 'https://langchain-ai.github.io/langgraph/how-tos/state-reducers/'}, 'name': 'fetch_doc', 'type': 'tool_use'}]\n",
      "Tool Calls:\n",
      "  fetch_doc (toolu_01Ko5xMHHUGRrzam4LoqpDxd)\n",
      " Call ID: toolu_01Ko5xMHHUGRrzam4LoqpDxd\n",
      "  Args:\n",
      "    url: https://langchain-ai.github.io/langgraph/how-tos/state-reducers/\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'tools'\n",
      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
      "Name: fetch_doc\n",
      "\n",
      "How to update graph state from nodes\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "[Skip to content](#how-to-update-graph-state-from-nodes)\n",
      "\n",
      "**Join us at  [Interrupt: The Agent AI Conference by LangChain](https://interrupt.langchain.com/) on May 13 & 14 in San Francisco!**\n",
      "\n",
      "[![logo](../../static/wordmark_dark.svg)\n",
      "![logo](../../static/wordmark_light.svg)](../..)\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "How to update graph state from nodes\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "Initializing search\n",
      "\n",
      "[GitHub](https://github.com/langchain-ai/langgraph \"Go to repository\")\n",
      "\n",
      "* [Home](../..)\n",
      "* [API reference](../../reference/graphs/)\n",
      "\n",
      "\n",
      "\n",
      "[![logo](../../static/wordmark_dark.svg)\n",
      "![logo](../../static/wordmark_light.svg)](../..)\n",
      "\n",
      "[GitHub](https://github.com/langchain-ai/langgraph \"Go to repository\")\n",
      "\n",
      "* [Home](../..)\n",
      "\n",
      "  Home\n",
      "  + Get started\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Get started\n",
      "    - [Learn the basics](../../tutorials/introduction/)\n",
      "    - [Deployment](../../tutorials/deployment/)\n",
      "  + Guides\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Guides\n",
      "    - [How-to Guides](../)\n",
      "\n",
      "      How-to Guides\n",
      "      * LangGraph\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "        LangGraph\n",
      "        + [LangGraph](../../how-tos#langgraph)\n",
      "        + Graph API Basics\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "          Graph API Basics\n",
      "          - [Graph API Basics](../../how-tos#graph-api-basics)\n",
      "          - How to update graph state from nodes\n",
      "\n",
      "            [How to update graph state from nodes](./)\n",
      "\n",
      "\n",
      "\n",
      "            Table of contents\n",
      "            * [Setup](#setup)\n",
      "            * [Example graph](#example-graph)\n",
      "\n",
      "              + [Define state](#define-state)\n",
      "              + [Define graph structure](#define-graph-structure)\n",
      "              + [Use graph](#use-graph)\n",
      "            * [Process state updates with reducers](#process-state-updates-with-reducers)\n",
      "\n",
      "              + [MessagesState](#messagesstate)\n",
      "            * [Next steps](#next-steps)\n",
      "          - [How to create a sequence of steps](../sequence/)\n",
      "          - [How to create branches for parallel node execution](../branching/)\n",
      "          - [How to create and control loops](../recursion-limit/)\n",
      "          - [How to visualize your graph](../visualization/)\n",
      "        + [Controllability](../../how-tos#controllability)\n",
      "        + [Persistence](../../how-tos#persistence)\n",
      "        + [Memory](../../how-tos#memory)\n",
      "        + [Human-in-the-loop](../../how-tos#human-in-the-loop)\n",
      "        + [Streaming](../../how-tos#streaming)\n",
      "        + [Tool calling](../../how-tos#tool-calling)\n",
      "        + [Subgraphs](../../how-tos#subgraphs)\n",
      "        + [Multi-agent](../../how-tos#multi-agent)\n",
      "        + [State Management](../../how-tos#state-management)\n",
      "        + [Other](../../how-tos#other)\n",
      "        + [Prebuilt ReAct Agent](../../how-tos#prebuilt-react-agent)\n",
      "      * [LangGraph Platform](../../how-tos#langgraph-platform)\n",
      "    - [Concepts](../../concepts/)\n",
      "    - [Tutorials](../../tutorials/)\n",
      "  + Resources\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Resources\n",
      "    - [Prebuilt Agents](../../prebuilt/)\n",
      "    - [Companies using LangGraph](../../adopters/)\n",
      "    - [LLMS-txt](../../llms-txt-overview/)\n",
      "    - [FAQ](../../concepts/faq/)\n",
      "    - [Troubleshooting](../../troubleshooting/errors/)\n",
      "    - [LangGraph Academy Course](https://academy.langchain.com/courses/intro-to-langgraph)\n",
      "* [API reference](../../reference/graphs/)\n",
      "\n",
      "Table of contents\n",
      "\n",
      "* [Setup](#setup)\n",
      "* [Example graph](#example-graph)\n",
      "\n",
      "  + [Define state](#define-state)\n",
      "  + [Define graph structure](#define-graph-structure)\n",
      "  + [Use graph](#use-graph)\n",
      "* [Process state updates with reducers](#process-state-updates-with-reducers)\n",
      "\n",
      "  + [MessagesState](#messagesstate)\n",
      "* [Next steps](#next-steps)\n",
      "\n",
      "1. [Home](../..)\n",
      "2. [Guides](../)\n",
      "3. [How-to Guides](../)\n",
      "4. [LangGraph](../../how-tos#langgraph)\n",
      "5. [Graph API Basics](../../how-tos#graph-api-basics)\n",
      "\n",
      "How to update graph state from nodes[¶](#how-to-update-graph-state-from-nodes \"Permanent link\")\n",
      "===============================================================================================\n",
      "\n",
      "This guide demonstrates how to define and update [state](../../concepts/low_level/#state) in LangGraph. We will demonstrate:\n",
      "\n",
      "1. How to use state to define a graph's [schema](../../concepts/low_level/#schema)\n",
      "2. How to use [reducers](../../concepts/low_level/#reducers) to control how state updates are processed.\n",
      "\n",
      "We will use [messages](../../concepts/low_level/#messagesstate) in our examples. This represents a versatile formulation of state for many LLM applications. See our [concepts page](../../concepts/low_level/#working-with-messages-in-graph-state) for more detail.\n",
      "\n",
      "Setup[¶](#setup \"Permanent link\")\n",
      "---------------------------------\n",
      "\n",
      "First, let's install langgraph:\n",
      "\n",
      "```\n",
      "%%capture --no-stderr\n",
      "%pip install -U langgraph\n",
      "\n",
      "```\n",
      "\n",
      "Set up [LangSmith](https://smith.langchain.com) for better debugging\n",
      "\n",
      "Sign up for LangSmith to quickly spot issues and improve the performance of your LangGraph projects. LangSmith lets you use trace data to debug, test, and monitor your LLM apps built with LangGraph — read more about how to get started in the [docs](https://docs.smith.langchain.com).\n",
      "\n",
      "Example graph[¶](#example-graph \"Permanent link\")\n",
      "-------------------------------------------------\n",
      "\n",
      "### Define state[¶](#define-state \"Permanent link\")\n",
      "\n",
      "[State](../../concepts/low_level/#state) in LangGraph can be a `TypedDict`, `Pydantic` model, or dataclass. Below we will use `TypedDict`. See [this guide](../../how-tos/state-model) for detail on using Pydantic.\n",
      "\n",
      "By default, graphs will have the same input and output schema, and the state determines that schema. See [this guide](../../how-tos/input_output_schema/) for how to define distinct input and output schemas.\n",
      "\n",
      "Let's consider a simple example:\n",
      "\n",
      "```\n",
      "from langchain_core.messages import AnyMessage\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "\n",
      "class State(TypedDict):\n",
      "    messages: list[AnyMessage]\n",
      "    extra_field: int\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [AnyMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.AnyMessage.html)\n",
      "\n",
      "This state tracks a list of [message](https://python.langchain.com/docs/concepts/messages/) objects, as well as an extra integer field.\n",
      "\n",
      "### Define graph structure[¶](#define-graph-structure \"Permanent link\")\n",
      "\n",
      "Let's build an example graph with a single node. Our [node](../../concepts/low_level/#nodes) is just a Python function that reads our graph's state and makes updates to it. The first argument to this function will always be the state:\n",
      "\n",
      "```\n",
      "from langchain_core.messages import AIMessage\n",
      "\n",
      "\n",
      "def node(state: State):\n",
      "    messages = state[\"messages\"]\n",
      "    new_message = AIMessage(\"Hello!\")\n",
      "\n",
      "    return {\"messages\": messages + [new_message], \"extra_field\": 10}\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [AIMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html)\n",
      "\n",
      "This node simply appends a message to our message list, and populates an extra field.\n",
      "\n",
      "Important\n",
      "\n",
      "Nodes should return updates to the state directly, instead of mutating the state.\n",
      "\n",
      "Let's next define a simple graph containing this node. We use [StateGraph](../../concepts/low_level/#stategraph) to define a graph that operates on this state. We then use [add\\_node](../../concepts/low_level/#messagesstate) to populate our graph.\n",
      "\n",
      "```\n",
      "from langgraph.graph import StateGraph\n",
      "\n",
      "graph_builder = StateGraph(State)\n",
      "graph_builder.add_node(node)\n",
      "graph_builder.set_entry_point(\"node\")\n",
      "graph = graph_builder.compile()\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [StateGraph](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.state.StateGraph)\n",
      "\n",
      "LangGraph provides built-in utilities for visualizing your graph. Let's inspect our graph. See [this guide](../../how-tos/visualization) for detail on visualization.\n",
      "\n",
      "```\n",
      "from IPython.display import Image, display\n",
      "\n",
      "display(Image(graph.get_graph().draw_mermaid_png()))\n",
      "\n",
      "```\n",
      "\n",
      "![]()\n",
      "\n",
      "In this case, our graph just executes a single node.\n",
      "\n",
      "### Use graph[¶](#use-graph \"Permanent link\")\n",
      "\n",
      "Let's proceed with a simple invocation:\n",
      "\n",
      "```\n",
      "from langchain_core.messages import HumanMessage\n",
      "\n",
      "result = graph.invoke({\"messages\": [HumanMessage(\"Hi\")]})\n",
      "result\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [HumanMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.human.HumanMessage.html)\n",
      "\n",
      "```\n",
      "{'messages': [HumanMessage(content='Hi', additional_kwargs={}, response_metadata={}),\n",
      "  AIMessage(content='Hello!', additional_kwargs={}, response_metadata={})],\n",
      " 'extra_field': 10}\n",
      "\n",
      "```\n",
      "\n",
      "Note that:\n",
      "\n",
      "* We kicked off invocation by updating a single key of the state.\n",
      "* We receive the entire state in the invocation result.\n",
      "\n",
      "For convenience, we frequently inspect the content of [message objects](https://python.langchain.com/docs/concepts/messages/) via pretty-print:\n",
      "\n",
      "```\n",
      "for message in result[\"messages\"]:\n",
      "    message.pretty_print()\n",
      "\n",
      "```\n",
      "\n",
      "```\n",
      "================================\u001b[1m Human Message \u001b[0m=================================\n",
      "\n",
      "Hi\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "\n",
      "Hello!\n",
      "\n",
      "```\n",
      "\n",
      "Process state updates with reducers[¶](#process-state-updates-with-reducers \"Permanent link\")\n",
      "---------------------------------------------------------------------------------------------\n",
      "\n",
      "Each key in the state can have its own independent [reducer](../../concepts/low_level/#reducers) function, which controls how updates from nodes are applied. If no reducer function is explicitly specified then it is assumed that all updates to the key should override it.\n",
      "\n",
      "For `TypedDict` state schemas, we can define reducers by annotating the corresponding field of the state with a reducer function.\n",
      "\n",
      "In the earlier example, our node updated the `\"messages\"` key in the state by appending a message to it. Below, we add a reducer to this key, such that updates are automatically appended:\n",
      "\n",
      "```\n",
      "from typing_extensions import Annotated\n",
      "\n",
      "\n",
      "def add(left, right):\n",
      "    \"\"\"Can also import `add` from the `operator` built-in.\"\"\"\n",
      "    return left + right\n",
      "\n",
      "\n",
      "class State(TypedDict):\n",
      "    messages: Annotated[list[AnyMessage], add]\n",
      "    extra_field: int\n",
      "\n",
      "```\n",
      "\n",
      "Now our node can be simplified:\n",
      "\n",
      "```\n",
      "def node(state: State):\n",
      "    new_message = AIMessage(\"Hello!\")\n",
      "    return {\"messages\": [new_message], \"extra_field\": 10}\n",
      "\n",
      "```\n",
      "\n",
      "```\n",
      "from langgraph.graph import START\n",
      "\n",
      "\n",
      "graph = StateGraph(State).add_node(node).add_edge(START, \"node\").compile()\n",
      "\n",
      "result = graph.invoke({\"messages\": [HumanMessage(\"Hi\")]})\n",
      "\n",
      "for message in result[\"messages\"]:\n",
      "    message.pretty_print()\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [START](https://langchain-ai.github.io/langgraph/reference/constants/#langgraph.constants.START)\n",
      "\n",
      "```\n",
      "================================\u001b[1m Human Message \u001b[0m=================================\n",
      "\n",
      "Hi\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "\n",
      "Hello!\n",
      "\n",
      "```\n",
      "\n",
      "### MessagesState[¶](#messagesstate \"Permanent link\")\n",
      "\n",
      "In practice, there are additional considerations for updating lists of messages:\n",
      "\n",
      "* We may wish to update an existing message in the state.\n",
      "* We may want to accept short-hands for [message formats](../../concepts/low_level/#using-messages-in-your-graph), such as [OpenAI format](https://python.langchain.com/docs/concepts/messages/#openai-format).\n",
      "\n",
      "LangGraph includes a built-in reducer `add_messages` that handles these considerations:\n",
      "\n",
      "```\n",
      "from langgraph.graph.message import add_messages\n",
      "\n",
      "\n",
      "class State(TypedDict):\n",
      "    messages: Annotated[list[AnyMessage], add_messages]\n",
      "    extra_field: int\n",
      "\n",
      "\n",
      "def node(state: State):\n",
      "    new_message = AIMessage(\"Hello!\")\n",
      "    return {\"messages\": [new_message], \"extra_field\": 10}\n",
      "\n",
      "\n",
      "graph = StateGraph(State).add_node(node).set_entry_point(\"node\").compile()\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [add\\_messages](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.message.add_messages)\n",
      "\n",
      "```\n",
      "input_message = {\"role\": \"user\", \"content\": \"Hi\"}\n",
      "\n",
      "result = graph.invoke({\"messages\": [input_message]})\n",
      "\n",
      "for message in result[\"messages\"]:\n",
      "    message.pretty_print()\n",
      "\n",
      "```\n",
      "\n",
      "```\n",
      "================================\u001b[1m Human Message \u001b[0m=================================\n",
      "\n",
      "Hi\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "\n",
      "Hello!\n",
      "\n",
      "```\n",
      "\n",
      "This is a versatile representation of state for applications involving [chat models](https://python.langchain.com/docs/concepts/chat_models/). LangGraph includes a pre-built `MessagesState` for convenience, so that we can have:\n",
      "\n",
      "```\n",
      "from langgraph.graph import MessagesState\n",
      "\n",
      "\n",
      "class State(MessagesState):\n",
      "    extra_field: int\n",
      "\n",
      "```\n",
      "\n",
      "Next steps[¶](#next-steps \"Permanent link\")\n",
      "-------------------------------------------\n",
      "\n",
      "* Continue with the [Graph API Basics](../../how-tos/#graph-api-basics) guides.\n",
      "* See more detail on [state management](../../how-tos/#state-management).\n",
      "\n",
      "Was this page helpful?\n",
      "\n",
      "Thanks for your feedback!\n",
      "\n",
      "Thanks for your feedback! Please help us improve this page by adding to the discussion below.\n",
      "\n",
      "Comments\n",
      "--------\n",
      "\n",
      "Back to top\n",
      "\n",
      "\n",
      "[Previous\n",
      "\n",
      "How-to Guides](../)\n",
      "[Next\n",
      "\n",
      "How to create a sequence of steps](../sequence/)\n",
      "\n",
      "Copyright © 2025 LangChain, Inc | [Consent Preferences](#__consent)\n",
      "\n",
      "Made with\n",
      "[Material for MkDocs Insiders](https://squidfunk.github.io/mkdocs-material/)\n",
      "\n",
      "#### Cookie consent\n",
      "\n",
      "We use cookies to recognize your repeated visits and preferences, as well as to measure the effectiveness of our documentation and whether users find what they're searching for. **Clicking \"Accept\" makes our documentation better. Thank you!** ❤️\n",
      "\n",
      "* Google Analytics\n",
      "* GitHub\n",
      "\n",
      "Accept\n",
      "Reject\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: researcher_agent\n",
      "\n",
      "[{'id': 'toolu_019mY42xe6F7yYrb17WB9QXb', 'input': {'url': 'https://langchain-ai.github.io/langgraph/how-tos/input_output_schema/'}, 'name': 'fetch_doc', 'type': 'tool_use'}]\n",
      "Tool Calls:\n",
      "  fetch_doc (toolu_019mY42xe6F7yYrb17WB9QXb)\n",
      " Call ID: toolu_019mY42xe6F7yYrb17WB9QXb\n",
      "  Args:\n",
      "    url: https://langchain-ai.github.io/langgraph/how-tos/input_output_schema/\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'tools'\n",
      "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
      "Name: fetch_doc\n",
      "\n",
      "How to define input/output schema for your graph\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "[Skip to content](#how-to-define-inputoutput-schema-for-your-graph)\n",
      "\n",
      "**Join us at  [Interrupt: The Agent AI Conference by LangChain](https://interrupt.langchain.com/) on May 13 & 14 in San Francisco!**\n",
      "\n",
      "[![logo](../../static/wordmark_dark.svg)\n",
      "![logo](../../static/wordmark_light.svg)](../..)\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "How to define input/output schema for your graph\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "Initializing search\n",
      "\n",
      "[GitHub](https://github.com/langchain-ai/langgraph \"Go to repository\")\n",
      "\n",
      "* [Home](../..)\n",
      "* [API reference](../../reference/graphs/)\n",
      "\n",
      "\n",
      "\n",
      "[![logo](../../static/wordmark_dark.svg)\n",
      "![logo](../../static/wordmark_light.svg)](../..)\n",
      "\n",
      "[GitHub](https://github.com/langchain-ai/langgraph \"Go to repository\")\n",
      "\n",
      "* [Home](../..)\n",
      "\n",
      "  Home\n",
      "  + Get started\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Get started\n",
      "    - [Learn the basics](../../tutorials/introduction/)\n",
      "    - [Deployment](../../tutorials/deployment/)\n",
      "  + Guides\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Guides\n",
      "    - [How-to Guides](../)\n",
      "\n",
      "      How-to Guides\n",
      "      * LangGraph\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "        LangGraph\n",
      "        + [LangGraph](../../how-tos#langgraph)\n",
      "        + [Graph API Basics](../../how-tos#graph-api-basics)\n",
      "        + [Controllability](../../how-tos#controllability)\n",
      "        + [Persistence](../../how-tos#persistence)\n",
      "        + [Memory](../../how-tos#memory)\n",
      "        + [Human-in-the-loop](../../how-tos#human-in-the-loop)\n",
      "        + [Streaming](../../how-tos#streaming)\n",
      "        + [Tool calling](../../how-tos#tool-calling)\n",
      "        + [Subgraphs](../../how-tos#subgraphs)\n",
      "        + [Multi-agent](../../how-tos#multi-agent)\n",
      "        + State Management\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "          State Management\n",
      "          - [State Management](../../how-tos#state-management)\n",
      "          - [How to use Pydantic model as graph state](../state-model/)\n",
      "          - How to define input/output schema for your graph\n",
      "\n",
      "            [How to define input/output schema for your graph](./)\n",
      "\n",
      "\n",
      "\n",
      "            Table of contents\n",
      "            * [Setup](#setup)\n",
      "            * [Define and use the graph](#define-and-use-the-graph)\n",
      "          - [How to pass private state between nodes](../pass_private_state/)\n",
      "        + [Other](../../how-tos#other)\n",
      "        + [Prebuilt ReAct Agent](../../how-tos#prebuilt-react-agent)\n",
      "      * [LangGraph Platform](../../how-tos#langgraph-platform)\n",
      "    - [Concepts](../../concepts/)\n",
      "    - [Tutorials](../../tutorials/)\n",
      "  + Resources\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "    Resources\n",
      "    - [Prebuilt Agents](../../prebuilt/)\n",
      "    - [Companies using LangGraph](../../adopters/)\n",
      "    - [LLMS-txt](../../llms-txt-overview/)\n",
      "    - [FAQ](../../concepts/faq/)\n",
      "    - [Troubleshooting](../../troubleshooting/errors/)\n",
      "    - [LangGraph Academy Course](https://academy.langchain.com/courses/intro-to-langgraph)\n",
      "* [API reference](../../reference/graphs/)\n",
      "\n",
      "Table of contents\n",
      "\n",
      "* [Setup](#setup)\n",
      "* [Define and use the graph](#define-and-use-the-graph)\n",
      "\n",
      "1. [Home](../..)\n",
      "2. [Guides](../)\n",
      "3. [How-to Guides](../)\n",
      "4. [LangGraph](../../how-tos#langgraph)\n",
      "5. [State Management](../../how-tos#state-management)\n",
      "\n",
      "How to define input/output schema for your graph[¶](#how-to-define-inputoutput-schema-for-your-graph \"Permanent link\")\n",
      "======================================================================================================================\n",
      "\n",
      "Prerequisites\n",
      "\n",
      "This guide assumes familiarity with the following:\n",
      "\n",
      "* [Multiple Schemas](https://langchain-ai.github.io/langgraph/concepts/low_level/#multiple-schemas)\n",
      "* [State Graph](https://langchain-ai.github.io/langgraph/concepts/low_level/#stategraph)\n",
      "\n",
      "By default, `StateGraph` operates with a single schema, and all nodes are expected to communicate using that schema. However, it's also possible to define distinct input and output schemas for a graph.\n",
      "\n",
      "When distinct schemas are specified, an internal schema will still be used for communication between nodes. The input schema ensures that the provided input matches the expected structure, while the output schema filters the internal data to return only the relevant information according to the defined output schema.\n",
      "\n",
      "In this example, we'll see how to define distinct input and output schema.\n",
      "\n",
      "Setup[¶](#setup \"Permanent link\")\n",
      "---------------------------------\n",
      "\n",
      "First, let's install the required packages\n",
      "\n",
      "```\n",
      "%%capture --no-stderr\n",
      "%pip install -U langgraph\n",
      "\n",
      "```\n",
      "\n",
      "Set up [LangSmith](https://smith.langchain.com) for LangGraph development\n",
      "\n",
      "Sign up for LangSmith to quickly spot issues and improve the performance of your LangGraph projects. LangSmith lets you use trace data to debug, test, and monitor your LLM apps built with LangGraph — read more about how to get started [here](https://docs.smith.langchain.com).\n",
      "\n",
      "Define and use the graph[¶](#define-and-use-the-graph \"Permanent link\")\n",
      "-----------------------------------------------------------------------\n",
      "\n",
      "```\n",
      "from langgraph.graph import StateGraph, START, END\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "\n",
      "# Define the schema for the input\n",
      "class InputState(TypedDict):\n",
      "    question: str\n",
      "\n",
      "\n",
      "# Define the schema for the output\n",
      "class OutputState(TypedDict):\n",
      "    answer: str\n",
      "\n",
      "\n",
      "# Define the overall schema, combining both input and output\n",
      "class OverallState(InputState, OutputState):\n",
      "    pass\n",
      "\n",
      "\n",
      "# Define the node that processes the input and generates an answer\n",
      "def answer_node(state: InputState):\n",
      "    # Example answer and an extra key\n",
      "    return {\"answer\": \"bye\", \"question\": state[\"question\"]}\n",
      "\n",
      "\n",
      "# Build the graph with input and output schemas specified\n",
      "builder = StateGraph(OverallState, input=InputState, output=OutputState)\n",
      "builder.add_node(answer_node)  # Add the answer node\n",
      "builder.add_edge(START, \"answer_node\")  # Define the starting edge\n",
      "builder.add_edge(\"answer_node\", END)  # Define the ending edge\n",
      "graph = builder.compile()  # Compile the graph\n",
      "\n",
      "# Invoke the graph with an input and print the result\n",
      "print(graph.invoke({\"question\": \"hi\"}))\n",
      "\n",
      "```\n",
      "\n",
      "API Reference: [StateGraph](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.state.StateGraph) | [START](https://langchain-ai.github.io/langgraph/reference/constants/#langgraph.constants.START) | [END](https://langchain-ai.github.io/langgraph/reference/constants/#langgraph.constants.END)\n",
      "\n",
      "```\n",
      "{'answer': 'bye'}\n",
      "\n",
      "```\n",
      "\n",
      "Notice that the output of invoke only includes the output schema.\n",
      "\n",
      "Was this page helpful?\n",
      "\n",
      "Thanks for your feedback!\n",
      "\n",
      "Thanks for your feedback! Please help us improve this page by adding to the discussion below.\n",
      "\n",
      "Comments\n",
      "--------\n",
      "\n",
      "Back to top\n",
      "\n",
      "\n",
      "[Previous\n",
      "\n",
      "How to use Pydantic model as graph state](../state-model/)\n",
      "[Next\n",
      "\n",
      "How to pass private state between nodes](../pass_private_state/)\n",
      "\n",
      "Copyright © 2025 LangChain, Inc | [Consent Preferences](#__consent)\n",
      "\n",
      "Made with\n",
      "[Material for MkDocs Insiders](https://squidfunk.github.io/mkdocs-material/)\n",
      "\n",
      "#### Cookie consent\n",
      "\n",
      "We use cookies to recognize your repeated visits and preferences, as well as to measure the effectiveness of our documentation and whether users find what they're searching for. **Clicking \"Accept\" makes our documentation better. Thank you!** ❤️\n",
      "\n",
      "* Google Analytics\n",
      "* GitHub\n",
      "\n",
      "Accept\n",
      "Reject\n",
      "\n",
      "\n",
      "\n",
      "Namespace '('researcher_agent:b78cf453-b1bc-2e36-19a1-c32613963d1c',)'\n",
      "Update from node 'agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: researcher_agent\n",
      "\n",
      "Now that I've reviewed the documentation, I'll create a LangGraph application that follows your requirements. This application will be a joke generator that takes a topic from a user, generates a joke, and checks if it has a punchline.\n",
      "\n",
      "Here's the implementation:\n",
      "\n",
      "```python\n",
      "import os\n",
      "from typing import TypedDict, Annotated\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "from langchain_core.prompts import ChatPromptTemplate\n",
      "from langchain_openai import ChatOpenAI\n",
      "\n",
      "from langgraph.graph import StateGraph, START, END\n",
      "from langgraph.graph.message import add_messages\n",
      "\n",
      "# Set your OpenAI API key here or use environment variable\n",
      "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\"  # Replace with your API key or set in environment\n",
      "\n",
      "# Define the state schema\n",
      "class JokeState(TypedDict):\n",
      "    topic: str\n",
      "    joke: str\n",
      "    has_punchline: bool\n",
      "    validation_result: str\n",
      "\n",
      "# Define our input schema (what the user provides)\n",
      "class InputState(TypedDict):\n",
      "    topic: str\n",
      "\n",
      "# Define our output schema (what we return to the user)\n",
      "class OutputState(TypedDict):\n",
      "    joke: str\n",
      "    validation_result: str\n",
      "\n",
      "# Create the joke generation node\n",
      "def generate_joke(state: JokeState):\n",
      "    topic = state[\"topic\"]\n",
      "    \n",
      "    # Create a prompt for generating a joke\n",
      "    prompt = ChatPromptTemplate.from_template(\n",
      "        \"\"\"Generate a funny joke about {topic}.\n",
      "        The joke should have a setup and a punchline.\n",
      "        Format your response exactly like this:\n",
      "        setup: [setup of the joke]\n",
      "        punchline: [punchline of the joke]\n",
      "        \"\"\"\n",
      "    )\n",
      "    \n",
      "    # Initialize the model (using GPT-3.5 for cost efficiency)\n",
      "    model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
      "    \n",
      "    # Chain the prompt and model\n",
      "    chain = prompt | model\n",
      "    \n",
      "    # Generate the joke\n",
      "    response = chain.invoke({\"topic\": topic})\n",
      "    joke = response.content\n",
      "    \n",
      "    return {\"joke\": joke}\n",
      "\n",
      "# Create the punchline validation node\n",
      "def validate_punchline(state: JokeState):\n",
      "    joke = state[\"joke\"]\n",
      "    \n",
      "    # Check if the joke has both setup and punchline\n",
      "    has_punchline = \"punchline:\" in joke.lower() and \"setup:\" in joke.lower()\n",
      "    \n",
      "    # Prepare validation result message\n",
      "    if has_punchline:\n",
      "        validation_result = \"✅ The joke has a proper setup and punchline.\"\n",
      "    else:\n",
      "        validation_result = \"❌ The joke is missing a setup or punchline. Let me try again.\"\n",
      "    \n",
      "    return {\n",
      "        \"has_punchline\": has_punchline,\n",
      "        \"validation_result\": validation_result\n",
      "    }\n",
      "\n",
      "# Build the graph\n",
      "def build_graph():\n",
      "    # Create the workflow\n",
      "    workflow = StateGraph(JokeState, input=InputState, output=OutputState)\n",
      "    \n",
      "    # Add the nodes\n",
      "    workflow.add_node(\"generate_joke\", generate_joke)\n",
      "    workflow.add_node(\"validate_punchline\", validate_punchline)\n",
      "    \n",
      "    # Add the edges\n",
      "    workflow.add_edge(START, \"generate_joke\")\n",
      "    workflow.add_edge(\"generate_joke\", \"validate_punchline\")\n",
      "    workflow.add_edge(\"validate_punchline\", END)\n",
      "    \n",
      "    # Compile the graph\n",
      "    return workflow.compile()\n",
      "\n",
      "# Main function to run the application\n",
      "def main():\n",
      "    # Build the graph\n",
      "    graph = build_graph()\n",
      "    \n",
      "    # Get user input\n",
      "    topic = input(\"Enter a topic for a joke: \")\n",
      "    \n",
      "    # Invoke the graph\n",
      "    result = graph.invoke({\"topic\": topic})\n",
      "    \n",
      "    # Print the result\n",
      "    print(\"\\n--- Generated Joke ---\")\n",
      "    print(result[\"joke\"])\n",
      "    print(\"\\n--- Validation ---\")\n",
      "    print(result[\"validation_result\"])\n",
      "\n",
      "if __name__ == \"__main__\":\n",
      "    main()\n",
      "```\n",
      "\n",
      "## Enhanced Version with Retry Logic\n",
      "\n",
      "Here's an enhanced version that will retry joke generation if no punchline is found:\n",
      "\n",
      "```python\n",
      "import os\n",
      "from typing import TypedDict, Annotated\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "from langchain_core.prompts import ChatPromptTemplate\n",
      "from langchain_openai import ChatOpenAI\n",
      "\n",
      "from langgraph.graph import StateGraph, START, END\n",
      "\n",
      "# Set your OpenAI API key here or use environment variable\n",
      "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\"  # Replace with your API key or set in environment\n",
      "\n",
      "# Define the state schema\n",
      "class JokeState(TypedDict):\n",
      "    topic: str\n",
      "    joke: str\n",
      "    has_punchline: bool\n",
      "    validation_result: str\n",
      "    attempts: int\n",
      "\n",
      "# Define our input schema (what the user provides)\n",
      "class InputState(TypedDict):\n",
      "    topic: str\n",
      "\n",
      "# Define our output schema (what we return to the user)\n",
      "class OutputState(TypedDict):\n",
      "    joke: str\n",
      "    validation_result: str\n",
      "\n",
      "# Create the joke generation node\n",
      "def generate_joke(state: JokeState):\n",
      "    topic = state[\"topic\"]\n",
      "    attempts = state.get(\"attempts\", 0)\n",
      "    \n",
      "    # Create a prompt for generating a joke, with a stronger instruction if we're retrying\n",
      "    template = \"\"\"Generate a funny joke about {topic}.\n",
      "    The joke MUST have both a setup and a punchline.\n",
      "    Format your response exactly like this:\n",
      "    setup: [setup of the joke]\n",
      "    punchline: [punchline of the joke]\n",
      "    \"\"\"\n",
      "    \n",
      "    if attempts > 0:\n",
      "        template += \"\\n\\nIMPORTANT: Make sure to include BOTH 'setup:' and 'punchline:' labels in your response.\"\n",
      "    \n",
      "    prompt = ChatPromptTemplate.from_template(template)\n",
      "    \n",
      "    # Initialize the model\n",
      "    model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
      "    \n",
      "    # Chain the prompt and model\n",
      "    chain = prompt | model\n",
      "    \n",
      "    # Generate the joke\n",
      "    response = chain.invoke({\"topic\": topic})\n",
      "    joke = response.content\n",
      "    \n",
      "    return {\n",
      "        \"joke\": joke, \n",
      "        \"attempts\": attempts + 1\n",
      "    }\n",
      "\n",
      "# Create the punchline validation node\n",
      "def validate_punchline(state: JokeState):\n",
      "    joke = state[\"joke\"]\n",
      "    \n",
      "    # Check if the joke has both setup and punchline\n",
      "    has_punchline = \"punchline:\" in joke.lower() and \"setup:\" in joke.lower()\n",
      "    \n",
      "    # Prepare validation result message\n",
      "    if has_punchline:\n",
      "        validation_result = \"✅ The joke has a proper setup and punchline.\"\n",
      "    else:\n",
      "        validation_result = \"❌ The joke is missing a setup or punchline.\"\n",
      "    \n",
      "    return {\n",
      "        \"has_punchline\": has_punchline,\n",
      "        \"validation_result\": validation_result\n",
      "    }\n",
      "\n",
      "# Define our routing logic\n",
      "def should_retry(state: JokeState):\n",
      "    # If no punchline and we've tried less than 3 times, retry\n",
      "    if not state[\"has_punchline\"] and state[\"attempts\"] < 3:\n",
      "        return \"generate_joke\"\n",
      "    else:\n",
      "        return END\n",
      "\n",
      "# Build the graph\n",
      "def build_graph():\n",
      "    # Create the workflow\n",
      "    workflow = StateGraph(JokeState, input=InputState, output=OutputState)\n",
      "    \n",
      "    # Add the nodes\n",
      "    workflow.add_node(\"generate_joke\", generate_joke)\n",
      "    workflow.add_node(\"validate_punchline\", validate_punchline)\n",
      "    \n",
      "    # Add the edges with conditional routing\n",
      "    workflow.add_edge(START, \"generate_joke\")\n",
      "    workflow.add_edge(\"generate_joke\", \"validate_punchline\")\n",
      "    workflow.add_edge(\"validate_punchline\", should_retry)\n",
      "    \n",
      "    # Compile the graph\n",
      "    return workflow.compile()\n",
      "\n",
      "# Main function to run the application\n",
      "def main():\n",
      "    # Build the graph\n",
      "    graph = build_graph()\n",
      "    \n",
      "    # Get user input\n",
      "    topic = input(\"Enter a topic for a joke: \")\n",
      "    \n",
      "    # Invoke the graph\n",
      "    result = graph.invoke({\"topic\": topic})\n",
      "    \n",
      "    # Print the result\n",
      "    print(\"\\n--- Generated Joke ---\")\n",
      "    print(result[\"joke\"])\n",
      "    print(\"\\n--- Validation ---\")\n",
      "    print(result[\"validation_result\"])\n",
      "\n",
      "if __name__ == \"__main__\":\n",
      "    main()\n",
      "```\n",
      "\n",
      "## How to Use\n",
      "\n",
      "1. Save any of these scripts to a Python file (e.g., `joke_generator.py`)\n",
      "2. Make sure you have the necessary packages installed:\n",
      "   ```\n",
      "   pip install langgraph langchain-core langchain-openai\n",
      "   ```\n",
      "3. Set your OpenAI API key either in the script or as an environment variable\n",
      "4. Run the script:\n",
      "   ```\n",
      "   python joke_generator.py\n",
      "   ```\n",
      "5. Enter a topic when prompted and see the generated joke\n",
      "\n",
      "## Explanation\n",
      "\n",
      "This LangGraph application follows your requirements:\n",
      "\n",
      "1. **Input**: Takes a topic from the user.\n",
      "2. **Processing**: Uses a graph with nodes for joke generation and punchline validation.\n",
      "3. **Output**: Returns a formatted joke with \"setup:\" and \"punchline:\" sections.\n",
      "4. **Validation**: Checks if the joke includes both setup and punchline.\n",
      "\n",
      "The enhanced version adds a retry mechanism that will attempt to regenerate the joke up to 3 times if the initial joke doesn't have a proper punchline.\n",
      "\n",
      "This implementation demonstrates LangGraph's ability to create structured workflows for prompting LLMs and validating their outputs.\n",
      "\n",
      "\n",
      "\n",
      "Namespace '()'\n",
      "Update from node 'researcher_agent'\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "Name: researcher_agent\n",
      "\n",
      "Now that I've reviewed the documentation, I'll create a LangGraph application that follows your requirements. This application will be a joke generator that takes a topic from a user, generates a joke, and checks if it has a punchline.\n",
      "\n",
      "Here's the implementation:\n",
      "\n",
      "```python\n",
      "import os\n",
      "from typing import TypedDict, Annotated\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "from langchain_core.prompts import ChatPromptTemplate\n",
      "from langchain_openai import ChatOpenAI\n",
      "\n",
      "from langgraph.graph import StateGraph, START, END\n",
      "from langgraph.graph.message import add_messages\n",
      "\n",
      "# Set your OpenAI API key here or use environment variable\n",
      "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\"  # Replace with your API key or set in environment\n",
      "\n",
      "# Define the state schema\n",
      "class JokeState(TypedDict):\n",
      "    topic: str\n",
      "    joke: str\n",
      "    has_punchline: bool\n",
      "    validation_result: str\n",
      "\n",
      "# Define our input schema (what the user provides)\n",
      "class InputState(TypedDict):\n",
      "    topic: str\n",
      "\n",
      "# Define our output schema (what we return to the user)\n",
      "class OutputState(TypedDict):\n",
      "    joke: str\n",
      "    validation_result: str\n",
      "\n",
      "# Create the joke generation node\n",
      "def generate_joke(state: JokeState):\n",
      "    topic = state[\"topic\"]\n",
      "    \n",
      "    # Create a prompt for generating a joke\n",
      "    prompt = ChatPromptTemplate.from_template(\n",
      "        \"\"\"Generate a funny joke about {topic}.\n",
      "        The joke should have a setup and a punchline.\n",
      "        Format your response exactly like this:\n",
      "        setup: [setup of the joke]\n",
      "        punchline: [punchline of the joke]\n",
      "        \"\"\"\n",
      "    )\n",
      "    \n",
      "    # Initialize the model (using GPT-3.5 for cost efficiency)\n",
      "    model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
      "    \n",
      "    # Chain the prompt and model\n",
      "    chain = prompt | model\n",
      "    \n",
      "    # Generate the joke\n",
      "    response = chain.invoke({\"topic\": topic})\n",
      "    joke = response.content\n",
      "    \n",
      "    return {\"joke\": joke}\n",
      "\n",
      "# Create the punchline validation node\n",
      "def validate_punchline(state: JokeState):\n",
      "    joke = state[\"joke\"]\n",
      "    \n",
      "    # Check if the joke has both setup and punchline\n",
      "    has_punchline = \"punchline:\" in joke.lower() and \"setup:\" in joke.lower()\n",
      "    \n",
      "    # Prepare validation result message\n",
      "    if has_punchline:\n",
      "        validation_result = \"✅ The joke has a proper setup and punchline.\"\n",
      "    else:\n",
      "        validation_result = \"❌ The joke is missing a setup or punchline. Let me try again.\"\n",
      "    \n",
      "    return {\n",
      "        \"has_punchline\": has_punchline,\n",
      "        \"validation_result\": validation_result\n",
      "    }\n",
      "\n",
      "# Build the graph\n",
      "def build_graph():\n",
      "    # Create the workflow\n",
      "    workflow = StateGraph(JokeState, input=InputState, output=OutputState)\n",
      "    \n",
      "    # Add the nodes\n",
      "    workflow.add_node(\"generate_joke\", generate_joke)\n",
      "    workflow.add_node(\"validate_punchline\", validate_punchline)\n",
      "    \n",
      "    # Add the edges\n",
      "    workflow.add_edge(START, \"generate_joke\")\n",
      "    workflow.add_edge(\"generate_joke\", \"validate_punchline\")\n",
      "    workflow.add_edge(\"validate_punchline\", END)\n",
      "    \n",
      "    # Compile the graph\n",
      "    return workflow.compile()\n",
      "\n",
      "# Main function to run the application\n",
      "def main():\n",
      "    # Build the graph\n",
      "    graph = build_graph()\n",
      "    \n",
      "    # Get user input\n",
      "    topic = input(\"Enter a topic for a joke: \")\n",
      "    \n",
      "    # Invoke the graph\n",
      "    result = graph.invoke({\"topic\": topic})\n",
      "    \n",
      "    # Print the result\n",
      "    print(\"\\n--- Generated Joke ---\")\n",
      "    print(result[\"joke\"])\n",
      "    print(\"\\n--- Validation ---\")\n",
      "    print(result[\"validation_result\"])\n",
      "\n",
      "if __name__ == \"__main__\":\n",
      "    main()\n",
      "```\n",
      "\n",
      "## Enhanced Version with Retry Logic\n",
      "\n",
      "Here's an enhanced version that will retry joke generation if no punchline is found:\n",
      "\n",
      "```python\n",
      "import os\n",
      "from typing import TypedDict, Annotated\n",
      "from typing_extensions import TypedDict\n",
      "\n",
      "from langchain_core.prompts import ChatPromptTemplate\n",
      "from langchain_openai import ChatOpenAI\n",
      "\n",
      "from langgraph.graph import StateGraph, START, END\n",
      "\n",
      "# Set your OpenAI API key here or use environment variable\n",
      "os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\"  # Replace with your API key or set in environment\n",
      "\n",
      "# Define the state schema\n",
      "class JokeState(TypedDict):\n",
      "    topic: str\n",
      "    joke: str\n",
      "    has_punchline: bool\n",
      "    validation_result: str\n",
      "    attempts: int\n",
      "\n",
      "# Define our input schema (what the user provides)\n",
      "class InputState(TypedDict):\n",
      "    topic: str\n",
      "\n",
      "# Define our output schema (what we return to the user)\n",
      "class OutputState(TypedDict):\n",
      "    joke: str\n",
      "    validation_result: str\n",
      "\n",
      "# Create the joke generation node\n",
      "def generate_joke(state: JokeState):\n",
      "    topic = state[\"topic\"]\n",
      "    attempts = state.get(\"attempts\", 0)\n",
      "    \n",
      "    # Create a prompt for generating a joke, with a stronger instruction if we're retrying\n",
      "    template = \"\"\"Generate a funny joke about {topic}.\n",
      "    The joke MUST have both a setup and a punchline.\n",
      "    Format your response exactly like this:\n",
      "    setup: [setup of the joke]\n",
      "    punchline: [punchline of the joke]\n",
      "    \"\"\"\n",
      "    \n",
      "    if attempts > 0:\n",
      "        template += \"\\n\\nIMPORTANT: Make sure to include BOTH 'setup:' and 'punchline:' labels in your response.\"\n",
      "    \n",
      "    prompt = ChatPromptTemplate.from_template(template)\n",
      "    \n",
      "    # Initialize the model\n",
      "    model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
      "    \n",
      "    # Chain the prompt and model\n",
      "    chain = prompt | model\n",
      "    \n",
      "    # Generate the joke\n",
      "    response = chain.invoke({\"topic\": topic})\n",
      "    joke = response.content\n",
      "    \n",
      "    return {\n",
      "        \"joke\": joke, \n",
      "        \"attempts\": attempts + 1\n",
      "    }\n",
      "\n",
      "# Create the punchline validation node\n",
      "def validate_punchline(state: JokeState):\n",
      "    joke = state[\"joke\"]\n",
      "    \n",
      "    # Check if the joke has both setup and punchline\n",
      "    has_punchline = \"punchline:\" in joke.lower() and \"setup:\" in joke.lower()\n",
      "    \n",
      "    # Prepare validation result message\n",
      "    if has_punchline:\n",
      "        validation_result = \"✅ The joke has a proper setup and punchline.\"\n",
      "    else:\n",
      "        validation_result = \"❌ The joke is missing a setup or punchline.\"\n",
      "    \n",
      "    return {\n",
      "        \"has_punchline\": has_punchline,\n",
      "        \"validation_result\": validation_result\n",
      "    }\n",
      "\n",
      "# Define our routing logic\n",
      "def should_retry(state: JokeState):\n",
      "    # If no punchline and we've tried less than 3 times, retry\n",
      "    if not state[\"has_punchline\"] and state[\"attempts\"] < 3:\n",
      "        return \"generate_joke\"\n",
      "    else:\n",
      "        return END\n",
      "\n",
      "# Build the graph\n",
      "def build_graph():\n",
      "    # Create the workflow\n",
      "    workflow = StateGraph(JokeState, input=InputState, output=OutputState)\n",
      "    \n",
      "    # Add the nodes\n",
      "    workflow.add_node(\"generate_joke\", generate_joke)\n",
      "    workflow.add_node(\"validate_punchline\", validate_punchline)\n",
      "    \n",
      "    # Add the edges with conditional routing\n",
      "    workflow.add_edge(START, \"generate_joke\")\n",
      "    workflow.add_edge(\"generate_joke\", \"validate_punchline\")\n",
      "    workflow.add_conditional_edges(\"validate_punchline\", should_retry)\n",
      "    \n",
      "    # Compile the graph\n",
      "    return workflow.compile()\n",
      "\n",
      "# Main function to run the application\n",
      "def main():\n",
      "    # Build the graph\n",
      "    graph = build_graph()\n",
      "    \n",
      "    # Get user input\n",
      "    topic = input(\"Enter a topic for a joke: \")\n",
      "    \n",
      "    # Invoke the graph\n",
      "    result = graph.invoke({\"topic\": topic})\n",
      "    \n",
      "    # Print the result\n",
      "    print(\"\\n--- Generated Joke ---\")\n",
      "    print(result[\"joke\"])\n",
      "    print(\"\\n--- Validation ---\")\n",
      "    print(result[\"validation_result\"])\n",
      "\n",
      "if __name__ == \"__main__\":\n",
      "    main()\n",
      "```\n",
      "\n",
      "## How to Use\n",
      "\n",
      "1. Save any of these scripts to a Python file (e.g., `joke_generator.py`)\n",
      "2. Make sure you have the necessary packages installed:\n",
      "   ```\n",
      "   pip install langgraph langchain-core langchain-openai\n",
      "   ```\n",
      "3. Set your OpenAI API key either in the script or as an environment variable\n",
      "4. Run the script:\n",
      "   ```\n",
      "   python joke_generator.py\n",
      "   ```\n",
      "5. Enter a topic when prompted and see the generated joke\n",
      "\n",
      "## Explanation\n",
      "\n",
      "This LangGraph application follows your requirements:\n",
      "\n",
      "1. **Input**: Takes a topic from the user.\n",
      "2. **Processing**: Uses a graph with nodes for joke generation and punchline validation.\n",
      "3. **Output**: Returns a formatted joke with \"setup:\" and \"punchline:\" sections.\n",
      "4. **Validation**: Checks if the joke includes both setup and punchline.\n",
      "\n",
      "The enhanced version adds a retry mechanism that will attempt to regenerate the joke up to 3 times if the initial joke doesn't have a proper punchline.\n",
      "\n",
      "This implementation demonstrates LangGraph's ability to create structured workflows for prompting LLMs and validating their outputs.\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "===\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# The concrete user request the swarm is asked to implement.\n",
    "request = \"1. User will pass a topic. 2. Format the joke with 'setup:' and 'punchline:' 3. Just  py script. No UI or deployment. 4. Use OpenAI. 5. Format can be a string.\"\n",
    "\n",
    "# Stream the swarm's run and pretty-print each step; subgraphs=True also\n",
    "# surfaces events from the individual agents' internal graphs.\n",
    "# NOTE(review): `app` and `config` are presumably created in an earlier cell\n",
    "# (the compiled swarm and its run config) -- confirm before Restart & Run All.\n",
    "print_stream(\n",
    "    app.stream(\n",
    "        {\"messages\": [{\"role\": \"user\", \"content\": request}]}, config, subgraphs=True\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Traces \n",
    "\n",
    "Example planning trace - \n",
    "\n",
    "https://smith.langchain.com/public/7a428dfc-b8b8-4e28-bc05-9508a848082c/r\n",
    "\n",
    "Example researcher trace - \n",
    "\n",
    "https://smith.langchain.com/public/1c9d0e28-56c4-4241-922b-afd614464edd/r\n",
    "\n",
    "### Test Code "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.messages import HumanMessage\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langgraph.graph import END, START, StateGraph\n",
    "from typing_extensions import TypedDict\n",
    "\n",
    "\n",
    "# Define state schema for our joke generator\n",
    "class State(TypedDict):\n",
    "    \"\"\"Graph state shared by every node in the joke workflow.\"\"\"\n",
    "\n",
    "    topic: str  # The topic provided by the user\n",
    "    joke: str  # The generated joke\n",
    "    has_punchline: bool  # Whether the joke has a punchline\n",
    "\n",
    "\n",
    "# Create the nodes for our graph\n",
    "def generate_joke(state: State):\n",
    "    \"\"\"Generate a joke with a punchline based on the provided topic.\"\"\"\n",
    "    # Instruction that explicitly asks for a setup/'punchline:' format\n",
    "    prompt = f\"Create a joke about {state['topic']}. Format it with a setup followed by 'punchline:' and then the actual punchline.\"\n",
    "\n",
    "    # Ask the chat model and write the joke text back into state\n",
    "    chat_model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
    "    joke_text = chat_model.invoke([HumanMessage(content=prompt)]).content\n",
    "\n",
    "    return {\"joke\": joke_text}\n",
    "\n",
    "\n",
    "def check_punchline(state: State):\n",
    "    \"\"\"Check if the joke contains the word 'punchline'.\"\"\"\n",
    "    # Case-insensitive substring test; a single boolean expression\n",
    "    # replaces the original if/else pair.\n",
    "    return {\"has_punchline\": \"punchline\" in state[\"joke\"].lower()}\n",
    "\n",
    "\n",
    "# Define conditional edge decision function\n",
    "def decide_next_step(state: State):\n",
    "    \"\"\"Decide whether to end the process.\"\"\"\n",
    "    # Guard-clause form of the original branch. A joke with no punchline\n",
    "    # could be routed to a fix node here, but per requirements we end\n",
    "    # without feedback, so both paths map to \"end\".\n",
    "    if not state[\"has_punchline\"]:\n",
    "        return \"end\"\n",
    "    return \"end\"\n",
    "\n",
    "\n",
    "# Build the graph\n",
    "def build_graph():\n",
    "    \"\"\"Assemble and compile the joke-generation workflow.\"\"\"\n",
    "    workflow = StateGraph(State)\n",
    "\n",
    "    # Register the two processing steps\n",
    "    workflow.add_node(\"generate_joke\", generate_joke)\n",
    "    workflow.add_node(\"check_punchline\", check_punchline)\n",
    "\n",
    "    # Wire them together: generate, then validate, then route to END\n",
    "    workflow.add_edge(START, \"generate_joke\")\n",
    "    workflow.add_edge(\"generate_joke\", \"check_punchline\")\n",
    "    workflow.add_conditional_edges(\"check_punchline\", decide_next_step, {\"end\": END})\n",
    "\n",
    "    return workflow.compile()\n",
    "\n",
    "\n",
    "# Build the compiled graph once, then run a smoke test on a fixed topic;\n",
    "# `result` is rendered by the following cell.\n",
    "joke_graph = build_graph()\n",
    "\n",
    "# Invoke the graph with the provided topic\n",
    "result = joke_graph.invoke({\"topic\": \"cats\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'topic': 'cats',\n",
       " 'joke': 'Why are cats bad storytellers?\\n\\nPunchline: Because they always paws in the middle of the sentence.',\n",
       " 'has_punchline': True}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "result"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
