{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# ==============================================================================\n",
        "# LangGraph HITL Example 1: Basic Approval Workflow\n",
        "#\n",
        "# Description:\n",
        "# This script demonstrates a simple Human-in-the-Loop (HITL) workflow using\n",
        "# LangGraph. The graph generates a short, creative sentence and then pauses\n",
        "# to ask a human for approval. The graph's execution path changes based on\n",
        "# whether the human input is \"approve\" or \"reject\".\n",
        "#\n",
        "# This example is designed to be run in a Jupyter Notebook or a similar\n",
        "# interactive environment where `input()` can be used.\n",
        "#\n",
        "# Required Libraries:\n",
        "# pip install langchain langchain_core langchain_openai langgraph pytest\n",
        "#\n",
        "# Environment Setup:\n",
        "# You need to have an OpenAI API key set as an environment variable named\n",
        "# `OPENAI_API_KEY`.\n",
        "# ==============================================================================\n",
        "\n",
        "import os\n",
        "import pytest\n",
        "from typing import TypedDict, Annotated, Literal\n",
        "from langchain_openai import ChatOpenAI\n",
        "from langchain_core.messages import BaseMessage, HumanMessage\n",
        "from langgraph.graph import StateGraph, END\n",
        "import uuid\n",
        "import subprocess\n",
        "import sys\n",
        "\n",
        "# --- Set up OpenAI API Key ---\n",
        "# Make sure to set your OpenAI API key as an environment variable.\n",
        "# For example: os.environ[\"OPENAI_API_KEY\"] = \"your_key_here\"\n",
        "if 'OPENAI_API_KEY' not in os.environ:\n",
        "    print('WARNING: OPENAI_API_KEY not found in environment variables. The script may fail.')\n",
        "    # You can uncomment the next line and add your key for quick testing\n",
        "    # os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 1. Problem Statement\n",
        "# ==============================================================================\n",
        "#\n",
        "# The task is to build a simple content creation workflow where an AI generates\n",
        "# a piece of text (e.g., a creative sentence for a marketing campaign), and a\n",
        "# human supervisor must approve or reject it before it's finalized. This ensures\n",
        "# that only human-approved content proceeds to the \"end\" state.\n",
        "#\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 2. Graph State Definition\n",
        "# ==============================================================================\n",
        "#\n",
        "# Define the state that will be passed between the nodes of the graph.\n",
        "# It contains the generated text and the decision from the human.\n",
        "#\n",
        "class BasicGraphState(TypedDict):\n",
        "    \"\"\"\n",
        "    Represents the state of our graph.\n",
        "\n",
        "    Attributes:\n",
        "        generation: The text generated by the language model.\n",
        "        human_decision: The decision made by the human ('approve' or 'reject').\n",
        "    \"\"\"\n",
        "\n",
        "    generation: str\n",
        "    human_decision: Literal['approve', 'reject']\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 3. Graph Nodes and Conditional Logic\n",
        "# ==============================================================================\n",
        "#\n",
        "# Define the functions that will act as nodes in our graph.\n",
        "#\n",
        "\n",
        "# --- Initialize the LLM ---\n",
        "# We'll use OpenAI's gpt-4o-mini as it is fast and capable for this task.\n",
        "llm = ChatOpenAI(model='gpt-4o-mini')\n",
        "\n",
        "\n",
        "def generate_text_node(state: BasicGraphState) -> BasicGraphState:\n",
        "    \"\"\"\n",
        "    Generates a creative sentence using the LLM.\n",
        "\n",
        "    Args:\n",
        "        state: The current graph state.\n",
        "\n",
        "    Returns:\n",
        "        An updated state with the generated text.\n",
        "    \"\"\"\n",
        "    print('--- Node: Generating Text ---')\n",
        "    try:\n",
        "        response = llm.invoke('Generate a single, short, creative sentence for a new coffee brand.')\n",
        "        generation = response.content\n",
        "        print(f\"   AI Generated: '{generation}'\")\n",
        "        return {'generation': generation}\n",
        "    except Exception as e:\n",
        "        print(f'Error during text generation: {e}')\n",
        "        return {'generation': 'Error: Could not generate text.'}\n",
        "\n",
        "\n",
        "def human_intervention_node(state: BasicGraphState) -> BasicGraphState:\n",
        "    \"\"\"\n",
        "    Pauses the graph and asks for human input.\n",
        "\n",
        "    Args:\n",
        "        state: The current graph state.\n",
        "\n",
        "    Returns:\n",
        "        An updated state with the human's decision.\n",
        "    \"\"\"\n",
        "    print('\\n--- Node: Human Intervention ---')\n",
        "    print('Please review the generated text.')\n",
        "    print(f\"   Generated Text: '{state['generation']}'\")\n",
        "\n",
        "    decision = ''\n",
        "    # Loop until valid input is received\n",
        "    while decision.lower() not in ['approve', 'reject']:\n",
        "        decision = input('Your decision (approve/reject): ').strip().lower()\n",
        "        if decision.lower() not in ['approve', 'reject']:\n",
        "            print(\"Invalid input. Please enter 'approve' or 'reject'.\")\n",
        "\n",
        "    print(f'   Human Decision: {decision}')\n",
        "    return {'human_decision': decision}\n",
        "\n",
        "\n",
        "def conditional_branch(state: BasicGraphState) -> Literal['approved_end', 'rejected_end']:\n",
        "    \"\"\"\n",
        "    Determines the next step based on the human's decision.\n",
        "\n",
        "    Args:\n",
        "        state: The current graph state.\n",
        "\n",
        "    Returns:\n",
        "        A string indicating which path to take next.\n",
        "    \"\"\"\n",
        "    print('\\n--- Conditional Branch ---')\n",
        "    decision = state['human_decision']\n",
        "    if decision == 'approve':\n",
        "        print('   Routing to: Approved')\n",
        "        return 'approved_end'\n",
        "    else:\n",
        "        print('   Routing to: Rejected')\n",
        "        return 'rejected_end'\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 4. LangGraph Definition and Execution\n",
        "# ==============================================================================\n",
        "#\n",
        "# Now, we assemble the nodes into a graph.\n",
        "#\n",
        "\n",
        "\n",
        "def build_graph():\n",
        "    \"\"\"Builds and returns the StateGraph.\"\"\"\n",
        "    workflow = StateGraph(BasicGraphState)\n",
        "\n",
        "    # Add nodes to the graph\n",
        "    workflow.add_node('generator', generate_text_node)\n",
        "    workflow.add_node('human_intervention', human_intervention_node)\n",
        "\n",
        "    # Define the graph's flow\n",
        "    workflow.set_entry_point('generator')\n",
        "    workflow.add_edge('generator', 'human_intervention')\n",
        "\n",
        "    # Add the conditional branch\n",
        "    workflow.add_conditional_edges(\n",
        "        'human_intervention',\n",
        "        conditional_branch,\n",
        "        {\n",
        "            'approved_end': END,\n",
        "            'rejected_end': END,\n",
        "        },\n",
        "    )\n",
        "\n",
        "    # Compile the graph into a runnable object\n",
        "    app = workflow.compile()\n",
        "    return app\n",
        "\n",
        "\n",
        "def run_basic_workflow():\n",
        "    \"\"\"Runs the full basic approval workflow.\"\"\"\n",
        "    print('==========================================')\n",
        "    print('= Running Basic HITL Approval Workflow   =')\n",
        "    print('==========================================')\n",
        "    app = build_graph()\n",
        "    final_state = app.invoke({})\n",
        "\n",
        "    print('\\n--- Workflow Finished ---')\n",
        "    print('Final State:')\n",
        "    print(f'  - Generated Text: {final_state.get(\"generation\")}')\n",
        "    print(f'  - Final Decision: {final_state.get(\"human_decision\")}')\n",
        "    print('==========================================\\n')\n",
        "    return final_state\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 5. Unit Tests using Pytest\n",
        "# ==============================================================================\n",
        "#\n",
        "# To test this, we'll create a separate file named `test_basic_workflow.py`\n",
        "# and run it with `pytest`. Below is the content for that file.\n",
        "#\n",
        "# We use mocking to simulate human input without requiring actual interaction\n",
        "# during the automated test run.\n",
        "#\n",
        "# To run tests:\n",
        "# 1. Save the code below as `test_basic_workflow.py`.\n",
        "# 2. Run `pytest test_basic_workflow.py` in your terminal.\n",
        "#\n",
        "# --- Content for test_basic_workflow.py ---\n",
        "\n",
        "\n",
        "# test_basic_workflow.py\n",
        "def create_test_file():\n",
        "    test_code = \"\"\"\n",
        "import pytest\n",
        "from unittest.mock import patch\n",
        "from langgraph_hitl_basic import conditional_branch, human_intervention_node\n",
        "\n",
        "# Test the conditional logic directly\n",
        "def test_conditional_branch_approve():\n",
        "    \\\"\\\"\\\"Tests the 'approve' path of the conditional branch.\\\"\\\"\\\"\n",
        "    state = {\"generation\": \"Test text\", \"human_decision\": \"approve\"}\n",
        "    assert conditional_branch(state) == \"approved_end\"\n",
        "\n",
        "def test_conditional_branch_reject():\n",
        "    \\\"\\\"\\\"Tests the 'reject' path of the conditional branch.\\\"\\\"\\\"\n",
        "    state = {\"generation\": \"Test text\", \"human_decision\": \"reject\"}\n",
        "    assert conditional_branch(state) == \"rejected_end\"\n",
        "\n",
        "# Test the human intervention node with mocking\n",
        "@patch('builtins.input', lambda _: 'approve')\n",
        "def test_human_intervention_node_approve():\n",
        "    \\\"\\\"\\\"Tests the human intervention node for an 'approve' input.\\\"\\\"\\\"\n",
        "    state = {\"generation\": \"A fresh take on your morning cup.\"}\n",
        "    result = human_intervention_node(state)\n",
        "    assert result['human_decision'] == 'approve'\n",
        "\n",
        "@patch('builtins.input', lambda _: 'reject')\n",
        "def test_human_intervention_node_reject():\n",
        "    \\\"\\\"\\\"Tests the human intervention node for a 'reject' input.\\\"\\\"\\\"\n",
        "    state = {\"generation\": \"A bold new world of flavor.\"}\n",
        "    result = human_intervention_node(state)\n",
        "    assert result['human_decision'] == 'reject'\n",
        "\n",
        "@patch('builtins.input', side_effect=['invalid', 'approve'])\n",
        "def test_human_intervention_node_invalid_then_approve(mock_input):\n",
        "    \\\"\\\"\\\"Tests robustness against invalid user input.\\\"\\\"\\\"\n",
        "    state = {\"generation\": \"Test text\"}\n",
        "    result = human_intervention_node(state)\n",
        "    assert result['human_decision'] == 'approve'\n",
        "    assert mock_input.call_count == 2\n",
        "\"\"\"\n",
        "    with open('test_basic_workflow.py', 'w') as f:\n",
        "        f.write(test_code)\n",
        "\n",
        "\n",
        "def run_tests():\n",
        "    \"\"\"Creates and runs the pytest file.\"\"\"\n",
        "    print('==========================================')\n",
        "    print('= Running Unit Tests                     =')\n",
        "    print('==========================================')\n",
        "    create_test_file()\n",
        "    # Use subprocess to run pytest and capture output\n",
        "    try:\n",
        "        result = subprocess.run(\n",
        "            [sys.executable, '-m', 'pytest', 'test_basic_workflow.py'], capture_output=True, text=True, check=True\n",
        "        )\n",
        "        print(result.stdout)\n",
        "    except subprocess.CalledProcessError as e:\n",
        "        print('Pytest execution failed:')\n",
        "        print(e.stdout)\n",
        "        print(e.stderr)\n",
        "    finally:\n",
        "        # Clean up the test file\n",
        "        if os.path.exists('test_basic_workflow.py'):\n",
        "            os.remove('test_basic_workflow.py')\n",
        "    print('==========================================\\n')\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 6. Main Execution Block\n",
        "# ==============================================================================\n",
        "if __name__ == '__main__':\n",
        "    # Note: To run this interactively in a notebook, you might call the\n",
        "    # functions separately.\n",
        "\n",
        "    # Run the interactive workflow\n",
        "    run_basic_workflow()\n",
        "\n",
        "    # Run the automated tests\n",
        "    run_tests()\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "dFe1wwQDmsnF"
      },
      "outputs": [],
      "source": [
        "# ==============================================================================\n",
        "# LangGraph HITL Example 2: Intermediate Editing Workflow\n",
        "#\n",
        "# Description:\n",
        "# This script builds on the basic example by creating a workflow where an AI\n",
        "# generates a summary of a document. A human is then given the opportunity\n",
        "# to review and edit this summary. The final (potentially edited) summary is\n",
        "# then passed to another AI agent to create a social media post.\n",
        "#\n",
        "# This demonstrates how human input can modify the state that subsequent\n",
        "# nodes in the graph will use.\n",
        "#\n",
        "# Required Libraries:\n",
        "# pip install langchain langchain_core langchain_openai langgraph pytest\n",
        "#\n",
        "# Environment Setup:\n",
        "# Requires an OpenAI API key set as the `OPENAI_API_KEY` environment variable.\n",
        "# ==============================================================================\n",
        "\n",
        "import os\n",
        "import pytest\n",
        "from typing import TypedDict\n",
        "from langchain_openai import ChatOpenAI\n",
        "from langchain_core.messages import SystemMessage\n",
        "from langgraph.graph import StateGraph, END\n",
        "import textwrap\n",
        "import subprocess\n",
        "import sys\n",
        "\n",
        "# --- Set up OpenAI API Key ---\n",
        "if 'OPENAI_API_KEY' not in os.environ:\n",
        "    print('WARNING: OPENAI_API_KEY not found in environment variables. The script may fail.')\n",
        "    # You can uncomment the next line and add your key for quick testing\n",
        "    # os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 1. Problem Statement\n",
        "# ==============================================================================\n",
        "#\n",
        "# The task is to create a semi-automated content pipeline. Given a long piece\n",
        "# of text, the system should first generate a concise summary. A human expert\n",
        "# then reviews this summary for accuracy and nuance, editing it if necessary.\n",
        "# Finally, the system uses this polished summary to generate a ready-to-publish\n",
        "# social media post, ensuring the final output aligns with human quality standards.\n",
        "#\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 2. Graph State Definition\n",
        "# ==============================================================================\n",
        "#\n",
        "# The state now needs to hold the original document, the initial summary,\n",
        "# the edited summary, and the final social media post.\n",
        "#\n",
        "class EditingGraphState(TypedDict):\n",
        "    \"\"\"\n",
        "    Represents the state of our editing graph.\n",
        "\n",
        "    Attributes:\n",
        "        document: The original text to be processed.\n",
        "        initial_summary: The summary generated by the first LLM call.\n",
        "        edited_summary: The summary after human review and potential edits.\n",
        "        social_post: The final social media post generated from the edited summary.\n",
        "    \"\"\"\n",
        "\n",
        "    document: str\n",
        "    initial_summary: str\n",
        "    edited_summary: str\n",
        "    social_post: str\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 3. Graph Nodes Definition\n",
        "# ==============================================================================\n",
        "#\n",
        "# We define three nodes: one to summarize, one for human editing, and one\n",
        "# to create the social media post.\n",
        "#\n",
        "\n",
        "# --- Initialize the LLM ---\n",
        "llm = ChatOpenAI(model='gpt-4o-mini', temperature=0.7)\n",
        "\n",
        "\n",
        "def summarize_document_node(state: EditingGraphState) -> EditingGraphState:\n",
        "    \"\"\"\n",
        "    Generates an initial summary of the document in the state.\n",
        "\n",
        "    Args:\n",
        "        state: The current graph state.\n",
        "\n",
        "    Returns:\n",
        "        An updated state with the initial summary.\n",
        "    \"\"\"\n",
        "    print('--- Node: Summarizing Document ---')\n",
        "    document = state['document']\n",
        "    prompt = [\n",
        "        SystemMessage(\n",
        "            content='You are an expert summarizer. Create a concise, one-paragraph summary of the following text.'\n",
        "        ),\n",
        "        SystemMessage(content=f'TEXT:\\n\\n{document}'),\n",
        "    ]\n",
        "    try:\n",
        "        response = llm.invoke(prompt)\n",
        "        summary = response.content\n",
        "        print('   AI Generated Summary (Initial):')\n",
        "        print(textwrap.fill(summary, width=80))\n",
        "        return {'initial_summary': summary}\n",
        "    except Exception as e:\n",
        "        print(f'Error during summarization: {e}')\n",
        "        return {'initial_summary': 'Error: Could not generate summary.'}\n",
        "\n",
        "\n",
        "def human_edit_node(state: EditingGraphState) -> EditingGraphState:\n",
        "    \"\"\"\n",
        "    Allows a human to edit the generated summary.\n",
        "\n",
        "    Args:\n",
        "        state: The current graph state.\n",
        "\n",
        "    Returns:\n",
        "        An updated state with the (potentially) edited summary.\n",
        "    \"\"\"\n",
        "    print('\\n--- Node: Human Editing ---')\n",
        "    initial_summary = state['initial_summary']\n",
        "    print('Please review the summary. You can either accept it by pressing Enter or provide your own edited version.')\n",
        "    print('\\nInitial Summary:')\n",
        "    print(textwrap.fill(initial_summary, width=80))\n",
        "\n",
        "    # Provide a multi-line input prompt\n",
        "    print(\n",
        "        '\\nEnter your edited summary below. Press Ctrl+D (or Ctrl+Z on Windows) when you are done, or just press Enter to accept the original.'\n",
        "    )\n",
        "    edited_lines = []\n",
        "    try:\n",
        "        while True:\n",
        "            line = input()\n",
        "            edited_lines.append(line)\n",
        "    except EOFError:\n",
        "        pass  # This is expected when the user finishes input\n",
        "\n",
        "    edited_summary = '\\n'.join(edited_lines).strip()\n",
        "\n",
        "    if not edited_summary:\n",
        "        print('   No changes made. Using original summary.')\n",
        "        final_summary = initial_summary\n",
        "    else:\n",
        "        print('\\n   Summary has been edited by human.')\n",
        "        final_summary = edited_summary\n",
        "\n",
        "    print('\\nFinal Summary to be used:')\n",
        "    print(textwrap.fill(final_summary, width=80))\n",
        "    return {'edited_summary': final_summary}\n",
        "\n",
        "\n",
        "def generate_social_post_node(state: EditingGraphState) -> EditingGraphState:\n",
        "    \"\"\"\n",
        "    Generates a social media post based on the final, human-approved summary.\n",
        "\n",
        "    Args:\n",
        "        state: The current graph state.\n",
        "\n",
        "    Returns:\n",
        "        An updated state with the generated social media post.\n",
        "    \"\"\"\n",
        "    print('\\n--- Node: Generating Social Media Post ---')\n",
        "    summary_to_use = state['edited_summary']\n",
        "    prompt = [\n",
        "        SystemMessage(\n",
        "            content='You are a social media manager. Based on the following summary, write an engaging and concise Twitter post. Include relevant hashtags.'\n",
        "        ),\n",
        "        SystemMessage(content=f'SUMMARY:\\n\\n{summary_to_use}'),\n",
        "    ]\n",
        "    try:\n",
        "        response = llm.invoke(prompt)\n",
        "        post = response.content\n",
        "        print('   AI Generated Social Post:')\n",
        "        print(textwrap.fill(post, width=80))\n",
        "        return {'social_post': post}\n",
        "    except Exception as e:\n",
        "        print(f'Error during post generation: {e}')\n",
        "        return {'social_post': 'Error: Could not generate social post.'}\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 4. LangGraph Definition and Execution\n",
        "# ==============================================================================\n",
        "\n",
        "\n",
        "def build_graph():\n",
        "    \"\"\"Builds and returns the StateGraph for the editing workflow.\"\"\"\n",
        "    workflow = StateGraph(EditingGraphState)\n",
        "\n",
        "    # Add nodes\n",
        "    workflow.add_node('summarizer', summarize_document_node)\n",
        "    workflow.add_node('human_editor', human_edit_node)\n",
        "    workflow.add_node('social_poster', generate_social_post_node)\n",
        "\n",
        "    # Define the flow (linear in this case)\n",
        "    workflow.set_entry_point('summarizer')\n",
        "    workflow.add_edge('summarizer', 'human_editor')\n",
        "    workflow.add_edge('human_editor', 'social_poster')\n",
        "    workflow.add_edge('social_poster', END)\n",
        "\n",
        "    app = workflow.compile()\n",
        "    return app\n",
        "\n",
        "\n",
        "def run_editing_workflow():\n",
        "    \"\"\"Runs the full intermediate editing workflow.\"\"\"\n",
        "    print('==========================================')\n",
        "    print('= Running Intermediate HITL Editing Workflow =')\n",
        "    print('==========================================')\n",
        "\n",
        "    document_text = \"\"\"\n",
        "    Quantum computing represents a fundamental shift in computation. Unlike classical\n",
        "    computers that use bits (0s and 1s), quantum computers use qubits, which can\n",
        "    exist in a superposition of both 0 and 1 simultaneously. This property, along\n",
        "    with another quantum phenomenon called entanglement, allows quantum computers to\n",
        "    explore a vast number of possibilities at once. While still in its early stages,\n",
        "    quantum computing holds the potential to solve complex problems currently\n",
        "    intractable for even the most powerful supercomputers, with applications in\n",
        "    drug discovery, materials science, and financial modeling. However, building\n",
        "    and controlling stable quantum computers is a massive engineering challenge due\n",
        "    to their sensitivity to environmental noise, a phenomenon known as decoherence.\n",
        "    \"\"\"\n",
        "\n",
        "    app = build_graph()\n",
        "    initial_state = {'document': document_text.strip()}\n",
        "    final_state = app.invoke(initial_state)\n",
        "\n",
        "    print('\\n--- Workflow Finished ---')\n",
        "    print('Final State Contains:')\n",
        "    print(f'  - Document: {\"Yes\" if final_state.get(\"document\") else \"No\"}')\n",
        "    print(f'  - Initial Summary: {\"Yes\" if final_state.get(\"initial_summary\") else \"No\"}')\n",
        "    print(f'  - Edited Summary: {\"Yes\" if final_state.get(\"edited_summary\") else \"No\"}')\n",
        "    print(f'  - Social Post: {\"Yes\" if final_state.get(\"social_post\") else \"No\"}')\n",
        "    print('==========================================\\n')\n",
        "    return final_state\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 5. Unit Tests using Pytest\n",
        "# ==============================================================================\n",
        "#\n",
        "# As before, save the code below as `test_editing_workflow.py` and run `pytest`.\n",
        "#\n",
        "# --- Content for test_editing_workflow.py ---\n",
        "\n",
        "\n",
        "def create_test_file():\n",
        "    test_code = \"\"\"\n",
        "import pytest\n",
        "from unittest.mock import patch\n",
        "from langgraph_hitl_intermediate import human_edit_node, generate_social_post_node\n",
        "\n",
        "# Test the human editing node\n",
        "@patch('builtins.input', side_effect=EOFError) # Simulates pressing Enter with no input\n",
        "def test_human_edit_node_no_edit(mock_input):\n",
        "    \\\"\\\"\\\"Tests the case where the human does not edit the summary.\\\"\\\"\\\"\n",
        "    initial_summary = \"This is the original summary.\"\n",
        "    state = {\"initial_summary\": initial_summary}\n",
        "    result = human_edit_node(state)\n",
        "    assert result['edited_summary'] == initial_summary\n",
        "\n",
        "@patch('builtins.input', side_effect=[\"This is the new, better summary.\", EOFError])\n",
        "def test_human_edit_node_with_edit(mock_input):\n",
        "    \\\"\\\"\\\"Tests the case where the human provides an edited summary.\\\"\\\"\\\"\n",
        "    initial_summary = \"This is the original summary.\"\n",
        "    state = {\"initial_summary\": initial_summary}\n",
        "    result = human_edit_node(state)\n",
        "    assert result['edited_summary'] == \"This is the new, better summary.\"\n",
        "\n",
        "# Test the social post generation node\n",
        "# We don't mock the LLM here, but we can test that it uses the correct input field\n",
        "def test_social_post_uses_edited_summary():\n",
        "    \\\"\\\"\\\"Ensures the social post node uses the 'edited_summary' from the state.\\\"\\\"\\\"\n",
        "    # This is more of an integration test for the node's logic\n",
        "    state = {\n",
        "        \"document\": \"doc\",\n",
        "        \"initial_summary\": \"initial\",\n",
        "        # The key piece of information for this node\n",
        "        \"edited_summary\": \"The final, human-approved summary about quantum mechanics.\",\n",
        "        \"social_post\": \"\"\n",
        "    }\n",
        "    # We can't easily assert the LLM's output, but we can confirm the node runs\n",
        "    # and produces some output in the correct key.\n",
        "    result = generate_social_post_node(state)\n",
        "    assert \"social_post\" in result\n",
        "    assert isinstance(result[\"social_post\"], str)\n",
        "    assert len(result[\"social_post\"]) > 0\n",
        "    # A more advanced test would mock the LLM call and verify the prompt\n",
        "\"\"\"\n",
        "    with open('test_editing_workflow.py', 'w') as f:\n",
        "        f.write(test_code)\n",
        "\n",
        "\n",
        "def run_tests():\n",
        "    \"\"\"Creates and runs the pytest file for the editing workflow.\"\"\"\n",
        "    print('==========================================')\n",
        "    print('= Running Unit Tests                     =')\n",
        "    print('==========================================')\n",
        "    create_test_file()\n",
        "    try:\n",
        "        result = subprocess.run(\n",
        "            [sys.executable, '-m', 'pytest', 'test_editing_workflow.py'], capture_output=True, text=True, check=True\n",
        "        )\n",
        "        print(result.stdout)\n",
        "    except subprocess.CalledProcessError as e:\n",
        "        print('Pytest execution failed:')\n",
        "        print(e.stdout)\n",
        "        print(e.stderr)\n",
        "    finally:\n",
        "        if os.path.exists('test_editing_workflow.py'):\n",
        "            os.remove('test_editing_workflow.py')\n",
        "    print('==========================================\\n')\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 6. Main Execution Block\n",
        "# ==============================================================================\n",
        "if __name__ == '__main__':\n",
        "    run_editing_workflow()\n",
        "    run_tests()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# ==============================================================================\n",
        "# LangGraph HITL Example 3: Advanced Code Debugging Loop\n",
        "#\n",
        "# Description:\n",
        "# This script implements a sophisticated, cyclical \"human-in-the-loop\" workflow\n",
        "# for code generation and debugging. The process is as follows:\n",
        "# 1. An AI agent generates a Python function based on a problem description.\n",
        "# 2. A second AI agent generates unit tests (`pytest`) for that function.\n",
        "# 3. The system executes the tests against the generated code.\n",
        "# 4. If tests pass, the workflow ends successfully.\n",
        "# 5. If tests fail, the graph pauses for human intervention. The human is shown\n",
        "#    the failing code and the test error, and is asked to provide a fix.\n",
        "# 6. The graph loops back, re-running the tests with the human-corrected code.\n",
        "#\n",
        "# This demonstrates a powerful collaborative pattern between an AI and a human\n",
        "# developer.\n",
        "#\n",
        "# Required Libraries:\n",
        "# pip install langchain langchain_core langchain_openai langgraph pytest\n",
        "#\n",
        "# Environment Setup:\n",
        "# Requires an OpenAI API key set as the `OPENAI_API_KEY` environment variable.\n",
        "# ==============================================================================\n",
        "\n",
        "import os\n",
        "import pytest\n",
        "import subprocess\n",
        "import sys\n",
        "import uuid\n",
        "import textwrap\n",
        "from typing import TypedDict, Annotated, List\n",
        "\n",
        "from langchain_openai import ChatOpenAI\n",
        "from langchain_core.messages import SystemMessage, HumanMessage\n",
        "from langgraph.graph import StateGraph, END\n",
        "\n",
        "# --- Set up OpenAI API Key ---\n",
        "if 'OPENAI_API_KEY' not in os.environ:\n",
        "    print('WARNING: OPENAI_API_KEY not found in environment variables. The script may fail.')\n",
        "    # You can uncomment the next line and add your key for quick testing\n",
        "    # os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
        "\n",
        "# ==============================================================================\n",
        "# 1. Problem Statement\n",
        "# ==============================================================================\n",
        "#\n",
        "# The task is to automate the initial phases of software development. Given a\n",
        "# clear requirement (e.g., \"write a Python function to check if a string is a\n",
        "# palindrome\"), the system should generate both the functional code and the\n",
        "# corresponding unit tests. Crucially, it must then verify its own work. If the\n",
        "# verification fails, it should present the problem to a human developer for\n",
        "# debugging, incorporating the fix into its next attempt. This loop continues\n",
        "# until a verified, working solution is produced.\n",
        "#\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 2. Graph State Definition\n",
        "# ==============================================================================\n",
        "#\n",
        "class CodeGenState(TypedDict):\n",
        "    \"\"\"\n",
        "    Represents the state of our code generation and debugging graph.\n",
        "\n",
        "    Attributes:\n",
        "        problem_statement: The initial requirement for the code.\n",
        "        code: The Python code for the function, generated or human-corrected.\n",
        "        tests: The pytest unit tests for the function.\n",
        "        test_results: The output from the last pytest run.\n",
        "        error: A flag indicating if the last test run failed.\n",
        "        retries: A counter for how many times the loop has run.\n",
        "    \"\"\"\n",
        "\n",
        "    problem_statement: str\n",
        "    code: str\n",
        "    tests: str\n",
        "    test_results: str\n",
        "    error: bool\n",
        "    retries: int\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 3. Graph Nodes and Conditional Logic\n",
        "# ==============================================================================\n",
        "\n",
        "# --- Initialize LLM ---\n",
        "# Using a more capable model is better for code generation.\n",
        "llm = ChatOpenAI(model='gpt-4o', temperature=0.2)\n",
        "\n",
        "\n",
        "def generate_code_node(state: CodeGenState) -> CodeGenState:\n",
        "    \"\"\"Generates the function code based on the problem statement.\"\"\"\n",
        "    print('--- Node: Generating Function Code ---')\n",
        "    prompt = [\n",
        "        SystemMessage(\n",
        "            content='You are a senior Python developer. Write a single Python function that solves the following problem. Do not include any example usage or explanations, just the function definition.'\n",
        "        ),\n",
        "        HumanMessage(content=state['problem_statement']),\n",
        "    ]\n",
        "    try:\n",
        "        response = llm.invoke(prompt)\n",
        "        code = response.content.strip()\n",
        "        # Clean up Markdown code blocks if present\n",
        "        if code.startswith('```python'):\n",
        "            code = code[9:]\n",
        "        if code.endswith('```'):\n",
        "            code = code[:-3]\n",
        "        print('   AI Generated Code:\\n', textwrap.indent(code, '    '))\n",
        "        return {'code': code, 'retries': 0}\n",
        "    except Exception as e:\n",
        "        print(f'Error during code generation: {e}')\n",
        "        return {'code': f'# Error: {e}', 'error': True}\n",
        "\n",
        "\n",
        "def generate_tests_node(state: CodeGenState) -> CodeGenState:\n",
        "    \"\"\"Generates unit tests for the given code.\"\"\"\n",
        "    print('--- Node: Generating Unit Tests ---')\n",
        "    prompt = [\n",
        "        SystemMessage(\n",
        "            content='You are a quality assurance engineer. Write a set of pytest unit tests for the following Python function. Include a variety of edge cases. Do not include any explanations, just the test code. Assume the function is in a file named `solution.py` and can be imported.'\n",
        "        ),\n",
        "        HumanMessage(content=f'Function:\\n{state[\"code\"]}\\n\\nProblem: {state[\"problem_statement\"]}'),\n",
        "    ]\n",
        "    try:\n",
        "        response = llm.invoke(prompt)\n",
        "        tests = response.content.strip()\n",
        "        if tests.startswith('```python'):\n",
        "            tests = tests[9:]\n",
        "        if tests.endswith('```'):\n",
        "            tests = tests[:-3]\n",
        "        print('   AI Generated Tests:\\n', textwrap.indent(tests, '    '))\n",
        "        return {'tests': tests}\n",
        "    except Exception as e:\n",
        "        print(f'Error during test generation: {e}')\n",
        "        return {'tests': f'# Error: {e}', 'error': True}\n",
        "\n",
        "\n",
        "def execute_tests_node(state: CodeGenState) -> CodeGenState:\n",
        "    \"\"\"Saves the code and tests to files and runs pytest.\"\"\"\n",
        "    print('\\n--- Node: Executing Tests ---')\n",
        "\n",
        "    # Error handling for missing code/tests\n",
        "    if not state.get('code') or not state.get('tests'):\n",
        "        print('   ERROR: Code or tests are missing.')\n",
        "        return {'error': True, 'test_results': 'Code or tests not generated.'}\n",
        "\n",
        "    # Create temporary files for the code and tests\n",
        "    solution_filename = 'solution.py'\n",
        "    test_filename = f'test_{uuid.uuid4().hex}.py'\n",
        "\n",
        "    with open(solution_filename, 'w') as f:\n",
        "        f.write(state['code'])\n",
        "    with open(test_filename, 'w') as f:\n",
        "        f.write(state['tests'])\n",
        "\n",
        "    try:\n",
        "        # Execute pytest using a subprocess\n",
        "        result = subprocess.run(\n",
        "            [sys.executable, '-m', 'pytest', test_filename],\n",
        "            capture_output=True,\n",
        "            text=True,\n",
        "            timeout=30,  # Add a timeout for safety\n",
        "        )\n",
        "\n",
        "        if result.returncode == 0:\n",
        "            print('   ✅ Tests Passed!')\n",
        "            return {'error': False, 'test_results': result.stdout}\n",
        "        else:\n",
        "            print('   ❌ Tests Failed!')\n",
        "            return {'error': True, 'test_results': result.stdout + result.stderr}\n",
        "\n",
        "    except subprocess.TimeoutExpired:\n",
        "        print('   ❌ Test execution timed out.')\n",
        "        return {'error': True, 'test_results': 'Test execution timed out.'}\n",
        "    except Exception as e:\n",
        "        print(f'   ❌ An unexpected error occurred during test execution: {e}')\n",
        "        return {'error': True, 'test_results': f'An unexpected error occurred: {e}'}\n",
        "    finally:\n",
        "        # Clean up the created files\n",
        "        for filename in [solution_filename, test_filename]:\n",
        "            if os.path.exists(filename):\n",
        "                os.remove(filename)\n",
        "\n",
        "\n",
        "def human_debug_node(state: CodeGenState) -> CodeGenState:\n",
        "    \"\"\"Allows a human to debug the failing code.\"\"\"\n",
        "    print('\\n--- Node: Human Debugging ---')\n",
        "\n",
        "    # Safety check for max retries\n",
        "    if state['retries'] >= 3:\n",
        "        print('   Max retries reached. Exiting workflow.')\n",
        "        return {'code': state['code'], 'test_results': 'Max retries reached.'}\n",
        "\n",
        "    print('The generated code failed the tests. Please review and provide a fix.')\n",
        "    print('\\n' + '=' * 80)\n",
        "    print('Problem Statement:', state['problem_statement'])\n",
        "    print('-' * 80)\n",
        "    print('Failing Code:\\n')\n",
        "    print(state['code'])\n",
        "    print('-' * 80)\n",
        "    print('Test Results:\\n')\n",
        "    print(state['test_results'])\n",
        "    print('=' * 80 + '\\n')\n",
        "\n",
        "    print('Enter your corrected code below. Press Ctrl+D (or Ctrl+Z on Windows) when you are done.')\n",
        "    edited_lines = []\n",
        "    try:\n",
        "        while True:\n",
        "            line = input()\n",
        "            edited_lines.append(line)\n",
        "    except EOFError:\n",
        "        pass\n",
        "\n",
        "    corrected_code = '\\n'.join(edited_lines).strip()\n",
        "\n",
        "    if not corrected_code:\n",
        "        print('   No changes provided. Aborting workflow.')\n",
        "        return {'code': state['code'], 'test_results': 'Aborted by user.'}\n",
        "\n",
        "    print('\\n   Received corrected code from human.')\n",
        "    return {'code': corrected_code, 'retries': state['retries'] + 1}\n",
        "\n",
        "\n",
        "def decide_next_step(state: CodeGenState) -> Literal['human_debugger', 'end']:\n",
        "    \"\"\"Determines if the loop should continue (debug) or end (success).\"\"\"\n",
        "    print('--- Conditional Branch: Checking Test Results ---')\n",
        "    if state['error']:\n",
        "        print('   Routing to: Human Debugger')\n",
        "        return 'human_debugger'\n",
        "    else:\n",
        "        print('   Routing to: End')\n",
        "        return 'end'\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 4. LangGraph Definition and Execution\n",
        "# ==============================================================================\n",
        "\n",
        "\n",
        "def build_graph():\n",
        "    \"\"\"Builds and returns the StateGraph for the debugging loop.\"\"\"\n",
        "    workflow = StateGraph(CodeGenState)\n",
        "\n",
        "    workflow.add_node('code_generator', generate_code_node)\n",
        "    workflow.add_node('test_generator', generate_tests_node)\n",
        "    workflow.add_node('test_executor', execute_tests_node)\n",
        "    workflow.add_node('human_debugger', human_debug_node)\n",
        "\n",
        "    workflow.set_entry_point('code_generator')\n",
        "    workflow.add_edge('code_generator', 'test_generator')\n",
        "    workflow.add_edge('test_generator', 'test_executor')\n",
        "\n",
        "    workflow.add_conditional_edges('test_executor', decide_next_step, {'human_debugger': 'human_debugger', 'end': END})\n",
        "\n",
        "    # The crucial loop back to the test executor\n",
        "    workflow.add_edge('human_debugger', 'test_executor')\n",
        "\n",
        "    app = workflow.compile()\n",
        "    return app\n",
        "\n",
        "\n",
        "def run_debugging_workflow():\n",
        "    \"\"\"Runs the full advanced debugging workflow.\"\"\"\n",
        "    print('==========================================')\n",
        "    print('= Running Advanced HITL Debugging Workflow =')\n",
        "    print('==========================================')\n",
        "\n",
        "    problem = 'Write a Python function `is_palindrome(s: str) -> bool` that checks if a string is a palindrome, ignoring case and non-alphanumeric characters.'\n",
        "\n",
        "    app = build_graph()\n",
        "    initial_state = {'problem_statement': problem}\n",
        "\n",
        "    # The AI might get this right on the first try, or it might fail,\n",
        "    # which would trigger the human loop.\n",
        "\n",
        "    final_state = app.invoke(initial_state)\n",
        "\n",
        "    print('\\n--- Workflow Finished ---')\n",
        "    if not final_state['error']:\n",
        "        print('✅ Successfully generated and verified the code.')\n",
        "        print('\\nFinal Code:\\n', final_state['code'])\n",
        "    else:\n",
        "        print('❌ Workflow finished with an error or was aborted.')\n",
        "        print('\\nLast State:\\n', final_state)\n",
        "    print('==========================================\\n')\n",
        "    return final_state\n",
        "\n",
        "\n",
        "def run_tests():\n",
        "    \"\"\"Creates and runs the pytest file for the debugging workflow.\"\"\"\n",
        "    print('==========================================')\n",
        "    print('= Running Unit Tests                     =')\n",
        "    print('==========================================')\n",
        "    create_test_file()\n",
        "    try:\n",
        "        result = subprocess.run(\n",
        "            [sys.executable, '-m', 'pytest', 'test_debugging_workflow.py'], capture_output=True, text=True, check=True\n",
        "        )\n",
        "        print(result.stdout)\n",
        "    except subprocess.CalledProcessError as e:\n",
        "        print('Pytest execution failed:')\n",
        "        print(e.stdout)\n",
        "        print(e.stderr)\n",
        "    finally:\n",
        "        if os.path.exists('test_debugging_workflow.py'):\n",
        "            os.remove('test_debugging_workflow.py')\n",
        "    print('==========================================\\n')\n",
        "\n",
        "\n",
        "# ==============================================================================\n",
        "# 6. Main Execution Block\n",
        "# ==============================================================================\n",
        "if __name__ == '__main__':\n",
        "    run_debugging_workflow()\n",
        "    run_tests()\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {},
      "outputs": [
        {
          "ename": "ValueError",
          "evalue": "Must provide state_schema or input and output",
          "output_type": "error",
          "traceback": [
            "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)",
            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 27\u001b[39m\n\u001b[32m     23\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m state\n\u001b[32m     26\u001b[39m \u001b[38;5;66;03m# Build the graph\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m27\u001b[39m graph = \u001b[43mStateGraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     28\u001b[39m graph.add_node(\u001b[33m'\u001b[39m\u001b[33mgenerate\u001b[39m\u001b[33m'\u001b[39m, generate_text_node)\n\u001b[32m     29\u001b[39m graph.add_node(\u001b[33m'\u001b[39m\u001b[33mapprove\u001b[39m\u001b[33m'\u001b[39m, approval_node)\n",
            "\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/py312/lib/python3.12/site-packages/langgraph/graph/state.py:190\u001b[39m, in \u001b[36mStateGraph.__init__\u001b[39m\u001b[34m(self, state_schema, config_schema, input, output)\u001b[39m\n\u001b[32m    188\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m state_schema \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m    189\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28minput\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m output \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m190\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33m\"\u001b[39m\u001b[33mMust provide state_schema or input and output\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m    191\u001b[39m     state_schema = \u001b[38;5;28minput\u001b[39m\n\u001b[32m    192\u001b[39m     warnings.warn(\n\u001b[32m    193\u001b[39m         \u001b[33m\"\u001b[39m\u001b[33mInitializing StateGraph without state_schema is deprecated. \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m    194\u001b[39m         \u001b[33m\"\u001b[39m\u001b[33mPlease pass in an explicit state_schema instead of just an input and output schema.\u001b[39m\u001b[33m\"\u001b[39m,\n\u001b[32m    195\u001b[39m         LangGraphDeprecationWarning,\n\u001b[32m    196\u001b[39m         stacklevel=\u001b[32m2\u001b[39m,\n\u001b[32m    197\u001b[39m     )\n",
            "\u001b[31mValueError\u001b[39m: Must provide state_schema or input and output"
          ]
        }
      ],
      "source": [
        "from langchain.chat_models import ChatOpenAI\n",
        "from langchain.schema import HumanMessage\n",
        "from langgraph.graph import StateGraph, START, END\n",
        "\n",
        "\n",
        "# Node 1: Generate text using the LLM\n",
        "def generate_text_node(state):\n",
        "    llm = ChatOpenAI(temperature=0)\n",
        "    resp = llm.invoke([HumanMessage(content='Generate a friendly greeting.')])\n",
        "    state['generated_text'] = resp.content.strip()\n",
        "    return state\n",
        "\n",
        "\n",
        "# Node 2: Ask human to approve or reject\n",
        "def approval_node(state):\n",
        "    print(f'\\nGenerated text: “{state[\"generated_text\"]}”')\n",
        "    while True:\n",
        "        ans = input('Approve this text? (y/n): ').strip().lower()\n",
        "        if ans in ('y', 'n'):\n",
        "            break\n",
        "        print(\"Please enter 'y' or 'n'.\")\n",
        "    state['approved'] = ans == 'y'\n",
        "    return state\n",
        "\n",
        "\n",
        "# Build the graph\n",
        "graph = StateGraph()\n",
        "graph.add_node('generate', generate_text_node)\n",
        "graph.add_node('approve', approval_node)\n",
        "graph.add_node(END, lambda s: s)\n",
        "graph.add_edge(START, 'generate')\n",
        "graph.add_edge('generate', 'approve')\n",
        "# loop or finish\n",
        "graph.add_edge('approve', END, condition=lambda s: s['approved'])\n",
        "graph.add_edge('approve', 'generate', condition=lambda s: not s['approved'])\n",
        "\n",
        "# Execute\n",
        "result = graph.invoke({})\n",
        "print('\\nFinal state:', result)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# In [1]: Imports\n",
        "from langchain.chat_models import ChatOpenAI\n",
        "from langchain.schema import HumanMessage\n",
        "from langgraph.graph import StateGraph, START, END\n",
        "\n",
        "# Sample document\n",
        "DOCUMENT = \"\"\"\n",
        "Jupyter Notebook is an open-source web application that allows you to create and share documents\n",
        "containing live code, equations, visualizations, and narrative text.\n",
        "\"\"\"\n",
        "\n",
        "\n",
        "# Node 1: Summarize document\n",
        "def summarize_node(state):\n",
        "    llm = ChatOpenAI(temperature=0)\n",
        "    prompt = f'Summarize the following text:\\n\\n{state[\"document\"]}'\n",
        "    resp = llm.invoke([HumanMessage(content=prompt)])\n",
        "    state['summary'] = resp.content.strip()\n",
        "    return state\n",
        "\n",
        "\n",
        "# Node 2: Let human edit the summary\n",
        "def human_edit_node(state):\n",
        "    print('\\nGenerated summary:')\n",
        "    print(state['summary'])\n",
        "    while True:\n",
        "        edited = input('\\nEdit summary for accuracy:\\n').strip()\n",
        "        if edited:\n",
        "            break\n",
        "        print('Summary cannot be empty.')\n",
        "    state['edited_summary'] = edited\n",
        "    return state\n",
        "\n",
        "\n",
        "# Node 3: Generate keywords from edited summary\n",
        "def keywords_node(state):\n",
        "    llm = ChatOpenAI(temperature=0)\n",
        "    prompt = f'Extract three comma-separated keywords from this summary:\\n\\n{state[\"edited_summary\"]}'\n",
        "    resp = llm.invoke([HumanMessage(content=prompt)])\n",
        "    state['keywords'] = [w.strip() for w in resp.content.split(',') if w.strip()]\n",
        "    return state\n",
        "\n",
        "\n",
        "# Build and run the graph\n",
        "graph = StateGraph()\n",
        "graph.add_node('summarize', summarize_node)\n",
        "graph.add_node('edit', human_edit_node)\n",
        "graph.add_node('keywords', keywords_node)\n",
        "graph.add_node(END, lambda s: s)\n",
        "\n",
        "graph.add_edge(START, 'summarize')\n",
        "graph.add_edge('summarize', 'edit')\n",
        "graph.add_edge('edit', 'keywords')\n",
        "graph.add_edge('keywords', END)\n",
        "\n",
        "result = graph.invoke({'document': DOCUMENT})\n",
        "print('\\nFinal state:', result)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# In [1]: Imports\n",
        "import subprocess, os\n",
        "from langchain.chat_models import ChatOpenAI\n",
        "from langchain.schema import HumanMessage\n",
        "from langgraph.graph import StateGraph, START, END\n",
        "\n",
        "# Template for our test suite\n",
        "TEST_TEMPLATE = \"\"\"\n",
        "import generated_code\n",
        "def test_add():\n",
        "    assert generated_code.add(2, 3) == 5\n",
        "\"\"\"\n",
        "\n",
        "\n",
        "# Node 1: Generate code\n",
        "def generate_code_node(state):\n",
        "    llm = ChatOpenAI(temperature=0)\n",
        "    prompt = 'Write a Python module named generated_code.py that defines a function add(a, b) returning their sum.'\n",
        "    resp = llm.invoke([HumanMessage(content=prompt)])\n",
        "    state['code'] = resp.content\n",
        "    return state\n",
        "\n",
        "\n",
        "# Node 2: Write code & tests to disk\n",
        "def write_code_node(state):\n",
        "    with open('generated_code.py', 'w') as f:\n",
        "        f.write(state['code'])\n",
        "    with open('test_generated_code.py', 'w') as f:\n",
        "        f.write(TEST_TEMPLATE)\n",
        "    return state\n",
        "\n",
        "\n",
        "# Node 3: Run pytest\n",
        "def run_tests_node(state):\n",
        "    result = subprocess.run(['pytest', '-q', 'test_generated_code.py'], capture_output=True, text=True)\n",
        "    state['tests_passed'] = result.returncode == 0\n",
        "    state['pytest_output'] = result.stdout + result.stderr\n",
        "    return state\n",
        "\n",
        "\n",
        "# Node 4: Human debugging if tests fail\n",
        "def human_debug_node(state):\n",
        "    if not state['tests_passed']:\n",
        "        print('\\nPytest output:\\n', state['pytest_output'])\n",
        "        corrected = input('Tests failed. Please provide corrected code for generated_code.py:\\n')\n",
        "        state['code'] = corrected\n",
        "    return state\n",
        "\n",
        "\n",
        "# Build looping graph\n",
        "graph = StateGraph()\n",
        "graph.add_node('gen_code', generate_code_node)\n",
        "graph.add_node('write_code', write_code_node)\n",
        "graph.add_node('run_tests', run_tests_node)\n",
        "graph.add_node('debug', human_debug_node)\n",
        "graph.add_node(END, lambda s: s)\n",
        "\n",
        "graph.add_edge(START, 'gen_code')\n",
        "graph.add_edge('gen_code', 'write_code')\n",
        "graph.add_edge('write_code', 'run_tests')\n",
        "graph.add_edge('run_tests', END, condition=lambda s: s['tests_passed'])\n",
        "graph.add_edge('run_tests', 'debug', condition=lambda s: not s['tests_passed'])\n",
        "graph.add_edge('debug', 'write_code')\n",
        "\n",
        "# Execute\n",
        "final_state = graph.invoke({})\n",
        "print('\\nFinal result – tests passed?', final_state['tests_passed'])\n"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "py312",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.9"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
