{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# `run` function examples with event processing\n",
    "\n",
    "AG2's `run` function natively provides an iterator that allows you to step through the events and easily integrate with frontends or other systems.\n",
    "\n",
    "This notebook provides a set of simple code examples that show how to use the `run` function and iterate through events.\n",
    "\n",
    "If you want the *original* console output experience, you can use the `RunResponse` iterator's `process` method."
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Two agent chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chat between two comedian agents\n",
    "\n",
    "# 1. Import our agent class\n",
    "from autogen import ConversableAgent, LLMConfig\n",
    "from autogen.io.run_response import Cost, RunResponseProtocol\n",
    "\n",
    "# 2. Define our LLM configuration for OpenAI's GPT-4o mini,\n",
    "#    uses the OPENAI_API_KEY environment variable\n",
    "# llm_config = LLMConfig(config_list={\"api_type\": \"openai\", \"model\": \"gpt-5-nano\"})\n",
    "llm_config = LLMConfig.from_json(path=\"OAI_CONFIG_LIST\").where(model=\"gpt-5-nano\")\n",
    "print(f\"Using LLM: {llm_config}\")\n",
    "\n",
    "# 3. Create our agents who will tell each other jokes,\n",
    "#    with Jack ending the chat when Emma says FINISH\n",
    "jack = ConversableAgent(\n",
    "    \"Jack\",\n",
    "    system_message=(\"Your name is Jack and you are a comedian in a two-person comedy show.\"),\n",
    "    is_termination_msg=lambda x: \"FINISH\" in x[\"content\"],\n",
    "    llm_config=llm_config,\n",
    ")\n",
    "emma = ConversableAgent(\n",
    "    \"Emma\",\n",
    "    system_message=(\n",
    "        \"Your name is Emma and you are a comedian \"\n",
    "        \"in a two-person comedy show. Say the word FINISH \"\n",
    "        \"ONLY AFTER you've heard 2 of Jack's jokes.\"\n",
    "    ),\n",
    "    llm_config=llm_config,\n",
    ")\n",
    "\n",
    "# 4. Run the chat\n",
    "response: RunResponseProtocol = jack.run(\n",
    "    emma, message=\"Emma, tell me a joke about goldfish and peanut butter.\", summary_method=\"reflection_with_llm\"\n",
    ")\n",
    "\n",
    "for event in response.events:\n",
    "    print(event)\n",
    "\n",
    "    if event.type == \"input_request\":\n",
    "        event.content.respond(\"exit\")\n",
    "\n",
    "print(f\"{response.summary=}\")\n",
    "print(f\"{response.messages=}\")\n",
    "print(f\"{response.events=}\")\n",
    "print(f\"{response.context_variables=}\")\n",
    "print(f\"{response.last_speaker=}\")\n",
    "print(f\"{response.cost=}\")\n",
    "assert response.last_speaker in [\"Jack\", \"Emma\"], \"Last speaker should be one of the agents\"\n",
    "assert isinstance(response.cost, Cost)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## With console processor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chat between two comedian agents\n",
    "\n",
    "# 1. Import console event processor\n",
    "\n",
    "# 2. Create our agents who will tell each other jokes,\n",
    "#    with Jack ending the chat when Emma says FINISH\n",
    "jack = ConversableAgent(\n",
    "    \"Jack\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=(\"Your name is Jack and you are a comedian in a two-person comedy show.\"),\n",
    "    is_termination_msg=lambda x: \"FINISH\" in x[\"content\"],\n",
    "    human_input_mode=\"NEVER\",\n",
    ")\n",
    "emma = ConversableAgent(\n",
    "    \"Emma\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=(\n",
    "        \"Your name is Emma and you are a comedian \"\n",
    "        \"in a two-person comedy show. Say the word FINISH \"\n",
    "        \"ONLY AFTER you've heard 2 of Jack's jokes.\"\n",
    "    ),\n",
    "    human_input_mode=\"NEVER\",\n",
    ")\n",
    "\n",
    "# 3. Run the chat\n",
    "response = jack.run(\n",
    "    emma, message=\"Emma, tell me a joke about goldfish and peanut butter.\", summary_method=\"reflection_with_llm\"\n",
    ")\n",
    "\n",
    "response.process()\n",
    "\n",
    "assert response.last_speaker in [\"Jack\", \"Emma\"], \"Last speaker should be one of the agents\"\n",
    "assert response.summary is not None, \"Summary should not be None\"\n",
    "assert len(response.messages) > 0, \"Messages should not be empty\""
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Single agent run\n",
    "\n",
     "When no recipient is given, `run` automatically creates a user proxy agent to converse with your agent."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Create our LLM agent\n",
    "my_agent = ConversableAgent(\n",
    "    name=\"helpful_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=\"You are a poetic AI assistant, respond in rhyme.\",\n",
    ")\n",
    "\n",
    "# 2. Run the agent with a prompt\n",
    "response = my_agent.run(\n",
    "    message=\"In one sentence, what's the big deal about AI?\", max_turns=1, summary_method=\"reflection_with_llm\"\n",
    ")\n",
    "\n",
    "response.process()\n",
    "print(f\"{response.summary=}\")\n",
    "print(f\"{response.messages=}\")\n",
    "print(f\"{response.last_speaker=}\")\n",
    "\n",
    "assert response.summary is not None, \"Summary should not be None\"\n",
    "assert len(response.messages) == 2, \"Messages should not be empty\"\n",
    "assert response.last_speaker == \"helpful_agent\", \"Last speaker should be an agent\""
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Group chat run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Group chat amongst agents to create a 4th grade lesson plan\n",
     "# Flow determined by Group Chat Manager automatically, and\n",
     "# should be Teacher > Planner > Reviewer > Teacher (repeats if necessary)\n",
     "\n",
     "# 1. Import our agent and group chat classes\n",
     "from autogen import GroupChat, GroupChatManager\n",
     "\n",
     "# Planner agent setup\n",
     "planner_message = \"Create lesson plans for 4th grade. Use format: <title>, <learning_objectives>, <script>\"\n",
     "planner = ConversableAgent(\n",
     "    name=\"planner_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=planner_message,\n",
     "    description=\"Creates lesson plans\",\n",
     ")\n",
     "\n",
     "# Reviewer agent setup\n",
     "reviewer_message = \"Review lesson plans against 4th grade curriculum. Provide max 3 changes.\"\n",
     "reviewer = ConversableAgent(\n",
     "    name=\"reviewer_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=reviewer_message,\n",
     "    description=\"Reviews lesson plans\",\n",
     ")\n",
     "\n",
     "# Teacher agent setup\n",
     "teacher_message = \"Choose topics and work with planner and reviewer. Say DONE! when finished.\"\n",
     "teacher = ConversableAgent(\n",
     "    name=\"teacher_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=teacher_message,\n",
     ")\n",
     "\n",
     "# Setup group chat\n",
     "groupchat = GroupChat(agents=[teacher, planner, reviewer], speaker_selection_method=\"auto\", messages=[])\n",
     "\n",
     "# Create manager\n",
     "# At each turn, the manager will check if the message contains DONE! and end the chat if so\n",
     "# Otherwise, it will select the next appropriate agent using its LLM\n",
     "manager = GroupChatManager(\n",
     "    name=\"group_manager\",\n",
     "    groupchat=groupchat,\n",
     "    llm_config=llm_config,\n",
     "    is_termination_msg=lambda x: \"DONE!\" in (x.get(\"content\", \"\") or \"\").upper(),\n",
     ")\n",
     "\n",
     "# Start the conversation\n",
     "response = teacher.run(\n",
     "    recipient=manager, message=\"Let's teach the kids about the solar system.\", summary_method=\"reflection_with_llm\"\n",
     ")\n",
     "\n",
     "# Stream the chat events to the console\n",
     "response.process()\n",
     "\n",
     "print(f\"{response.summary=}\")\n",
     "print(f\"{response.messages=}\")\n",
     "print(f\"{response.last_speaker=}\")\n",
     "\n",
     "# Sanity-check the finished chat\n",
     "assert response.summary is not None, \"Summary should not be None\"\n",
     "assert len(response.messages) > 0, \"Messages should not be empty\"\n",
     "assert response.last_speaker in [\"teacher_agent\", \"planner_agent\", \"reviewer_agent\"], (\n",
     "    \"Last speaker should be one of the agents\"\n",
     ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Swarm chat run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen import run_swarm\n",
    "from autogen.agentchat.contrib.swarm_agent import AfterWorkOption\n",
    "\n",
    "# 1. Create our agents\n",
    "planner_message = \"\"\"You are a classroom lesson planner.\n",
    "Given a topic, write a lesson plan for a fourth grade class.\n",
    "If you are given revision feedback, update your lesson plan and record it.\n",
    "Use the following format:\n",
    "<title>Lesson plan title</title>\n",
    "<learning_objectives>Key learning objectives</learning_objectives>\n",
    "<script>How to introduce the topic to the kids</script>\n",
    "\"\"\"\n",
    "\n",
    "reviewer_message = \"\"\"You are a classroom lesson reviewer.\n",
    "You compare the lesson plan to the fourth grade curriculum\n",
    "and provide a maximum of 3 recommended changes for each review.\n",
    "Make sure you provide recommendations each time the plan is updated.\n",
    "\"\"\"\n",
    "\n",
    "teacher_message = \"\"\"You are a classroom teacher.\n",
    "You decide topics for lessons and work with a lesson planner.\n",
    "and reviewer to create and finalise lesson plans.\n",
    "\"\"\"\n",
    "\n",
    "lesson_planner = ConversableAgent(\n",
    "    name=\"planner_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=planner_message,\n",
    ")\n",
    "\n",
    "lesson_reviewer = ConversableAgent(\n",
    "    name=\"reviewer_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=reviewer_message,\n",
    ")\n",
    "\n",
    "teacher = ConversableAgent(\n",
    "    name=\"teacher_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=teacher_message,\n",
    ")\n",
    "\n",
    "# 2. Initiate the swarm chat using a swarm manager who will\n",
    "# select agents automatically\n",
    "response = run_swarm(\n",
    "    initial_agent=teacher,\n",
    "    agents=[lesson_planner, lesson_reviewer, teacher],\n",
    "    messages=\"Today, let's introduce our kids to the solar system.\",\n",
    "    max_rounds=10,\n",
    "    swarm_manager_args={\"llm_config\": llm_config},\n",
    "    after_work=AfterWorkOption.SWARM_MANAGER,\n",
    ")\n",
    "\n",
    "response.process()\n",
    "\n",
    "# for events in response.events:\n",
    "#     if events.type == \"input_request\":\n",
    "#         events.content.respond(\"exit\")\n",
    "\n",
    "print(f\"{response.summary=}\")\n",
    "print(f\"{response.messages=}\")\n",
    "print(f\"{response.last_speaker=}\")\n",
    "\n",
    "assert response.summary is not None, \"Summary should not be None\"\n",
    "assert len(response.messages) > 0, \"Messages should not be empty\"\n",
    "assert response.last_speaker in [\"teacher_agent\", \"planner_agent\", \"reviewer_agent\"], (\n",
    "    \"Last speaker should be one of the agents\"\n",
    ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sequential run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "from autogen.agentchat.user_proxy_agent import UserProxyAgent\n",
     "\n",
     "# Tasks for the financial, research and writing stages\n",
     "financial_tasks = [\n",
     "    \"\"\"What are the current stock prices of NVDA and TESLA, and how is the performance over the past month in terms of percentage change?\"\"\",\n",
     "    \"\"\"Investigate possible reasons of the stock performance.\"\"\",\n",
     "]\n",
     "\n",
     "writing_tasks = [\"\"\"Develop an engaging blog post using any information provided.\"\"\"]\n",
     "\n",
     "financial_assistant = ConversableAgent(\n",
     "    name=\"Financial_assistant\",\n",
     "    system_message=\"You are a financial assistant, helping with stock market analysis. Reply 'TERMINATE' when financial tasks are done.\",\n",
     "    llm_config=llm_config,\n",
     ")\n",
     "research_assistant = ConversableAgent(\n",
     "    name=\"Researcher\",\n",
     "    system_message=\"You are a research assistant, helping with stock market analysis. Reply 'TERMINATE' when research tasks are done.\",\n",
     "    llm_config=llm_config,\n",
     ")\n",
     "writer = ConversableAgent(\n",
     "    name=\"writer\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"\"\"\n",
     "        You are a professional writer, known for\n",
     "        your insightful and engaging articles.\n",
     "        You transform complex concepts into compelling narratives.\n",
     "        Reply \"TERMINATE\" in the end when everything is done.\n",
     "        \"\"\",\n",
     ")\n",
     "\n",
     "user = UserProxyAgent(\n",
     "    name=\"User\",\n",
     "    human_input_mode=\"NEVER\",\n",
     "    is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n",
     "    code_execution_config={\n",
     "        \"last_n_messages\": 1,\n",
     "        \"work_dir\": \"tasks\",\n",
     "        \"use_docker\": False,\n",
     "    },\n",
     ")\n",
     "\n",
     "# Run the three chats in order; chats 2 and 3 declare prerequisites\n",
     "responses = user.sequential_run([\n",
     "    {\n",
     "        \"chat_id\": 1,\n",
     "        \"recipient\": financial_assistant,\n",
     "        \"message\": financial_tasks[0],\n",
     "        \"silent\": False,\n",
     "        \"summary_method\": \"reflection_with_llm\",\n",
     "    },\n",
     "    {\n",
     "        \"chat_id\": 2,\n",
     "        \"prerequisites\": [1],\n",
     "        \"recipient\": research_assistant,\n",
     "        \"message\": financial_tasks[1],\n",
     "        \"silent\": False,\n",
     "        \"summary_method\": \"reflection_with_llm\",\n",
     "    },\n",
     "    {\"chat_id\": 3, \"prerequisites\": [1, 2], \"recipient\": writer, \"silent\": False, \"message\": writing_tasks[0]},\n",
     "])\n",
     "\n",
     "# Stream each chat's events to the console\n",
     "for response in responses:\n",
     "    response.process()\n",
     "\n",
     "for response in responses:\n",
     "    assert len(response.messages) > 0, \"Messages should not be empty\"\n",
     "    assert response.last_speaker in [\"Financial_assistant\", \"Researcher\", \"writer\", \"User\"], (\n",
     "        \"Last speaker should be one of the agents\"\n",
     "    )\n",
     "    assert response.summary is not None, \"Summary should not be None\""
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Async cases"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Two agent chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chat between two comedian agents\n",
    "\n",
    "# 1. Import our agent class\n",
    "from autogen import ConversableAgent\n",
    "from autogen.io.run_response import AsyncRunResponseProtocol\n",
    "\n",
    "# 2. Define our LLM configuration for OpenAI's GPT-4o mini,\n",
    "#    uses the OPENAI_API_KEY environment variable\n",
    "# llm_config = LLMConfig(config_list={\"api_type\": \"openai\", \"model\": \"gpt-5-nano\"})\n",
    "\n",
    "# 3. Create our agents who will tell each other jokes,\n",
    "#    with Jack ending the chat when Emma says FINISH\n",
    "jack = ConversableAgent(\n",
    "    \"Jack\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=(\"Your name is Jack and you are a comedian in a two-person comedy show.\"),\n",
    "    is_termination_msg=lambda x: \"FINISH\" in x[\"content\"],\n",
    "    human_input_mode=\"NEVER\",\n",
    ")\n",
    "emma = ConversableAgent(\n",
    "    \"Emma\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=(\n",
    "        \"Your name is Emma and you are a comedian \"\n",
    "        \"in a two-person comedy show. Say the word FINISH \"\n",
    "        \"ONLY AFTER you've heard 2 of Jack's jokes.\"\n",
    "    ),\n",
    "    human_input_mode=\"NEVER\",\n",
    ")\n",
    "\n",
    "# 4. Run the chat\n",
    "response: AsyncRunResponseProtocol = await jack.a_run(\n",
    "    emma, message=\"Emma, tell me a joke about goldfish and peanut butter.\", summary_method=\"reflection_with_llm\"\n",
    ")\n",
    "\n",
    "async for event in response.events:\n",
    "    print(event)\n",
    "\n",
    "    if event.type == \"input_request\":\n",
    "        await event.content.respond(input())\n",
    "\n",
    "print(f\"{await response.summary=}\")\n",
    "print(f\"{await response.messages=}\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## With console processor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chat between two comedian agents\n",
    "\n",
    "# 1. Import our agent class\n",
    "from autogen import ConversableAgent\n",
    "from autogen.io.run_response import AsyncRunResponseProtocol\n",
    "\n",
    "# 2. Define our LLM configuration for OpenAI's GPT-4o mini,\n",
    "#    uses the OPENAI_API_KEY environment variable\n",
    "# llm_config = LLMConfig(config_list={\"api_type\": \"openai\", \"model\": \"gpt-5-nano\"})\n",
    "\n",
    "# 3. Create our agents who will tell each other jokes,\n",
    "#    with Jack ending the chat when Emma says FINISH\n",
    "jack = ConversableAgent(\n",
    "    \"Jack\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=(\"Your name is Jack and you are a comedian in a two-person comedy show.\"),\n",
    "    is_termination_msg=lambda x: \"FINISH\" in x[\"content\"],\n",
    "    human_input_mode=\"NEVER\",\n",
    ")\n",
    "emma = ConversableAgent(\n",
    "    \"Emma\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=(\n",
    "        \"Your name is Emma and you are a comedian \"\n",
    "        \"in a two-person comedy show. Say the word FINISH \"\n",
    "        \"ONLY AFTER you've heard 2 of Jack's jokes.\"\n",
    "    ),\n",
    "    human_input_mode=\"NEVER\",\n",
    ")\n",
    "\n",
    "# 4. Run the chat\n",
    "response: AsyncRunResponseProtocol = await jack.a_run(\n",
    "    emma, message=\"Emma, tell me a joke about goldfish and peanut butter.\", summary_method=\"reflection_with_llm\"\n",
    ")\n",
    "\n",
    "await response.process()\n",
    "\n",
    "print(f\"{await response.summary=}\")\n",
    "print(f\"{await response.messages=}\")\n",
    "\n",
    "assert await response.last_speaker in [\"Jack\", \"Emma\"], \"Last speaker should be one of the agents\"\n",
    "assert await response.summary is not None, \"Summary should not be None\"\n",
    "assert len(await response.messages) > 0, \"Messages should not be empty\""
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Group chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Group chat amongst agents to create a 4th grade lesson plan\n",
     "# Flow determined by Group Chat Manager automatically, and\n",
     "# should be Teacher > Planner > Reviewer > Teacher (repeats if necessary)\n",
     "\n",
     "# 1. Import our agent and group chat classes\n",
     "from autogen import GroupChat, GroupChatManager\n",
     "\n",
     "# Planner agent setup\n",
     "planner_message = \"Create lesson plans for 4th grade. Use format: <title>, <learning_objectives>, <script>\"\n",
     "planner = ConversableAgent(\n",
     "    name=\"planner_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=planner_message,\n",
     "    description=\"Creates lesson plans\",\n",
     ")\n",
     "\n",
     "# Reviewer agent setup\n",
     "reviewer_message = \"Review lesson plans against 4th grade curriculum. Provide max 3 changes.\"\n",
     "reviewer = ConversableAgent(\n",
     "    name=\"reviewer_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=reviewer_message,\n",
     "    description=\"Reviews lesson plans\",\n",
     ")\n",
     "\n",
     "# Teacher agent setup\n",
     "teacher_message = \"Choose topics and work with planner and reviewer. Say DONE! when finished.\"\n",
     "teacher = ConversableAgent(\n",
     "    name=\"teacher_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=teacher_message,\n",
     ")\n",
     "\n",
     "# Setup group chat\n",
     "groupchat = GroupChat(agents=[teacher, planner, reviewer], speaker_selection_method=\"auto\", messages=[])\n",
     "\n",
     "# Create manager\n",
     "# At each turn, the manager will check if the message contains DONE! and end the chat if so\n",
     "# Otherwise, it will select the next appropriate agent using its LLM\n",
     "manager = GroupChatManager(\n",
     "    name=\"group_manager\",\n",
     "    groupchat=groupchat,\n",
     "    llm_config=llm_config,\n",
     "    is_termination_msg=lambda x: \"DONE!\" in (x.get(\"content\", \"\") or \"\").upper(),\n",
     ")\n",
     "\n",
     "# Start the conversation\n",
     "response = await teacher.a_run(\n",
     "    recipient=manager, message=\"Let's teach the kids about the solar system.\", summary_method=\"reflection_with_llm\"\n",
     ")\n",
     "\n",
     "# Stream the chat events to the console\n",
     "await response.process()\n",
     "\n",
     "# Sanity-check the finished chat\n",
     "assert await response.summary is not None, \"Summary should not be None\"\n",
     "assert len(await response.messages) > 0, \"Messages should not be empty\"\n",
     "assert await response.last_speaker in [\"teacher_agent\", \"planner_agent\", \"reviewer_agent\"], (\n",
     "    \"Last speaker should be one of the agents\"\n",
     ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Swarm chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen import a_run_swarm\n",
    "from autogen.agentchat.contrib.swarm_agent import AfterWorkOption\n",
    "\n",
    "# 1. Create our agents\n",
    "planner_message = \"\"\"You are a classroom lesson planner.\n",
    "Given a topic, write a lesson plan for a fourth grade class.\n",
    "If you are given revision feedback, update your lesson plan and record it.\n",
    "Use the following format:\n",
    "<title>Lesson plan title</title>\n",
    "<learning_objectives>Key learning objectives</learning_objectives>\n",
    "<script>How to introduce the topic to the kids</script>\n",
    "\"\"\"\n",
    "\n",
    "reviewer_message = \"\"\"You are a classroom lesson reviewer.\n",
    "You compare the lesson plan to the fourth grade curriculum\n",
    "and provide a maximum of 3 recommended changes for each review.\n",
    "Make sure you provide recommendations each time the plan is updated.\n",
    "\"\"\"\n",
    "\n",
    "teacher_message = \"\"\"You are a classroom teacher.\n",
    "You decide topics for lessons and work with a lesson planner.\n",
    "and reviewer to create and finalise lesson plans.\n",
    "\"\"\"\n",
    "\n",
    "lesson_planner = ConversableAgent(\n",
    "    name=\"planner_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=planner_message,\n",
    ")\n",
    "\n",
    "lesson_reviewer = ConversableAgent(\n",
    "    name=\"reviewer_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=reviewer_message,\n",
    ")\n",
    "\n",
    "teacher = ConversableAgent(\n",
    "    name=\"teacher_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=teacher_message,\n",
    ")\n",
    "\n",
    "# 2. Initiate the swarm chat using a swarm manager who will\n",
    "# select agents automatically\n",
    "response = await a_run_swarm(\n",
    "    initial_agent=teacher,\n",
    "    agents=[lesson_planner, lesson_reviewer, teacher],\n",
    "    messages=\"Today, let's introduce our kids to the solar system.\",\n",
    "    max_rounds=10,\n",
    "    swarm_manager_args={\"llm_config\": llm_config},\n",
    "    after_work=AfterWorkOption.SWARM_MANAGER,\n",
    ")\n",
    "\n",
    "# await response.process()\n",
    "\n",
    "async for event in response.events:\n",
    "    if event.type == \"input_request\":\n",
    "        await event.content.respond(\"exit\")\n",
    "\n",
    "assert await response.summary is not None, \"Summary should not be None\"\n",
    "assert len(await response.messages) > 0, \"Messages should not be empty\"\n",
    "assert await response.last_speaker in [\"teacher_agent\", \"planner_agent\", \"reviewer_agent\"], (\n",
    "    \"Last speaker should be one of the agents\"\n",
    ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Single agent run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Create our LLM agent\n",
    "my_agent = ConversableAgent(\n",
    "    name=\"helpful_agent\",\n",
    "    llm_config=llm_config,\n",
    "    system_message=\"You are a poetic AI assistant, respond in rhyme.\",\n",
    ")\n",
    "\n",
    "# 2. Run the agent with a prompt\n",
    "response = await my_agent.a_run(\n",
    "    message=\"In one sentence, what's the big deal about AI?\", max_turns=1, summary_method=\"reflection_with_llm\"\n",
    ")\n",
    "\n",
    "await response.process()\n",
    "print(f\"{await response.summary=}\")\n",
    "print(f\"{await response.messages=}\")\n",
    "print(f\"{await response.last_speaker=}\")\n",
    "\n",
    "assert await response.summary is not None, \"Summary should not be None\"\n",
    "assert len(await response.messages) == 2, \"Messages should not be empty\"\n",
    "assert await response.last_speaker == \"helpful_agent\", \"Last speaker should be an agent\""
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sequential run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "financial_tasks = [\n",
     "    \"\"\"What are the current stock prices of NVDA and TESLA, and how is the performance over the past month in terms of percentage change?\"\"\",\n",
     "    \"\"\"Investigate possible reasons of the stock performance.\"\"\",\n",
     "]\n",
     "\n",
     "writing_tasks = [\"\"\"Develop an engaging blog post using any information provided.\"\"\"]\n",
     "\n",
     "financial_assistant = ConversableAgent(\n",
     "    name=\"Financial_assistant\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"You are a financial assistant, helping with stock market analysis. Reply 'TERMINATE' when financial tasks are done.\",\n",
     ")\n",
     "research_assistant = ConversableAgent(\n",
     "    name=\"Researcher\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"You are a research assistant, helping with stock market analysis. Reply 'TERMINATE' when research tasks are done.\",\n",
     ")\n",
     "writer = ConversableAgent(\n",
     "    name=\"writer\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"\"\"\n",
     "        You are a professional writer, known for\n",
     "        your insightful and engaging articles.\n",
     "        You transform complex concepts into compelling narratives.\n",
     "        Reply \"TERMINATE\" in the end when everything is done.\n",
     "        \"\"\",\n",
     ")\n",
     "\n",
     "user = UserProxyAgent(\n",
     "    name=\"User\",\n",
     "    human_input_mode=\"NEVER\",\n",
     "    is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n",
     "    code_execution_config={\n",
     "        \"last_n_messages\": 1,\n",
     "        \"work_dir\": \"tasks\",\n",
     "        \"use_docker\": False,\n",
     "    },\n",
     ")\n",
     "\n",
     "# Run the three chats in order; chats 2 and 3 declare prerequisites\n",
     "responses = await user.a_sequential_run([\n",
     "    {\n",
     "        \"chat_id\": 1,\n",
     "        \"recipient\": financial_assistant,\n",
     "        \"message\": financial_tasks[0],\n",
     "        \"silent\": False,\n",
     "        \"summary_method\": \"reflection_with_llm\",\n",
     "    },\n",
     "    {\n",
     "        \"chat_id\": 2,\n",
     "        \"prerequisites\": [1],\n",
     "        \"recipient\": research_assistant,\n",
     "        \"message\": financial_tasks[1],\n",
     "        \"silent\": False,\n",
     "        \"summary_method\": \"reflection_with_llm\",\n",
     "    },\n",
     "    {\"chat_id\": 3, \"prerequisites\": [1, 2], \"recipient\": writer, \"silent\": False, \"message\": writing_tasks[0]},\n",
     "])\n",
     "\n",
     "# Manually iterate each chat's events, auto-answering input requests\n",
     "for response in responses:\n",
     "    async for event in response.events:\n",
     "        print(event)\n",
     "        if event.type == \"input_request\":\n",
     "            await event.content.respond(\"exit\")\n",
     "\n",
     "    assert len(await response.messages) > 0, \"Messages should not be empty\"\n",
     "    assert await response.last_speaker in [\"Financial_assistant\", \"Researcher\", \"writer\", \"User\"], (\n",
     "        \"Last speaker should be one of the agents\"\n",
     "    )\n",
     "    assert await response.summary is not None, \"Summary should not be None\""
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## Group chat with AutoPattern"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "from autogen.agentchat.group.patterns.auto import AutoPattern\n",
     "\n",
     "triage_agent = ConversableAgent(\n",
     "    name=\"triage_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"\"\"You are a triage agent. For each user query,\n",
     "    identify whether it is a technical issue or a general question. Route\n",
     "    technical issues to the tech agent and general questions to the general agent.\n",
     "    Do not provide suggestions or answers, only route the query.\"\"\",\n",
     ")\n",
     "\n",
     "tech_agent = ConversableAgent(\n",
     "    name=\"tech_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"\"\"You solve technical problems like software bugs\n",
     "    and hardware issues.\"\"\",\n",
     ")\n",
     "\n",
     "general_agent = ConversableAgent(\n",
     "    name=\"general_agent\",\n",
     "    llm_config=llm_config,\n",
     "    system_message=\"You handle general, non-technical support questions.\",\n",
     ")\n",
     "\n",
     "# User agent with human_input_mode ALWAYS; the event loops in the next\n",
     "# cells answer its input requests programmatically\n",
     "user = ConversableAgent(name=\"user\", human_input_mode=\"ALWAYS\")\n",
     "\n",
     "# AutoPattern lets the group manager select the next agent automatically\n",
     "pattern = AutoPattern(\n",
     "    initial_agent=triage_agent,\n",
     "    agents=[triage_agent, tech_agent, general_agent],\n",
     "    user_agent=user,\n",
     "    group_manager_args={\"llm_config\": llm_config},\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Run the chat\n",
     "\n",
     "from autogen.agentchat import run_group_chat\n",
     "from autogen.io.run_response import Cost\n",
     "\n",
     "response = run_group_chat(\n",
     "    pattern=pattern, messages=\"My laptop keeps shutting down randomly. Can you help?\", max_rounds=15\n",
     ")\n",
     "\n",
     "# Iterate events, answering input requests with \"exit\" so the run terminates\n",
     "for event in response.events:\n",
     "    print(event)\n",
     "    if event.type == \"input_request\":\n",
     "        event.content.respond(\"exit\")\n",
     "\n",
     "assert response.summary is not None, \"Summary should not be None\"\n",
     "assert len(response.messages) > 0, \"Messages should not be empty\"\n",
     "assert response.last_speaker in [\"tech_agent\", \"general_agent\", \"triage_agent\"]\n",
     "assert isinstance(response.cost, Cost)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Run the chat\n",
     "\n",
     "from autogen.agentchat import a_run_group_chat\n",
     "\n",
     "response = await a_run_group_chat(\n",
     "    pattern=pattern, messages=\"My laptop keeps shutting down randomly. Can you help?\", max_rounds=15\n",
     ")\n",
     "\n",
     "# Iterate events, answering input requests with \"exit\" so the run terminates\n",
     "async for event in response.events:\n",
     "    print(event)\n",
     "    if event.type == \"input_request\":\n",
     "        await event.content.respond(\"exit\")\n",
     "\n",
     "assert await response.summary is not None, \"Summary should not be None\"\n",
     "assert len(await response.messages) > 0, \"Messages should not be empty\"\n",
     "assert await response.last_speaker in [\"tech_agent\", \"general_agent\", \"triage_agent\"]\n",
     "assert isinstance(await response.cost, Cost)"
   ]
  }
 ],
 "metadata": {
  "front_matter": {
   "description": "Using run and event processing",
   "tags": [
    "run",
    "event-processing",
    "integrate",
    "frontend"
   ]
  },
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
