{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Subgraphs\n",
    "\n",
    "Subgraphs are useful for building multi-agent systems.\n",
    "\n",
    "There are two ways to use subgraphs:\n",
    "\n",
    "## 1. Register subgraph as a node\n",
    "\n",
    "```python\n",
    "builder.add_node(\"sub_graph\", graph_builder.compile())\n",
    "```\n",
    "\n",
    "## 2. Invoke a subgraph from a node\n",
    "\n",
    "```python\n",
    "def sub_graph_node(state: State) -> State:\n",
    "    return sub_graph.invoke({\"question\": \"should i invest in AI stocks now\"})\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.messages import HumanMessage, SystemMessage\n",
    "from IPython.display import Image, display\n",
    "from typing_extensions import TypedDict\n",
    "from typing import Annotated, List\n",
    "import operator\n",
    "from langgraph.graph import StateGraph, START, END\n",
    "\n",
    "from langchain_community.document_loaders import WikipediaLoader\n",
    "from langchain_community.tools import TavilySearchResults\n",
    "\n",
    "\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# Single chat model instance shared by every node in this notebook.\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![Subgraphs](images/subgraphs.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Optimized Web Search Graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.tools import TavilySearchResults\n",
    "\n",
    "# State for the web-search subgraph. `question` and `classification` are the\n",
    "# keys shared with the parent graph; the remaining keys are internal to it.\n",
    "class OptimizedWebSearchState(TypedDict):\n",
    "    question: List[str]\n",
    "    classification: List[str]\n",
    "    optimized_web_request: str\n",
    "    documents: List[str]\n",
    "\n",
    "\n",
    "def optimize_request(state: OptimizedWebSearchState):\n",
    "    # Rewrite the raw user question into a search-engine-friendly query,\n",
    "    # guided by the pre-computed classification.\n",
    "    system_message = SystemMessage(content=(\"\"\"\n",
    "        You are a query optimization assistant.  \n",
    "        You receive two inputs:  \n",
    "            - `category`: The predefined category of the query.  \n",
    "            - `user_request`: The original user query.  \n",
    "\n",
    "        Your task is to refine user queries for better search results while ensuring \n",
    "        relevance to the given category. Keep the original intent but improve clarity, \n",
    "        specificity, and structure.  \n",
    "\n",
    "        - **Do not return a short, vague phrase.** The query must be structured for search engines to retrieve high-quality results.  \n",
    "        - **Convert questions into search-friendly statements.** Do not return a question.  \n",
    "        - **Ensure the query is fact-based**—remove subjective or vague phrasing like “should I,” “is it a good idea,” or “viable option.”  \n",
    "        - **Use the category to improve relevance** but do not assume unnecessary details.  \n",
    "        - **Never add a specific year unless the user explicitly includes one.** If a time reference is needed, use ‘now’ instead.  \n",
    "        - **If the query is broad, narrow it down** by adding relevant **context** (e.g., key factors, market trends, risks, industries).  \n",
    "        - **Avoid single-word or ultra-short responses.** The optimized query must be **concise but meaningful**.  \n",
    "\n",
    "        Return **only the optimized query as plain text**, with no explanations, formatting, or extra text—just the improved query.\n",
    "    \"\"\"))\n",
    "\n",
    "    human_message = HumanMessage(content=(f\"\"\"\n",
    "        category: {state[\"classification\"][0]}\n",
    "\n",
    "        user_request: {state[\"question\"][0]}\n",
    "    \"\"\"))\n",
    "\n",
    "    optimized_web_request = llm.invoke([system_message, human_message])\n",
    "\n",
    "    return {\"optimized_web_request\": optimized_web_request.content}\n",
    "\n",
    "\n",
    "def search_web(state: OptimizedWebSearchState):\n",
    "    # Search the web with the optimized query and keep only documents that\n",
    "    # look like real content (drop error pages and bot walls).\n",
    "    tavily_search = TavilySearchResults(max_results=10)\n",
    "    search_docs = tavily_search.invoke(state['optimized_web_request'])\n",
    "\n",
    "    # Keywords that indicate an error or access restriction\n",
    "    forbidden_keywords = [\n",
    "        \"403 Forbidden\", \"Access denied\", \"CAPTCHA\", \n",
    "        \"has been denied\", \"not authorized\", \"verify you are a human\"\n",
    "    ]\n",
    "    # Lower-case the keywords once, outside the per-document check below.\n",
    "    lowered_keywords = [keyword.lower() for keyword in forbidden_keywords]\n",
    "\n",
    "    results = [\n",
    "        f'<Document url=\"{doc[\"url\"]}\">\\n{doc[\"content\"]}\\n</Document>'\n",
    "        for doc in search_docs\n",
    "        if not any(keyword in doc[\"content\"].lower() for keyword in lowered_keywords)\n",
    "    ]\n",
    "\n",
    "    return {\"documents\": results}\n",
    "\n",
    "\n",
    "# Wire the two nodes into a linear subgraph: optimize -> search.\n",
    "optimized_web_search_builder = StateGraph(OptimizedWebSearchState)\n",
    "optimized_web_search_builder.add_node(optimize_request)\n",
    "optimized_web_search_builder.add_node(search_web)\n",
    "\n",
    "optimized_web_search_builder.add_edge(START, \"optimize_request\")\n",
    "optimized_web_search_builder.add_edge(\"optimize_request\", \"search_web\")\n",
    "optimized_web_search_builder.add_edge(\"search_web\", END)\n",
    "\n",
    "graph = optimized_web_search_builder.compile()\n",
    "\n",
    "display(Image(graph.get_graph().draw_mermaid_png()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke-test the web-search subgraph on its own before it is nested.\n",
    "result = graph.invoke({\n",
    "    \"question\": [\"should i invest in AI stocks now\"],\n",
    "    \"classification\": [\"Investment Advice\"]\n",
    "})\n",
    "result"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Structured Wikipedia Lookup Graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.document_loaders import WikipediaLoader\n",
    "\n",
    "# State for the Wikipedia-lookup subgraph. `question` and `classification`\n",
    "# are shared with the parent graph; the remaining keys are internal to it.\n",
    "class StructuredWikipediaLookupState(TypedDict):\n",
    "    question: List[str]\n",
    "    classification: List[str]\n",
    "    optimized_structured_wikipedia: str\n",
    "    documents: List[str]\n",
    "\n",
    "\n",
    "# NOTE: this rebinds the module-level name `optimize_request` used by the\n",
    "# web-search cell; that is harmless because the earlier builder already holds\n",
    "# a reference to its own function object.\n",
    "def optimize_request(state: StructuredWikipediaLookupState):\n",
    "    # Turn the user question into an encyclopedia-style lookup title.\n",
    "    system_message = SystemMessage(content=(\"\"\"\n",
    "        You optimize queries for searching Wikipedia articles.  \n",
    "        You receive two inputs: \n",
    "            - `category`: The predefined category of the query. \n",
    "            - `user_request`: The original user query. \n",
    "\n",
    "        Use the **category** to refine the query while ensuring it aligns with Wikipedia's factual, encyclopedia-style content.  \n",
    "\n",
    "            - **Never frame the query as a question.** Convert it into a Wikipedia-style article title.  \n",
    "            - **Avoid subjective, speculative, or opinion-based wording.** Focus on factual, well-defined topics.  \n",
    "            - **Ensure the optimized query is informative and structured as an encyclopedia entry.**  \n",
    "            - **Use the category to guide query refinement** but do **not assume** or limit responses to a specific domain.  \n",
    "            - **If the category suggests an advisory or decision-making topic (e.g., \"Investment Advice\"), shift the focus to established concepts, historical context, or factual analysis.**  \n",
    "\n",
    "        Return **only the optimized query as plain text**, with no explanations, formatting, or extra text—just the improved query.\n",
    "    \"\"\"))\n",
    "\n",
    "    human_message = HumanMessage(content=(f\"\"\"\n",
    "        category: {state[\"classification\"][0]}\n",
    "\n",
    "        user_request: {state[\"question\"][0]}\n",
    "    \"\"\"))\n",
    "\n",
    "    optimized_structured_wikipedia = llm.invoke([system_message, human_message])\n",
    "\n",
    "    return {\"optimized_structured_wikipedia\": optimized_structured_wikipedia.content}\n",
    "\n",
    "\n",
    "def search_wikipedia(state: StructuredWikipediaLookupState):\n",
    "    # Fetch up to 10 Wikipedia articles for the optimized title and wrap each\n",
    "    # one in a <Document> tag carrying its source URL.\n",
    "    search_docs = WikipediaLoader(query=state['optimized_structured_wikipedia'], load_max_docs=10).load()\n",
    "\n",
    "    results = [\n",
    "        f'<Document url=\"{doc.metadata[\"source\"]}\">\\n{doc.page_content}\\n</Document>'\n",
    "        for doc in search_docs\n",
    "    ]\n",
    "\n",
    "    return {\"documents\": results}\n",
    "\n",
    "\n",
    "# Wire the two nodes into a linear subgraph: optimize -> lookup.\n",
    "structured_wikipedia_lookup_builder = StateGraph(StructuredWikipediaLookupState)\n",
    "structured_wikipedia_lookup_builder.add_node(optimize_request)\n",
    "structured_wikipedia_lookup_builder.add_node(search_wikipedia)\n",
    "\n",
    "structured_wikipedia_lookup_builder.add_edge(START, \"optimize_request\")\n",
    "structured_wikipedia_lookup_builder.add_edge(\"optimize_request\", \"search_wikipedia\")\n",
    "structured_wikipedia_lookup_builder.add_edge(\"search_wikipedia\", END)\n",
    "\n",
    "graph = structured_wikipedia_lookup_builder.compile()\n",
    "\n",
    "# Fix: the compiled graph is bound to `graph` above; the previous\n",
    "# `structured_wikipedia_lookup` name was never defined (NameError).\n",
    "display(Image(graph.get_graph().draw_mermaid_png()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke-test the Wikipedia subgraph on its own before it is nested.\n",
    "result = graph.invoke({\n",
    "    \"question\": [\"should i invest in AI stocks now\"],\n",
    "    \"classification\": [\"Investment Advice\"]\n",
    "})\n",
    "result"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Entire Graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import operator\n",
    "\n",
    "# Parent-graph state. The `operator.add` annotations merge the partial\n",
    "# updates coming back from the two parallel subgraphs by list concatenation\n",
    "# (so both branches' `documents` are kept) instead of overwriting.\n",
    "class EntireGraphState(TypedDict):\n",
    "    question: Annotated[list, operator.add]\n",
    "    classification: Annotated[list, operator.add]\n",
    "    documents: Annotated[list, operator.add]\n",
    "    answer: str\n",
    "\n",
    "\n",
    "def query_classification(state: EntireGraphState):\n",
    "    # Assign a free-form category to the question; both subgraphs read it.\n",
    "    system_message = SystemMessage(content=(\"\"\"\n",
    "        You are a query classification assistant. Your task is to analyze user queries and assign \n",
    "        them to the most relevant category. Do not force-fit queries into predefined categories—only \n",
    "        use an existing category if it is a clear match. Otherwise, create a new category that \n",
    "        accurately represents the topic.\n",
    "\t        - Assign a category based on the core subject of the query.\n",
    "\t        - If the query clearly fits an existing category, use it.\n",
    "\t        - If no existing category is a perfect match, create a new descriptive category.\n",
    "\t        - Do not return ‘unknown’—always generate a meaningful category name.\n",
    "\t    \n",
    "        Return only the category name as plain text, without explanations or extra text.\n",
    "    \"\"\"))\n",
    "\n",
    "    classification = llm.invoke([system_message, HumanMessage(content=state[\"question\"][0])])\n",
    "\n",
    "    return {\"classification\": [classification.content]}\n",
    "\n",
    "\n",
    "def generate_answer(state: EntireGraphState):\n",
    "    # Instructions for answering strictly from the gathered documents.\n",
    "    system_message = SystemMessage(content=(\"\"\"\n",
    "        You are an AI assistant that answers questions based on the provided documents.  \n",
    "\n",
    "        ### **Guidelines:**  \n",
    "            - Provide **direct, concise, and accurate** answers.  \n",
    "            - When possible, **cite the relevant document or URL** next to the information used.  \n",
    "            - If multiple documents contain relevant information, **synthesize the best answer** while ensuring clarity.  \n",
    "            - If a document contains **conflicting information**, mention both perspectives.  \n",
    "            - **When using a specific document for a prediction or key statement, include its source URL immediately after the referenced information** so the user can verify details.\n",
    "    \"\"\"))\n",
    "\n",
    "\n",
    "    # Bullet-list the documents collected by both subgraphs as extra context.\n",
    "    formatted_docs = \"\\n\".join(\n",
    "        [f\"- {doc}\" for doc in state[\"documents\"]]\n",
    "    )\n",
    "\n",
    "    system_context = SystemMessage(content=(f\"Use the following documents as context for your response:\\n\\n{formatted_docs}\"))\n",
    "\n",
    "    answer = llm.invoke([system_message, system_context, HumanMessage(content=state[\"question\"][0])])\n",
    "    \n",
    "    # Stores the full AIMessage (not just its text); downstream cells read\n",
    "    # `result[\"answer\"].content`.\n",
    "    return {\"answer\": answer}\n",
    "\n",
    "\n",
    "# Register each compiled subgraph as a node (approach 1 from the intro).\n",
    "builder = StateGraph(EntireGraphState)\n",
    "builder.add_node(query_classification)\n",
    "builder.add_node(\"optimized_web_search\", optimized_web_search_builder.compile())\n",
    "builder.add_node(\"structured_wikipedia_lookup\", structured_wikipedia_lookup_builder.compile())\n",
    "builder.add_node(generate_answer)\n",
    "\n",
    "# Fan out from classification to both subgraphs, then fan back in.\n",
    "builder.add_edge(START, \"query_classification\")\n",
    "builder.add_edge(\"query_classification\", \"optimized_web_search\")\n",
    "builder.add_edge(\"query_classification\", \"structured_wikipedia_lookup\")\n",
    "builder.add_edge(\"optimized_web_search\", \"generate_answer\")\n",
    "builder.add_edge(\"structured_wikipedia_lookup\", \"generate_answer\")\n",
    "builder.add_edge(\"generate_answer\", END)\n",
    "\n",
    "graph = builder.compile()\n",
    "\n",
    "# xray=1 renders the internal nodes of each subgraph in the diagram.\n",
    "display(Image(graph.get_graph(xray=1).draw_mermaid_png()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the composed graph end to end; `classification` is produced by the\n",
    "# first node, so only the question needs to be supplied.\n",
    "result = graph.invoke({\"question\": [\"should i invest in AI stocks now\"]})\n",
    "result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(result[\"answer\"].content)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
