{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "<a href=\"https://colab.research.google.com/drive/19yl6LcECtk657I1d7iLYjeM4R8k4cCUc?usp=sharing\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>"
      ],
      "metadata": {
        "id": "zwRar1gDIyvj"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Graph of Thoughts (GoT)"
      ],
      "metadata": {
        "id": "c2eB7zRfOFsi"
      }
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "X8uAQvWYIvQu"
      },
      "outputs": [],
      "source": [
        "# %pip (not !pip) installs into the active kernel's environment\n",
        "%pip install -qU google-generativeai"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import google.generativeai as genai\n",
        "import getpass"
      ],
      "metadata": {
        "id": "UmQRi9plOOR3"
      },
      "execution_count": 2,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "Get a free-tier Google Gemini API key here: https://aistudio.google.com/app/apikey"
      ],
      "metadata": {
        "id": "AdNzA05TOUz2"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# Prompt for the key without echoing it, so the secret never lands in the notebook output\n",
        "API_KEY = getpass.getpass(\"Enter your Google API key: \")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "kQI9HdmxOW_9",
        "outputId": "b3db17d0-1fdc-4565-fe0e-46ce16d326f5"
      },
      "execution_count": 3,
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Enter your Google API key: ··········\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Register the key with the SDK; all GenerativeModel calls below use it\n",
        "genai.configure(api_key=API_KEY)"
      ],
      "metadata": {
        "id": "MyFelyMeObUm"
      },
      "execution_count": 5,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class ThoughtNode:\n",
        "    \"\"\"A single thought (vertex) in the reasoning graph.\n",
        "\n",
        "    Unlike a tree node, a thought may have several predecessors:\n",
        "    aggregation merges multiple parent thoughts into one child node.\n",
        "    \"\"\"\n",
        "    def __init__(self, content, node_id):\n",
        "        self.id = node_id       # unique identifier, e.g. \"N3\"\n",
        "        self.content = content  # natural-language thought text\n",
        "        self.score = 0.0        # quality score, normalized to [0, 1]\n",
        "        self.predecessors = []  # Can have multiple parents (graph, not tree)\n",
        "        self.successors = []    # Can have multiple children\n",
        "\n",
        "    def add_successor(self, node):\n",
        "        \"\"\"Add a directed edge self -> node, keeping both adjacency\n",
        "        lists consistent; duplicate edges are ignored.\"\"\"\n",
        "        if node not in self.successors:\n",
        "            self.successors.append(node)\n",
        "            node.predecessors.append(self)\n",
        "\n",
        "class GoTAgent:\n",
        "    \"\"\"Graph-of-Thoughts reasoner backed by the Gemini API.\n",
        "\n",
        "    Builds a directed graph of ThoughtNode objects by repeatedly\n",
        "    generating candidate thoughts, scoring them, aggregating the top\n",
        "    candidates into merged nodes, and refining the best node.\n",
        "    \"\"\"\n",
        "    def __init__(self):\n",
        "        self.model = genai.GenerativeModel(\"gemini-2.0-flash-exp\")\n",
        "        self.nodes = {}        # node_id -> ThoughtNode\n",
        "        self.node_counter = 0  # source of unique, increasing node ids\n",
        "\n",
        "    def create_node(self, content):\n",
        "        \"\"\"Create a new thought node, register it, and return it.\"\"\"\n",
        "        node_id = f\"N{self.node_counter}\"\n",
        "        self.node_counter += 1\n",
        "        node = ThoughtNode(content, node_id)\n",
        "        self.nodes[node_id] = node\n",
        "        return node\n",
        "\n",
        "    def generate(self, problem, context_nodes, num_thoughts=3):\n",
        "        \"\"\"Ask the model for up to num_thoughts new candidate thoughts,\n",
        "        conditioned on the contents of context_nodes.\"\"\"\n",
        "        context = \"\\n\".join([f\"- {n.content}\" for n in context_nodes])\n",
        "\n",
        "        prompt = f\"\"\"Problem: {problem}\n",
        "\n",
        "        Current thoughts:\n",
        "        {context}\n",
        "\n",
        "        Generate {num_thoughts} different next ideas or reasoning steps.\n",
        "        List them numbered:\"\"\"\n",
        "\n",
        "        response = self.model.generate_content(prompt).text\n",
        "\n",
        "        # Parse thoughts: keep only numbered/bulleted lines, strip the list\n",
        "        # markers, and drop fragments too short to be meaningful thoughts.\n",
        "        thoughts = []\n",
        "        for line in response.split(\"\\n\"):\n",
        "            line = line.strip()\n",
        "            if line and (line[0].isdigit() or line.startswith(\"-\")):\n",
        "                thought = line.lstrip(\"0123456789.-) \").strip()\n",
        "                if thought and len(thought) > 10:\n",
        "                    thoughts.append(thought)\n",
        "\n",
        "        return thoughts[:num_thoughts]\n",
        "\n",
        "    def score(self, problem, thought):\n",
        "        \"\"\"Score a thought's quality; returns a float normalized to [0, 1].\"\"\"\n",
        "        prompt = f\"\"\"Problem: {problem}\n",
        "\n",
        "        Thought: {thought}\n",
        "\n",
        "        Rate this thought (0-10) based on:\n",
        "        - Relevance to problem\n",
        "        - Logical soundness\n",
        "        - Potential to lead to solution\n",
        "\n",
        "        Score (just number):\"\"\"\n",
        "\n",
        "        response = self.model.generate_content(prompt).text\n",
        "\n",
        "        try:\n",
        "            score = float(response.strip().split()[0])\n",
        "            return min(max(score / 10, 0), 1)\n",
        "        except (ValueError, IndexError):\n",
        "            # Non-numeric or empty reply: fall back to a neutral score\n",
        "            # rather than aborting the whole search. (Was a bare except,\n",
        "            # which also swallowed KeyboardInterrupt/SystemExit.)\n",
        "            return 0.5\n",
        "\n",
        "    def aggregate(self, problem, nodes):\n",
        "        \"\"\"Combine multiple thoughts into one synthesized thought.\"\"\"\n",
        "        thoughts = \"\\n\".join([f\"{i+1}. {n.content}\" for i, n in enumerate(nodes)])\n",
        "\n",
        "        prompt = f\"\"\"Problem: {problem}\n",
        "\n",
        "        Multiple thoughts to combine:\n",
        "        {thoughts}\n",
        "\n",
        "        Synthesize these into one coherent, stronger thought:\"\"\"\n",
        "\n",
        "        response = self.model.generate_content(prompt).text\n",
        "        return response.strip()\n",
        "\n",
        "    def refine(self, problem, node):\n",
        "        \"\"\"Improve a single thought and return the refined text.\"\"\"\n",
        "        prompt = f\"\"\"Problem: {problem}\n",
        "\n",
        "        Current thought: {node.content}\n",
        "\n",
        "        Refine and improve this thought:\"\"\"\n",
        "\n",
        "        response = self.model.generate_content(prompt).text\n",
        "        return response.strip()\n",
        "\n",
        "    def solve(self, problem, max_iterations=4, branch_factor=2):\n",
        "        \"\"\"Solve a problem using Graph of Thoughts.\n",
        "\n",
        "        Each iteration: generate branch_factor thoughts per active node,\n",
        "        score them, aggregate the top two survivors into a merged node,\n",
        "        refine the best node, then keep the top 3 for the next round.\n",
        "        Returns the model's final answer text.\n",
        "        \"\"\"\n",
        "        print(f\"\\n{'='*60}\")\n",
        "        print(f\"🕸️  Graph of Thoughts\")\n",
        "        print(f\"{'='*60}\")\n",
        "        print(f\"Problem: {problem}\\n\")\n",
        "\n",
        "        # Initialize with root node\n",
        "        root = self.create_node(\"Starting to analyze the problem\")\n",
        "        root.score = 1.0\n",
        "\n",
        "        active_nodes = [root]\n",
        "        all_paths = []\n",
        "\n",
        "        for iteration in range(max_iterations):\n",
        "            print(f\"{'─'*60}\")\n",
        "            print(f\"ITERATION {iteration + 1}\")\n",
        "            print(f\"{'─'*60}\\n\")\n",
        "\n",
        "            new_active = []\n",
        "\n",
        "            # GENERATE: Create new thoughts from active nodes\n",
        "            print(\"🌱 Generating new thoughts...\")\n",
        "            for node in active_nodes:\n",
        "                thoughts = self.generate(problem, [node], branch_factor)\n",
        "\n",
        "                for thought in thoughts:\n",
        "                    new_node = self.create_node(thought)\n",
        "                    node.add_successor(new_node)\n",
        "\n",
        "                    # SCORE: Evaluate thought\n",
        "                    new_node.score = self.score(problem, thought)\n",
        "\n",
        "                    print(f\"  {new_node.id}: [Score: {new_node.score:.2f}] {thought[:60]}...\")\n",
        "\n",
        "                    # Only thoughts above the quality threshold stay active\n",
        "                    if new_node.score > 0.4:\n",
        "                        new_active.append(new_node)\n",
        "\n",
        "            print()\n",
        "\n",
        "            # AGGREGATE: Merge promising parallel thoughts\n",
        "            if len(new_active) >= 2:\n",
        "                print(\"🔗 Aggregating thoughts...\")\n",
        "                # Take top 2 nodes to merge\n",
        "                sorted_nodes = sorted(new_active, key=lambda n: n.score, reverse=True)\n",
        "                to_merge = sorted_nodes[:2]\n",
        "\n",
        "                merged_content = self.aggregate(problem, to_merge)\n",
        "                merged_node = self.create_node(merged_content)\n",
        "\n",
        "                # Connect to both predecessors (graph structure!)\n",
        "                for node in to_merge:\n",
        "                    node.add_successor(merged_node)\n",
        "\n",
        "                merged_node.score = self.score(problem, merged_content)\n",
        "                print(f\"  {merged_node.id}: [Score: {merged_node.score:.2f}] {merged_content[:60]}...\")\n",
        "                print()\n",
        "\n",
        "                new_active.append(merged_node)\n",
        "\n",
        "            # REFINE: Improve best thought\n",
        "            if new_active:\n",
        "                print(\"✨ Refining best thought...\")\n",
        "                best_node = max(new_active, key=lambda n: n.score)\n",
        "\n",
        "                refined_content = self.refine(problem, best_node)\n",
        "                refined_node = self.create_node(refined_content)\n",
        "                best_node.add_successor(refined_node)\n",
        "\n",
        "                refined_node.score = self.score(problem, refined_content)\n",
        "                print(f\"  {refined_node.id}: [Score: {refined_node.score:.2f}] {refined_content[:60]}...\")\n",
        "                print()\n",
        "\n",
        "                new_active.append(refined_node)\n",
        "\n",
        "            # Keep top nodes for next iteration\n",
        "            active_nodes = sorted(new_active, key=lambda n: n.score, reverse=True)[:3]\n",
        "\n",
        "            # Track paths\n",
        "            for node in active_nodes:\n",
        "                path = self._get_path_to_node(node)\n",
        "                all_paths.append((node, path, node.score))\n",
        "\n",
        "        # Find best path. Guard the edge case where every generated thought\n",
        "        # fell below the score threshold, so no path was ever recorded and\n",
        "        # max() over an empty list would raise ValueError.\n",
        "        if not all_paths:\n",
        "            all_paths.append((root, [root], root.score))\n",
        "        best_node, best_path, best_score = max(all_paths, key=lambda x: x[2])\n",
        "\n",
        "        print(f\"{'='*60}\")\n",
        "        print(f\"🏆 BEST REASONING PATH\")\n",
        "        print(f\"{'='*60}\")\n",
        "        for i, node in enumerate(best_path):\n",
        "            print(f\"{i}. [{node.id}, Score: {node.score:.2f}] {node.content}\")\n",
        "        print()\n",
        "\n",
        "        # Generate final answer\n",
        "        path_text = \"\\n\".join([f\"{i+1}. {n.content}\" for i, n in enumerate(best_path)])\n",
        "\n",
        "        final_prompt = f\"\"\"Problem: {problem}\n",
        "\n",
        "        Reasoning path:\n",
        "        {path_text}\n",
        "\n",
        "        Provide final answer:\"\"\"\n",
        "\n",
        "        final_answer = self.model.generate_content(final_prompt).text\n",
        "\n",
        "        print(f\"{'='*60}\")\n",
        "        print(f\"💡 FINAL ANSWER\")\n",
        "        print(f\"{'='*60}\")\n",
        "        print(final_answer)\n",
        "        print()\n",
        "\n",
        "        self._visualize_graph()\n",
        "\n",
        "        return final_answer\n",
        "\n",
        "    def _get_path_to_node(self, node):\n",
        "        \"\"\"Recover one root-to-node path by walking backwards through each\n",
        "        node's first predecessor (other parents of merged nodes are ignored).\"\"\"\n",
        "        path = []\n",
        "        current = node\n",
        "        while current:\n",
        "            path.append(current)\n",
        "            current = current.predecessors[0] if current.predecessors else None\n",
        "        return list(reversed(path))\n",
        "\n",
        "    def _visualize_graph(self):\n",
        "        \"\"\"Print a text summary of the graph: node count and edge list.\"\"\"\n",
        "        print(f\"{'='*60}\")\n",
        "        print(f\"📊 GRAPH STRUCTURE\")\n",
        "        print(f\"{'='*60}\")\n",
        "        print(f\"Total nodes: {len(self.nodes)}\")\n",
        "        print(f\"Connections:\")\n",
        "        for node_id, node in self.nodes.items():\n",
        "            if node.successors:\n",
        "                successors = \", \".join([n.id for n in node.successors])\n",
        "                print(f\"  {node_id} → {successors}\")\n",
        "        print()"
      ],
      "metadata": {
        "id": "FJ14dP2rOdgt"
      },
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Run the Graph-of-Thoughts agent over five example problems.\n",
        "# Each example gets a fresh agent so the thought graphs stay independent.\n",
        "EXAMPLES = [\n",
        "    (\"EXAMPLE 1: Document Merging\",\n",
        "     \"Merge insights from three reports: Report A says 'sales up 20%', \"\n",
        "     \"Report B says 'customer satisfaction at 4.2/5', Report C says 'costs increased 15%'. \"\n",
        "     \"What's the overall business health?\"),\n",
        "    (\"EXAMPLE 2: Sorting with Rationale\",\n",
        "     \"Sort these priorities for a startup: A) Customer acquisition, B) Product development, \"\n",
        "     \"C) Fundraising, D) Team building. Consider dependencies and timing.\"),\n",
        "    (\"EXAMPLE 3: Complex Multi-Path Reasoning\",\n",
        "     \"A company can invest in: AI research (high risk, high reward), \"\n",
        "     \"market expansion (medium risk/reward), or cost optimization (low risk/reward). \"\n",
        "     \"Budget allows 2 choices. Which combination is best?\"),\n",
        "    (\"EXAMPLE 4: Knowledge Synthesis\",\n",
        "     \"Synthesize solution: Climate experts say 'reduce emissions 50% by 2030', \"\n",
        "     \"economists say 'transition must be gradual to avoid disruption', \"\n",
        "     \"technologists say 'renewable tech is now cost-effective'. What's the best approach?\"),\n",
        "    (\"EXAMPLE 5: Strategic Decision with Contingencies\",\n",
        "     \"Plan software release strategy. Options: A) Big bang release (all features at once), \"\n",
        "     \"B) Phased rollout (gradual), C) Beta program first. Consider risks, user feedback, and resources.\"),\n",
        "]\n",
        "\n",
        "for i, (title, problem) in enumerate(EXAMPLES):\n",
        "    # The first banner has no leading blank line; later ones do,\n",
        "    # matching the original copy-pasted cells' output exactly.\n",
        "    print((\"\" if i == 0 else \"\\n\") + \"=\"*60)\n",
        "    print(title)\n",
        "    print(\"=\"*60)\n",
        "    GoTAgent().solve(problem, max_iterations=3, branch_factor=2)\n",
        "\n",
        "print(\"✅ Graph of Thoughts Complete!\")"
      ],
      "metadata": {
        "id": "N3ZUMJL9OzMD"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}