{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "# Install pinned dependencies with %pip so they target the active kernel's\n",
        "# environment. NOTE: the original cell installed both `AutoGen` (ag2 0.9.x,\n",
        "# the old-style API used below) and `pyautogen` (0.10.x, which pulls the\n",
        "# incompatible autogen-agentchat 0.7.x); the two ship conflicting `autogen`\n",
        "# packages, so only the ag2-based `AutoGen` distribution is installed here.\n",
        "%pip install -q AutoGen==0.9.7 google-generativeai==0.8.5 litellm==1.74.15.post1\n",
        "\n",
        "# Standard library.\n",
        "import os\n",
        "import json\n",
        "import asyncio\n",
        "from typing import Dict, List, Any, Optional, Callable\n",
        "from datetime import datetime\n",
        "import logging\n",
        "\n",
        "# AutoGen (ag2 0.9.x) multi-agent primitives, incl. the retrieval contrib agents.\n",
        "import autogen\n",
        "from autogen import AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager\n",
        "from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent\n",
        "from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent\n",
        "\n",
        "# Gemini SDK; LiteLLM resolves the \"gemini/...\" model names used below.\n",
        "import google.generativeai as genai\n",
        "import litellm\n",
        "\n",
        "# Module-level logger for the notebook.\n",
        "logging.basicConfig(level=logging.INFO)\n",
        "logger = logging.getLogger(__name__)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "ZCWTabWwrNkj",
        "outputId": "9d014d3d-92c6-43ca-b513-972e9cdde81f"
      },
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Requirement already satisfied: AutoGen in /usr/local/lib/python3.11/dist-packages (0.9.7)\n",
            "Requirement already satisfied: ag2==0.9.7 in /usr/local/lib/python3.11/dist-packages (from AutoGen) (0.9.7)\n",
            "Requirement already satisfied: anyio<5.0.0,>=3.0.0 in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (4.9.0)\n",
            "Requirement already satisfied: asyncer==0.0.8 in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (0.0.8)\n",
            "Requirement already satisfied: diskcache in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (5.6.3)\n",
            "Requirement already satisfied: docker in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (7.1.0)\n",
            "Requirement already satisfied: httpx<1,>=0.28.1 in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (0.28.1)\n",
            "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (25.0)\n",
            "Requirement already satisfied: pydantic<3,>=2.6.1 in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (2.11.7)\n",
            "Requirement already satisfied: python-dotenv in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (1.1.1)\n",
            "Requirement already satisfied: termcolor in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (3.1.0)\n",
            "Requirement already satisfied: tiktoken in /usr/local/lib/python3.11/dist-packages (from ag2==0.9.7->AutoGen) (0.9.0)\n",
            "Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.11/dist-packages (from anyio<5.0.0,>=3.0.0->ag2==0.9.7->AutoGen) (3.10)\n",
            "Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.11/dist-packages (from anyio<5.0.0,>=3.0.0->ag2==0.9.7->AutoGen) (1.3.1)\n",
            "Requirement already satisfied: typing_extensions>=4.5 in /usr/local/lib/python3.11/dist-packages (from anyio<5.0.0,>=3.0.0->ag2==0.9.7->AutoGen) (4.14.1)\n",
            "Requirement already satisfied: certifi in /usr/local/lib/python3.11/dist-packages (from httpx<1,>=0.28.1->ag2==0.9.7->AutoGen) (2025.7.14)\n",
            "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/dist-packages (from httpx<1,>=0.28.1->ag2==0.9.7->AutoGen) (1.0.9)\n",
            "Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.11/dist-packages (from httpcore==1.*->httpx<1,>=0.28.1->ag2==0.9.7->AutoGen) (0.16.0)\n",
            "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.11/dist-packages (from pydantic<3,>=2.6.1->ag2==0.9.7->AutoGen) (0.7.0)\n",
            "Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.11/dist-packages (from pydantic<3,>=2.6.1->ag2==0.9.7->AutoGen) (2.33.2)\n",
            "Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.11/dist-packages (from pydantic<3,>=2.6.1->ag2==0.9.7->AutoGen) (0.4.1)\n",
            "Requirement already satisfied: requests>=2.26.0 in /usr/local/lib/python3.11/dist-packages (from docker->ag2==0.9.7->AutoGen) (2.32.3)\n",
            "Requirement already satisfied: urllib3>=1.26.0 in /usr/local/lib/python3.11/dist-packages (from docker->ag2==0.9.7->AutoGen) (2.5.0)\n",
            "Requirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.11/dist-packages (from tiktoken->ag2==0.9.7->AutoGen) (2024.11.6)\n",
            "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests>=2.26.0->docker->ag2==0.9.7->AutoGen) (3.4.2)\n",
            "Collecting pyautogen\n",
            "  Downloading pyautogen-0.10.0-py3-none-any.whl.metadata (2.6 kB)\n",
            "Requirement already satisfied: google-generativeai in /usr/local/lib/python3.11/dist-packages (0.8.5)\n",
            "Collecting litellm\n",
            "  Downloading litellm-1.74.15.post1.tar.gz (9.7 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.7/9.7 MB\u001b[0m \u001b[31m72.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
            "  Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
            "  Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "Collecting autogen-agentchat>=0.6.4 (from pyautogen)\n",
            "  Downloading autogen_agentchat-0.7.1-py3-none-any.whl.metadata (2.5 kB)\n",
            "Requirement already satisfied: google-ai-generativelanguage==0.6.15 in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (0.6.15)\n",
            "Requirement already satisfied: google-api-core in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (2.25.1)\n",
            "Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (2.177.0)\n",
            "Requirement already satisfied: google-auth>=2.15.0 in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (2.38.0)\n",
            "Requirement already satisfied: protobuf in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (5.29.5)\n",
            "Requirement already satisfied: pydantic in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (2.11.7)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (4.67.1)\n",
            "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.11/dist-packages (from google-generativeai) (4.14.1)\n",
            "Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in /usr/local/lib/python3.11/dist-packages (from google-ai-generativelanguage==0.6.15->google-generativeai) (1.26.1)\n",
            "Requirement already satisfied: aiohttp>=3.10 in /usr/local/lib/python3.11/dist-packages (from litellm) (3.12.14)\n",
            "Requirement already satisfied: click in /usr/local/lib/python3.11/dist-packages (from litellm) (8.2.1)\n",
            "Requirement already satisfied: httpx>=0.23.0 in /usr/local/lib/python3.11/dist-packages (from litellm) (0.28.1)\n",
            "Requirement already satisfied: importlib-metadata>=6.8.0 in /usr/local/lib/python3.11/dist-packages (from litellm) (8.7.0)\n",
            "Requirement already satisfied: jinja2<4.0.0,>=3.1.2 in /usr/local/lib/python3.11/dist-packages (from litellm) (3.1.6)\n",
            "Requirement already satisfied: jsonschema<5.0.0,>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from litellm) (4.25.0)\n",
            "Requirement already satisfied: openai>=1.68.2 in /usr/local/lib/python3.11/dist-packages (from litellm) (1.97.1)\n",
            "Requirement already satisfied: python-dotenv>=0.2.0 in /usr/local/lib/python3.11/dist-packages (from litellm) (1.1.1)\n",
            "Requirement already satisfied: tiktoken>=0.7.0 in /usr/local/lib/python3.11/dist-packages (from litellm) (0.9.0)\n",
            "Requirement already satisfied: tokenizers in /usr/local/lib/python3.11/dist-packages (from litellm) (0.21.2)\n",
            "Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (2.6.1)\n",
            "Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (1.4.0)\n",
            "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (25.3.0)\n",
            "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (1.7.0)\n",
            "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (6.6.3)\n",
            "Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (0.3.2)\n",
            "Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp>=3.10->litellm) (1.20.1)\n",
            "Collecting autogen-core==0.7.1 (from autogen-agentchat>=0.6.4->pyautogen)\n",
            "  Downloading autogen_core-0.7.1-py3-none-any.whl.metadata (2.3 kB)\n",
            "Collecting jsonref~=1.1.0 (from autogen-core==0.7.1->autogen-agentchat>=0.6.4->pyautogen)\n",
            "  Downloading jsonref-1.1.0-py3-none-any.whl.metadata (2.7 kB)\n",
            "Collecting opentelemetry-api>=1.34.1 (from autogen-core==0.7.1->autogen-agentchat>=0.6.4->pyautogen)\n",
            "  Downloading opentelemetry_api-1.36.0-py3-none-any.whl.metadata (1.5 kB)\n",
            "Requirement already satisfied: pillow>=11.0.0 in /usr/local/lib/python3.11/dist-packages (from autogen-core==0.7.1->autogen-agentchat>=0.6.4->pyautogen) (11.3.0)\n",
            "Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.11/dist-packages (from google-api-core->google-generativeai) (1.70.0)\n",
            "Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.11/dist-packages (from google-api-core->google-generativeai) (2.32.3)\n",
            "Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from google-auth>=2.15.0->google-generativeai) (5.5.2)\n",
            "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.11/dist-packages (from google-auth>=2.15.0->google-generativeai) (0.4.2)\n",
            "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.11/dist-packages (from google-auth>=2.15.0->google-generativeai) (4.9.1)\n",
            "Requirement already satisfied: anyio in /usr/local/lib/python3.11/dist-packages (from httpx>=0.23.0->litellm) (4.9.0)\n",
            "Requirement already satisfied: certifi in /usr/local/lib/python3.11/dist-packages (from httpx>=0.23.0->litellm) (2025.7.14)\n",
            "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/dist-packages (from httpx>=0.23.0->litellm) (1.0.9)\n",
            "Requirement already satisfied: idna in /usr/local/lib/python3.11/dist-packages (from httpx>=0.23.0->litellm) (3.10)\n",
            "Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.11/dist-packages (from httpcore==1.*->httpx>=0.23.0->litellm) (0.16.0)\n",
            "Requirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.11/dist-packages (from importlib-metadata>=6.8.0->litellm) (3.23.0)\n",
            "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from jinja2<4.0.0,>=3.1.2->litellm) (3.0.2)\n",
            "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.11/dist-packages (from jsonschema<5.0.0,>=4.22.0->litellm) (2025.4.1)\n",
            "Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.11/dist-packages (from jsonschema<5.0.0,>=4.22.0->litellm) (0.36.2)\n",
            "Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.11/dist-packages (from jsonschema<5.0.0,>=4.22.0->litellm) (0.26.0)\n",
            "Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.11/dist-packages (from openai>=1.68.2->litellm) (1.9.0)\n",
            "Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.11/dist-packages (from openai>=1.68.2->litellm) (0.10.0)\n",
            "Requirement already satisfied: sniffio in /usr/local/lib/python3.11/dist-packages (from openai>=1.68.2->litellm) (1.3.1)\n",
            "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.11/dist-packages (from pydantic->google-generativeai) (0.7.0)\n",
            "Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.11/dist-packages (from pydantic->google-generativeai) (2.33.2)\n",
            "Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.11/dist-packages (from pydantic->google-generativeai) (0.4.1)\n",
            "Requirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.11/dist-packages (from tiktoken>=0.7.0->litellm) (2024.11.6)\n",
            "Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.11/dist-packages (from google-api-python-client->google-generativeai) (0.22.0)\n",
            "Requirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in /usr/local/lib/python3.11/dist-packages (from google-api-python-client->google-generativeai) (0.2.0)\n",
            "Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.11/dist-packages (from google-api-python-client->google-generativeai) (4.2.0)\n",
            "Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.11/dist-packages (from tokenizers->litellm) (0.34.1)\n",
            "Requirement already satisfied: grpcio<2.0.0,>=1.33.2 in /usr/local/lib/python3.11/dist-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.10.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,<3.0.0dev,>=1.34.1->google-ai-generativelanguage==0.6.15->google-generativeai) (1.74.0)\n",
            "Requirement already satisfied: grpcio-status<2.0.0,>=1.33.2 in /usr/local/lib/python3.11/dist-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.10.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,<3.0.0dev,>=1.34.1->google-ai-generativelanguage==0.6.15->google-generativeai) (1.71.2)\n",
            "Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in /usr/local/lib/python3.11/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->google-generativeai) (3.2.3)\n",
            "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers->litellm) (3.18.0)\n",
            "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers->litellm) (2025.3.0)\n",
            "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers->litellm) (25.0)\n",
            "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers->litellm) (6.0.2)\n",
            "Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers->litellm) (1.1.5)\n",
            "Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.11/dist-packages (from pyasn1-modules>=0.2.1->google-auth>=2.15.0->google-generativeai) (0.6.1)\n",
            "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->google-generativeai) (3.4.2)\n",
            "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->google-generativeai) (2.5.0)\n",
            "Downloading pyautogen-0.10.0-py3-none-any.whl (3.0 kB)\n",
            "Downloading autogen_agentchat-0.7.1-py3-none-any.whl (117 kB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m117.1/117.1 kB\u001b[0m \u001b[31m9.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hDownloading autogen_core-0.7.1-py3-none-any.whl (101 kB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m101.4/101.4 kB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hDownloading jsonref-1.1.0-py3-none-any.whl (9.4 kB)\n",
            "Downloading opentelemetry_api-1.36.0-py3-none-any.whl (65 kB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m65.6/65.6 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hBuilding wheels for collected packages: litellm\n",
            "  Building wheel for litellm (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for litellm: filename=litellm-1.74.15.post1-py3-none-any.whl size=8835509 sha256=dedfa68ec2d8865d2886717d574a78c344e1afe016a2592ffd6a0d169a7365f2\n",
            "  Stored in directory: /root/.cache/pip/wheels/77/04/c0/f3d1eaeba92e43bf6fbfc9ca64026d83e7a611299428ba10c8\n",
            "Successfully built litellm\n",
            "Installing collected packages: jsonref, opentelemetry-api, autogen-core, litellm, autogen-agentchat, pyautogen\n",
            "Successfully installed autogen-agentchat-0.7.1 autogen-core-0.7.1 jsonref-1.1.0 litellm-1.74.15.post1 opentelemetry-api-1.36.0 pyautogen-0.10.0\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "class GeminiAutoGenFramework:\n",
        "    \"\"\"\n",
        "    Complete AutoGen framework using free Gemini API\n",
        "    Supports multi-agent conversations, code execution, and retrieval\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, gemini_api_key: str):\n",
        "        \"\"\"Initialize the framework.\n",
        "\n",
        "        Args:\n",
        "            gemini_api_key: Google AI Studio API key used by every agent config.\n",
        "        \"\"\"\n",
        "        self.gemini_api_key = gemini_api_key\n",
        "        # Builds self.llm_config / self.llm_config_pro and exports the key to env.\n",
        "        self.setup_gemini_config()\n",
        "        # Registries of every agent / group chat created through this instance.\n",
        "        self.agents: Dict[str, autogen.Agent] = {}\n",
        "        self.group_chats: Dict[str, GroupChat] = {}\n",
        "\n",
        "    def setup_gemini_config(self):\n",
        "        \"\"\"Build the AutoGen llm_config dicts backed by Gemini.\n",
        "\n",
        "        Sets self.llm_config (gemini-1.5-flash: the fast default) and\n",
        "        self.llm_config_pro (gemini-1.5-pro: lower temperature, larger\n",
        "        max_tokens, longer timeout) for use by the agent factories.\n",
        "        \"\"\"\n",
        "        # Export the key so the Google / LiteLLM SDKs can also read it from env.\n",
        "        os.environ[\"GOOGLE_API_KEY\"] = self.gemini_api_key\n",
        "\n",
        "        # Default (Flash) config used by most agents.\n",
        "        # \"cache_seed\" fixes AutoGen's response-cache key for reproducible reruns.\n",
        "        self.llm_config = {\n",
        "            \"config_list\": [\n",
        "                {\n",
        "                    \"model\": \"gemini/gemini-1.5-flash\",\n",
        "                    \"api_key\": self.gemini_api_key,\n",
        "                    \"api_type\": \"google\",\n",
        "                    \"temperature\": 0.7,\n",
        "                    \"max_tokens\": 4096,\n",
        "                }\n",
        "            ],\n",
        "            \"timeout\": 120,\n",
        "            \"cache_seed\": 42,\n",
        "        }\n",
        "\n",
        "        # Pro config, selected via use_pro_model=True in create_assistant_agent.\n",
        "        self.llm_config_pro = {\n",
        "            \"config_list\": [\n",
        "                {\n",
        "                    \"model\": \"gemini/gemini-1.5-pro\",\n",
        "                    \"api_key\": self.gemini_api_key,\n",
        "                    \"api_type\": \"google\",\n",
        "                    \"temperature\": 0.5,\n",
        "                    \"max_tokens\": 8192,\n",
        "                }\n",
        "            ],\n",
        "            \"timeout\": 180,\n",
        "            \"cache_seed\": 42,\n",
        "        }\n",
        "\n",
        "    def create_assistant_agent(self, name: str, system_message: str,\n",
        "                             use_pro_model: bool = False) -> AssistantAgent:\n",
        "        \"\"\"Build an LLM-backed assistant agent and register it.\n",
        "\n",
        "        Args:\n",
        "            name: Unique agent name; also the registry key in self.agents.\n",
        "            system_message: Role prompt that shapes the agent's behavior.\n",
        "            use_pro_model: When True, use the Gemini Pro config instead of Flash.\n",
        "\n",
        "        Returns:\n",
        "            The newly created AssistantAgent.\n",
        "        \"\"\"\n",
        "        if use_pro_model:\n",
        "            llm_settings = self.llm_config_pro\n",
        "        else:\n",
        "            llm_settings = self.llm_config\n",
        "\n",
        "        assistant = AssistantAgent(\n",
        "            name=name,\n",
        "            system_message=system_message,\n",
        "            llm_config=llm_settings,\n",
        "            # Fully autonomous: never prompts a human, never executes code itself.\n",
        "            human_input_mode=\"NEVER\",\n",
        "            max_consecutive_auto_reply=10,\n",
        "            code_execution_config=False,\n",
        "        )\n",
        "\n",
        "        # Register so later lookups can find the agent by name.\n",
        "        self.agents[name] = assistant\n",
        "        return assistant\n",
        "\n",
        "    def create_user_proxy(self, name: str = \"UserProxy\",\n",
        "                         enable_code_execution: bool = True) -> UserProxyAgent:\n",
        "        \"\"\"Create a user-proxy agent, optionally able to execute code.\n",
        "\n",
        "        Args:\n",
        "            name: Agent name; also the registry key in self.agents.\n",
        "            enable_code_execution: When True, the proxy runs code blocks\n",
        "                locally (no Docker) inside ./autogen_workspace.\n",
        "        \"\"\"\n",
        "\n",
        "        # Local (non-Docker) execution sandboxed to a workspace directory;\n",
        "        # only the last 3 messages are scanned for code blocks.\n",
        "        code_config = {\n",
        "            \"work_dir\": \"autogen_workspace\",\n",
        "            \"use_docker\": False,\n",
        "            \"timeout\": 60,\n",
        "            \"last_n_messages\": 3,\n",
        "        } if enable_code_execution else False\n",
        "\n",
        "        agent = UserProxyAgent(\n",
        "            name=name,\n",
        "            human_input_mode=\"TERMINATE\",\n",
        "            # NOTE(review): 0 disables auto-replies entirely, so this proxy will\n",
        "            # not auto-run code in a group chat -- confirm this is intended.\n",
        "            max_consecutive_auto_reply=0,\n",
        "            # Chat ends when a message's content ends with the TERMINATE sentinel.\n",
        "            is_termination_msg=lambda x: x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n",
        "            code_execution_config=code_config,\n",
        "            system_message=\"\"\"A human admin. Interact with the agents to solve tasks.\n",
        "            Reply TERMINATE when the task is solved.\"\"\"\n",
        "        )\n",
        "\n",
        "        self.agents[name] = agent\n",
        "        return agent\n",
        "\n",
        "    def create_research_team(self) -> Dict[str, autogen.Agent]:\n",
        "        \"\"\"Create a research-focused agent team.\n",
        "\n",
        "        Returns:\n",
        "            Mapping with keys \"researcher\", \"analyst\", \"writer\" and\n",
        "            \"executor\" (a code-executing user proxy).\n",
        "        \"\"\"\n",
        "\n",
        "        researcher = self.create_assistant_agent(\n",
        "            name=\"Researcher\",\n",
        "            system_message=\"\"\"You are a Senior Research Analyst. Your role is to:\n",
        "            1. Gather and analyze information on given topics\n",
        "            2. Identify key trends, patterns, and insights\n",
        "            3. Provide comprehensive research summaries\n",
        "            4. Cite sources and maintain objectivity\n",
        "\n",
        "            Always structure your research with clear sections and bullet points.\n",
        "            Be thorough but concise.\"\"\"\n",
        "        )\n",
        "\n",
        "        analyst = self.create_assistant_agent(\n",
        "            name=\"DataAnalyst\",\n",
        "            system_message=\"\"\"You are a Data Analysis Expert. Your role is to:\n",
        "            1. Analyze quantitative data and statistics\n",
        "            2. Create data visualizations and charts\n",
        "            3. Identify patterns and correlations\n",
        "            4. Provide statistical insights and interpretations\n",
        "\n",
        "            Use Python code when needed for calculations and visualizations.\n",
        "            Always explain your analytical approach.\"\"\"\n",
        "        )\n",
        "\n",
        "        writer = self.create_assistant_agent(\n",
        "            name=\"Writer\",\n",
        "            system_message=\"\"\"You are a Technical Writer and Content Strategist. Your role is to:\n",
        "            1. Transform research and analysis into clear, engaging content\n",
        "            2. Create well-structured reports and articles\n",
        "            3. Ensure content is accessible to the target audience\n",
        "            4. Maintain professional tone and accuracy\n",
        "\n",
        "            Structure content with clear headings, bullet points, and conclusions.\"\"\"\n",
        "        )\n",
        "\n",
        "        # Proxy that can execute the analyst's Python code locally.\n",
        "        executor = self.create_user_proxy(\"CodeExecutor\", enable_code_execution=True)\n",
        "\n",
        "        return {\n",
        "            \"researcher\": researcher,\n",
        "            \"analyst\": analyst,\n",
        "            \"writer\": writer,\n",
        "            \"executor\": executor\n",
        "        }\n",
        "\n",
        "    def create_business_team(self) -> Dict[str, autogen.Agent]:\n",
        "        \"\"\"Create a business-analysis agent team.\n",
        "\n",
        "        Returns:\n",
        "            Mapping with keys \"strategist\", \"financial_analyst\",\n",
        "            \"market_researcher\" and \"executor\" (a user proxy).\n",
        "        \"\"\"\n",
        "\n",
        "        # The strategist uses the larger Pro config (use_pro_model=True below).\n",
        "        strategist = self.create_assistant_agent(\n",
        "            name=\"BusinessStrategist\",\n",
        "            system_message=\"\"\"You are a Senior Business Strategy Consultant. Your role is to:\n",
        "            1. Analyze business problems and opportunities\n",
        "            2. Develop strategic recommendations and action plans\n",
        "            3. Assess market dynamics and competitive landscape\n",
        "            4. Provide implementation roadmaps\n",
        "\n",
        "            Think systematically and consider multiple perspectives.\n",
        "            Always provide actionable recommendations.\"\"\",\n",
        "            use_pro_model=True\n",
        "        )\n",
        "\n",
        "        financial_analyst = self.create_assistant_agent(\n",
        "            name=\"FinancialAnalyst\",\n",
        "            system_message=\"\"\"You are a Financial Analysis Expert. Your role is to:\n",
        "            1. Perform financial modeling and analysis\n",
        "            2. Assess financial risks and opportunities\n",
        "            3. Calculate ROI, NPV, and other financial metrics\n",
        "            4. Provide budget and investment recommendations\n",
        "\n",
        "            Use quantitative analysis and provide clear financial insights.\"\"\"\n",
        "        )\n",
        "\n",
        "        market_researcher = self.create_assistant_agent(\n",
        "            name=\"MarketResearcher\",\n",
        "            system_message=\"\"\"You are a Market Research Specialist. Your role is to:\n",
        "            1. Analyze market trends and consumer behavior\n",
        "            2. Research competitive landscape and positioning\n",
        "            3. Identify target markets and customer segments\n",
        "            4. Provide market sizing and opportunity assessment\n",
        "\n",
        "            Focus on actionable market insights and recommendations.\"\"\"\n",
        "        )\n",
        "\n",
        "        return {\n",
        "            \"strategist\": strategist,\n",
        "            \"financial_analyst\": financial_analyst,\n",
        "            \"market_researcher\": market_researcher,\n",
        "            # Proxy created with default settings (code execution defaults to True).\n",
        "            \"executor\": self.create_user_proxy(\"BusinessExecutor\")\n",
        "        }\n",
        "\n",
        "    def create_development_team(self) -> Dict[str, autogen.Agent]:\n",
        "        \"\"\"Create a software-development agent team.\n",
        "\n",
        "        Returns:\n",
        "            Mapping with keys \"developer\", \"devops\", \"qa_engineer\" and\n",
        "            \"executor\" (a code-executing user proxy).\n",
        "        \"\"\"\n",
        "\n",
        "        developer = self.create_assistant_agent(\n",
        "            name=\"SeniorDeveloper\",\n",
        "            system_message=\"\"\"You are a Senior Software Developer. Your role is to:\n",
        "            1. Write high-quality, efficient code\n",
        "            2. Design software architecture and solutions\n",
        "            3. Debug and optimize existing code\n",
        "            4. Follow best practices and coding standards\n",
        "\n",
        "            Always explain your code and design decisions.\n",
        "            Focus on clean, maintainable solutions.\"\"\"\n",
        "        )\n",
        "\n",
        "        devops = self.create_assistant_agent(\n",
        "            name=\"DevOpsEngineer\",\n",
        "            system_message=\"\"\"You are a DevOps Engineer. Your role is to:\n",
        "            1. Design deployment and infrastructure solutions\n",
        "            2. Automate build, test, and deployment processes\n",
        "            3. Monitor system performance and reliability\n",
        "            4. Implement security and scalability best practices\n",
        "\n",
        "            Focus on automation, reliability, and scalability.\"\"\"\n",
        "        )\n",
        "\n",
        "        qa_engineer = self.create_assistant_agent(\n",
        "            name=\"QAEngineer\",\n",
        "            system_message=\"\"\"You are a Quality Assurance Engineer. Your role is to:\n",
        "            1. Design comprehensive test strategies and cases\n",
        "            2. Identify potential bugs and edge cases\n",
        "            3. Ensure code quality and performance standards\n",
        "            4. Validate requirements and user acceptance criteria\n",
        "\n",
        "            Be thorough and think about edge cases and failure scenarios.\"\"\"\n",
        "        )\n",
        "\n",
        "        return {\n",
        "            \"developer\": developer,\n",
        "            \"devops\": devops,\n",
        "            \"qa_engineer\": qa_engineer,\n",
        "            # Proxy that can run the developer's code locally.\n",
        "            \"executor\": self.create_user_proxy(\"DevExecutor\", enable_code_execution=True)\n",
        "        }\n",
        "\n",
        "    def create_group_chat(self, agents: List[autogen.Agent], chat_name: str,\n",
        "                         max_round: int = 10) -> GroupChat:\n",
        "        \"\"\"Assemble a round-robin GroupChat and cache it under `chat_name`.\n",
        "\n",
        "        Args:\n",
        "            agents: Participating agents, given turns in round-robin order.\n",
        "            chat_name: Key under which the chat is stored in self.group_chats.\n",
        "            max_round: Hard cap on conversation rounds.\n",
        "\n",
        "        Returns:\n",
        "            The newly constructed GroupChat.\n",
        "        \"\"\"\n",
        "        chat = GroupChat(\n",
        "            agents=agents,\n",
        "            messages=[],\n",
        "            max_round=max_round,\n",
        "            speaker_selection_method=\"round_robin\",\n",
        "            allow_repeat_speaker=False,\n",
        "        )\n",
        "        self.group_chats[chat_name] = chat\n",
        "        return chat\n",
        "\n",
        "    def run_research_project(self, topic: str, max_rounds: int = 8) -> str:\n",
        "        \"\"\"Run a multi-agent research project and return the final report.\n",
        "\n",
        "        Args:\n",
        "            topic: Subject the team should research.\n",
        "            max_rounds: Cap on group-chat rounds.\n",
        "\n",
        "        Returns:\n",
        "            Result extracted from the chat via _extract_final_result.\n",
        "        \"\"\"\n",
        "\n",
        "        # Fresh team per project; the executor proxy initiates the conversation.\n",
        "        team = self.create_research_team()\n",
        "        agents_list = [team[\"researcher\"], team[\"analyst\"], team[\"writer\"], team[\"executor\"]]\n",
        "\n",
        "        group_chat = self.create_group_chat(agents_list, \"research_chat\", max_rounds)\n",
        "        manager = GroupChatManager(\n",
        "            groupchat=group_chat,\n",
        "            llm_config=self.llm_config\n",
        "        )\n",
        "\n",
        "        # Kick-off prompt describing the workflow and expected deliverable.\n",
        "        initial_message = f\"\"\"\n",
        "        Research Project: {topic}\n",
        "\n",
        "        Please collaborate to produce a comprehensive research report following this workflow:\n",
        "        1. Researcher: Gather information and key insights about {topic}\n",
        "        2. DataAnalyst: Analyze any quantitative aspects and create visualizations if needed\n",
        "        3. Writer: Create a well-structured final report based on the research and analysis\n",
        "        4. CodeExecutor: Execute any code needed for analysis or visualization\n",
        "\n",
        "        The final deliverable should be a professional research report with:\n",
        "        - Executive summary\n",
        "        - Key findings and insights\n",
        "        - Data analysis (if applicable)\n",
        "        - Conclusions and recommendations\n",
        "\n",
        "        Begin the research process now.\n",
        "        \"\"\"\n",
        "\n",
        "        # NOTE(review): max_consecutive_auto_reply is not a documented\n",
        "        # initiate_chat parameter -- verify it has the intended effect here.\n",
        "        chat_result = team[\"executor\"].initiate_chat(\n",
        "            manager,\n",
        "            message=initial_message,\n",
        "            max_consecutive_auto_reply=0\n",
        "        )\n",
        "\n",
        "        # NOTE(review): _extract_final_result is defined elsewhere in this class.\n",
        "        return self._extract_final_result(chat_result)\n",
        "\n",
        "    def run_business_analysis(self, business_problem: str, max_rounds: int = 8) -> str:\n",
        "        \"\"\"Drive the business team through a collaborative analysis chat.\n",
        "\n",
        "        Args:\n",
        "            business_problem: Problem statement for the team to analyze.\n",
        "            max_rounds: Cap on group-chat rounds.\n",
        "\n",
        "        Returns:\n",
        "            Formatted tail of the conversation (see _extract_final_result).\n",
        "        \"\"\"\n",
        "        members = self.create_business_team()\n",
        "        roster = [members[\"strategist\"], members[\"financial_analyst\"],\n",
        "                  members[\"market_researcher\"], members[\"executor\"]]\n",
        "\n",
        "        # Note: this workflow uses the \"pro\" model configuration.\n",
        "        manager = GroupChatManager(\n",
        "            groupchat=self.create_group_chat(roster, \"business_chat\", max_rounds),\n",
        "            llm_config=self.llm_config_pro,\n",
        "        )\n",
        "\n",
        "        kickoff = f\"\"\"\n",
        "        Business Analysis Project: {business_problem}\n",
        "\n",
        "        Please collaborate to provide comprehensive business analysis following this approach:\n",
        "        1. BusinessStrategist: Analyze the business problem and develop strategic framework\n",
        "        2. FinancialAnalyst: Assess financial implications and create financial models\n",
        "        3. MarketResearcher: Research market context and competitive landscape\n",
        "        4. BusinessExecutor: Coordinate and compile final recommendations\n",
        "\n",
        "        Final deliverable should include:\n",
        "        - Problem analysis and root causes\n",
        "        - Strategic recommendations\n",
        "        - Financial impact assessment\n",
        "        - Market opportunity analysis\n",
        "        - Implementation roadmap\n",
        "\n",
        "        Begin the analysis now.\n",
        "        \"\"\"\n",
        "\n",
        "        outcome = members[\"executor\"].initiate_chat(\n",
        "            manager,\n",
        "            message=kickoff,\n",
        "            max_consecutive_auto_reply=0,\n",
        "        )\n",
        "        return self._extract_final_result(outcome)\n",
        "\n",
        "    def run_development_project(self, project_description: str, max_rounds: int = 10) -> str:\n",
        "        \"\"\"Drive the development team through a collaborative build chat.\n",
        "\n",
        "        Args:\n",
        "            project_description: What the team should design and implement.\n",
        "            max_rounds: Cap on group-chat rounds.\n",
        "\n",
        "        Returns:\n",
        "            Formatted tail of the conversation (see _extract_final_result).\n",
        "        \"\"\"\n",
        "        members = self.create_development_team()\n",
        "        roster = [members[\"developer\"], members[\"devops\"],\n",
        "                  members[\"qa_engineer\"], members[\"executor\"]]\n",
        "\n",
        "        manager = GroupChatManager(\n",
        "            groupchat=self.create_group_chat(roster, \"dev_chat\", max_rounds),\n",
        "            llm_config=self.llm_config,\n",
        "        )\n",
        "\n",
        "        kickoff = f\"\"\"\n",
        "        Development Project: {project_description}\n",
        "\n",
        "        Please collaborate to deliver a complete software solution:\n",
        "        1. SeniorDeveloper: Design architecture and write core code\n",
        "        2. DevOpsEngineer: Plan deployment and infrastructure\n",
        "        3. QAEngineer: Design tests and quality assurance approach\n",
        "        4. DevExecutor: Execute code and coordinate implementation\n",
        "\n",
        "        Deliverables should include:\n",
        "        - System architecture and design\n",
        "        - Working code implementation\n",
        "        - Deployment configuration\n",
        "        - Test cases and QA plan\n",
        "        - Documentation\n",
        "\n",
        "        Start development now.\n",
        "        \"\"\"\n",
        "\n",
        "        outcome = members[\"executor\"].initiate_chat(\n",
        "            manager,\n",
        "            message=kickoff,\n",
        "            max_consecutive_auto_reply=0,\n",
        "        )\n",
        "        return self._extract_final_result(outcome)\n",
        "\n",
        "    def _extract_final_result(self, chat_result) -> str:\n",
        "        \"\"\"Extract and format final result from chat\"\"\"\n",
        "        if hasattr(chat_result, 'chat_history'):\n",
        "            messages = chat_result.chat_history\n",
        "        else:\n",
        "            messages = chat_result\n",
        "\n",
        "        final_messages = []\n",
        "        for msg in messages[-5:]:\n",
        "            if isinstance(msg, dict) and 'content' in msg:\n",
        "                final_messages.append(f\"{msg.get('name', 'Agent')}: {msg['content']}\")\n",
        "\n",
        "        return \"\\n\\n\".join(final_messages)\n",
        "\n",
        "    def get_framework_stats(self) -> Dict[str, Any]:\n",
        "        \"\"\"Return a snapshot of registered agents, group chats, and the\n",
        "        primary LLM configuration, stamped with the current time.\"\"\"\n",
        "        primary_cfg = self.llm_config[\"config_list\"][0]\n",
        "        stats: Dict[str, Any] = {\n",
        "            \"agents\": list(self.agents),\n",
        "            \"group_chats\": list(self.group_chats),\n",
        "            \"llm_config\": {\n",
        "                \"model\": primary_cfg[\"model\"],\n",
        "                \"temperature\": primary_cfg[\"temperature\"],\n",
        "            },\n",
        "            \"timestamp\": datetime.now().isoformat(),\n",
        "        }\n",
        "        return stats"
      ],
      "metadata": {
        "id": "KCK2Ro_hsdxC"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Z4hVzSRerIAT",
        "outputId": "1b67dc05-35d9-4139-e90e-b1100bea8c95"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Microsoft AutoGen + Gemini Framework Ready! 🚀\n",
            "\n",
            "📦 For Google Colab, run:\n",
            "!pip install pyautogen google-generativeai litellm\n",
            "\n",
            "🔑 Get your free Gemini API key:\n",
            "https://makersuite.google.com/app/apikey\n",
            "\n",
            "🚀 Quick start:\n",
            "\n",
            "# Initialize framework\n",
            "# framework = GeminiAutoGenFramework(\"your-gemini-api-key\")\n",
            "\n",
            "# Run research project  \n",
            "result = framework.run_research_project(\"AI Trends 2025\")\n",
            "print(result)\n",
            "\n",
            "# Run business analysis\n",
            "result = framework.run_business_analysis(\"Market entry strategy for AI startup\")\n",
            "print(result)\n",
            "\n",
            "# Run development project\n",
            "result = framework.run_development_project(\"Build a REST API for user management\")\n",
            "print(result)\n",
            "    \n"
          ]
        }
      ],
      "source": [
        "def demo_autogen_framework():\n",
        "    \"\"\"Initialize the framework for a demo and print its stats.\n",
        "\n",
        "    The Gemini key is read from the GEMINI_API_KEY environment variable\n",
        "    when set, falling back to the original placeholder so the demo still\n",
        "    runs without configuration (API calls will then fail at the provider\n",
        "    instead of a real secret being hardcoded in the notebook).\n",
        "\n",
        "    Returns:\n",
        "        The initialized GeminiAutoGenFramework instance.\n",
        "    \"\"\"\n",
        "    print(\"🚀 Microsoft AutoGen + Gemini Framework Demo\")\n",
        "    print(\"=\" * 60)\n",
        "\n",
        "    # Security: never hardcode real credentials; prefer the environment.\n",
        "    GEMINI_API_KEY = os.environ.get(\"GEMINI_API_KEY\", \"your-gemini-api-key-here\")\n",
        "\n",
        "    framework = GeminiAutoGenFramework(GEMINI_API_KEY)\n",
        "\n",
        "    print(\"✅ Framework initialized successfully!\")\n",
        "    print(f\"📊 Stats: {json.dumps(framework.get_framework_stats(), indent=2)}\")\n",
        "\n",
        "    return framework\n",
        "\n",
        "async def run_demo_projects(framework):\n",
        "    \"\"\"Run demonstration projects\"\"\"\n",
        "\n",
        "    print(\"\\n🔬 Running Research Project...\")\n",
        "    research_result = framework.run_research_project(\n",
        "        \"Impact of Generative AI on Software Development in 2025\"\n",
        "    )\n",
        "    print(\"Research Result (excerpt):\")\n",
        "    print(research_result[:500] + \"...\" if len(research_result) > 500 else research_result)\n",
        "\n",
        "    print(\"\\n💼 Running Business Analysis...\")\n",
        "    business_result = framework.run_business_analysis(\n",
        "        \"A mid-sized company wants to implement AI-powered customer service. \"\n",
        "        \"They currently have 50 support staff and handle 1000 tickets daily. \"\n",
        "        \"Budget is $500K annually.\"\n",
        "    )\n",
        "    print(\"Business Analysis Result (excerpt):\")\n",
        "    print(business_result[:500] + \"...\" if len(business_result) > 500 else business_result)\n",
        "\n",
        "    print(\"\\n💻 Running Development Project...\")\n",
        "    dev_result = framework.run_development_project(\n",
        "        \"Build a Python web scraper that extracts product information from e-commerce sites, \"\n",
        "        \"stores data in a database, and provides a REST API for data access.\"\n",
        "    )\n",
        "    print(\"Development Result (excerpt):\")\n",
        "    print(dev_result[:500] + \"...\" if len(dev_result) > 500 else dev_result)\n",
        "\n",
        "if __name__ == \"__main__\":\n",
        "    # Usage banner printed when this cell runs as the entry point.\n",
        "    for banner_line in (\n",
        "        \"Microsoft AutoGen + Gemini Framework Ready! 🚀\",\n",
        "        \"\\n📦 For Google Colab, run:\",\n",
        "        \"!pip install pyautogen google-generativeai litellm\",\n",
        "        \"\\n🔑 Get your free Gemini API key:\",\n",
        "        \"https://makersuite.google.com/app/apikey\",\n",
        "        \"\\n🚀 Quick start:\",\n",
        "    ):\n",
        "        print(banner_line)\n",
        "    print(\"\"\"\n",
        "# Initialize framework\n",
        "# framework = GeminiAutoGenFramework(\"your-gemini-api-key\")\n",
        "\n",
        "# Run research project\n",
        "result = framework.run_research_project(\"AI Trends 2025\")\n",
        "print(result)\n",
        "\n",
        "# Run business analysis\n",
        "result = framework.run_business_analysis(\"Market entry strategy for AI startup\")\n",
        "print(result)\n",
        "\n",
        "# Run development project\n",
        "result = framework.run_development_project(\"Build a REST API for user management\")\n",
        "print(result)\n",
        "    \"\"\")"
      ]
    }
  ]
}