{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "# Use %pip (not !pip) so the install targets the running kernel's environment.\n",
        "%pip install -q pipecat-ai transformers torch accelerate numpy\n",
        "\n",
        "import asyncio\n",
        "import logging\n",
        "from typing import AsyncGenerator\n",
        "import numpy as np\n",
        "\n",
        "print(\"🔍 Checking available Pipecat frames...\")\n",
        "\n",
        "try:\n",
        "    from pipecat.frames.frames import (\n",
        "        Frame,\n",
        "        TextFrame,\n",
        "    )\n",
        "    print(\"✅ Basic frames imported successfully\")\n",
        "except ImportError as e:\n",
        "    # Report the failure, then re-raise: retrying the identical import can\n",
        "    # only fail again, so fail fast with the original traceback instead.\n",
        "    print(f\"⚠️  Import error: {e}\")\n",
        "    raise\n",
        "\n",
        "from pipecat.pipeline.pipeline import Pipeline\n",
        "from pipecat.pipeline.runner import PipelineRunner\n",
        "from pipecat.pipeline.task import PipelineTask\n",
        "from pipecat.processors.frame_processor import FrameDirection, FrameProcessor\n",
        "\n",
        "from transformers import pipeline as hf_pipeline\n",
        "import torch"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "rY_ghnEljsuA",
        "outputId": "a70c6fb3-be36-48bd-b9dc-8a07db78a7aa"
      },
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "🔍 Checking available Pipecat frames...\n",
            "✅ Basic frames imported successfully\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "class SimpleChatProcessor(FrameProcessor):\n",
        "    \"\"\"Conversational AI processor backed by a HuggingFace text-generation pipeline.\n",
        "\n",
        "    Consumes user TextFrames, generates a reply with DialoGPT-small, and pushes\n",
        "    the reply downstream as a TextFrame prefixed with 'AI: '. Every frame that\n",
        "    is not a user utterance is forwarded unchanged.\n",
        "    \"\"\"\n",
        "\n",
        "    # Keep at most this many trailing characters of transcript so repeated\n",
        "    # exchanges cannot overflow DialoGPT-small's context window.\n",
        "    MAX_HISTORY_CHARS = 1000\n",
        "\n",
        "    def __init__(self):\n",
        "        super().__init__()\n",
        "        print(\"🔄 Loading HuggingFace text generation model...\")\n",
        "        # Generation hyperparameters are supplied per call in process_frame;\n",
        "        # setting max_length here as well would conflict with max_new_tokens\n",
        "        # and trigger a transformers warning.\n",
        "        self.chatbot = hf_pipeline(\n",
        "            \"text-generation\",\n",
        "            model=\"microsoft/DialoGPT-small\",\n",
        "            pad_token_id=50256,\n",
        "        )\n",
        "        self.conversation_history = \"\"  # rolling 'User: ... Bot: ...' transcript\n",
        "        print(\"✅ Chat model loaded successfully!\")\n",
        "\n",
        "    async def process_frame(self, frame: Frame, direction: FrameDirection):\n",
        "        \"\"\"Reply to user TextFrames; pass every other frame through untouched.\"\"\"\n",
        "        await super().process_frame(frame, direction)\n",
        "\n",
        "        user_text = getattr(frame, \"text\", \"\").strip() if isinstance(frame, TextFrame) else None\n",
        "        if not user_text or user_text.startswith(\"AI:\"):\n",
        "            # Not a user utterance (non-text frame, empty text, or one of our\n",
        "            # own replies) — forward it instead of silently dropping it.\n",
        "            await self.push_frame(frame, direction)\n",
        "            return\n",
        "\n",
        "        print(f\"👤 USER: {user_text}\")\n",
        "        try:\n",
        "            if self.conversation_history:\n",
        "                input_text = f\"{self.conversation_history} User: {user_text} Bot:\"\n",
        "            else:\n",
        "                input_text = f\"User: {user_text} Bot:\"\n",
        "\n",
        "            response = self.chatbot(\n",
        "                input_text,\n",
        "                max_new_tokens=50,\n",
        "                num_return_sequences=1,\n",
        "                temperature=0.7,\n",
        "                do_sample=True,\n",
        "                pad_token_id=self.chatbot.tokenizer.eos_token_id\n",
        "            )\n",
        "\n",
        "            generated_text = response[0][\"generated_text\"]\n",
        "            if \"Bot:\" in generated_text:\n",
        "                # Keep only the continuation after the last prompt marker and\n",
        "                # cut it off if the model starts hallucinating the next user turn.\n",
        "                ai_response = generated_text.split(\"Bot:\")[-1].strip()\n",
        "                ai_response = ai_response.split(\"User:\")[0].strip()\n",
        "                if not ai_response:\n",
        "                    ai_response = \"That's interesting! Tell me more.\"\n",
        "            else:\n",
        "                ai_response = \"I'd love to hear more about that!\"\n",
        "\n",
        "            # Append this exchange, then trim to a bounded suffix so the prompt\n",
        "            # stays within the model's context on long conversations.\n",
        "            self.conversation_history = f\"{input_text} {ai_response}\"[-self.MAX_HISTORY_CHARS:]\n",
        "            await self.push_frame(TextFrame(text=f\"AI: {ai_response}\"), direction)\n",
        "        except Exception as e:\n",
        "            # Degrade gracefully: report the error and emit a fallback reply\n",
        "            # rather than stalling the pipeline.\n",
        "            print(f\"⚠️  Chat error: {e}\")\n",
        "            await self.push_frame(\n",
        "                TextFrame(text=\"AI: I'm having trouble processing that. Could you try rephrasing?\"),\n",
        "                direction\n",
        "            )"
      ],
      "metadata": {
        "id": "bf4xL0CQjxTv"
      },
      "execution_count": 10,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class TextDisplayProcessor(FrameProcessor):\n",
        "    \"\"\"Prints AI replies as they flow through the pipeline.\n",
        "\n",
        "    Keeps a running count of completed exchanges; every frame is forwarded\n",
        "    downstream untouched.\n",
        "    \"\"\"\n",
        "    def __init__(self):\n",
        "        super().__init__()\n",
        "        self.conversation_count = 0  # number of AI replies displayed so far\n",
        "\n",
        "    async def process_frame(self, frame: Frame, direction: FrameDirection):\n",
        "        await super().process_frame(frame, direction)\n",
        "        is_ai_reply = isinstance(frame, TextFrame) and getattr(frame, \"text\", \"\").startswith(\"AI:\")\n",
        "        if is_ai_reply:\n",
        "            text = getattr(frame, \"text\", \"\")\n",
        "            print(f\"🤖 {text}\")\n",
        "            self.conversation_count += 1\n",
        "            print(f\"    💭 Exchange {self.conversation_count} complete\\n\")\n",
        "        await self.push_frame(frame, direction)\n",
        "\n",
        "\n",
        "class ConversationInputGenerator:\n",
        "    \"\"\"Yields a fixed script of demo user utterances as TextFrames.\"\"\"\n",
        "    def __init__(self):\n",
        "        # Canned user inputs that drive the demo conversation, in order.\n",
        "        self.demo_conversations = [\n",
        "            \"Hello! How are you doing today?\",\n",
        "            \"What's your favorite thing to talk about?\",\n",
        "            \"Can you tell me something interesting about AI?\",\n",
        "            \"What makes conversation enjoyable for you?\",\n",
        "            \"Thanks for the great chat!\"\n",
        "        ]\n",
        "\n",
        "    async def generate_conversation(self) -> AsyncGenerator[TextFrame, None]:\n",
        "        \"\"\"Emit each scripted line, pausing 2s between all but the last.\"\"\"\n",
        "        print(\"🎭 Starting conversation simulation...\\n\")\n",
        "        last_index = len(self.demo_conversations) - 1\n",
        "        for index, user_input in enumerate(self.demo_conversations):\n",
        "            yield TextFrame(text=user_input)\n",
        "            if index != last_index:\n",
        "                await asyncio.sleep(2)"
      ],
      "metadata": {
        "id": "Ij9nWeNWj1rd"
      },
      "execution_count": 11,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class SimpleAIAgent:\n",
        "    \"\"\"Wires chat, display, and input generation into a Pipecat pipeline.\"\"\"\n",
        "    def __init__(self):\n",
        "        self.chat_processor = SimpleChatProcessor()\n",
        "        self.display_processor = TextDisplayProcessor()\n",
        "        self.input_generator = ConversationInputGenerator()\n",
        "\n",
        "    def create_pipeline(self) -> Pipeline:\n",
        "        \"\"\"Chat first, then display, so replies are printed downstream.\"\"\"\n",
        "        return Pipeline([self.chat_processor, self.display_processor])\n",
        "\n",
        "    async def run_demo(self):\n",
        "        \"\"\"Run the scripted conversation through the pipeline to completion.\"\"\"\n",
        "        print(\"🚀 Simple Pipecat AI Agent Demo\")\n",
        "        print(\"🎯 Conversational AI with HuggingFace\")\n",
        "        print(\"=\" * 50)\n",
        "\n",
        "        pipeline = self.create_pipeline()\n",
        "        runner = PipelineRunner()\n",
        "        task = PipelineTask(pipeline)\n",
        "\n",
        "        async def produce_frames():\n",
        "            # Feed scripted user frames into the task, then let it drain.\n",
        "            async for frame in self.input_generator.generate_conversation():\n",
        "                await task.queue_frame(frame)\n",
        "            await task.stop_when_done()\n",
        "\n",
        "        try:\n",
        "            print(\"🎬 Running conversation demo...\\n\")\n",
        "            # Run the pipeline and the frame producer concurrently; gather\n",
        "            # propagates the first exception raised by either coroutine.\n",
        "            await asyncio.gather(\n",
        "                runner.run(task),\n",
        "                produce_frames(),\n",
        "            )\n",
        "        except Exception as e:\n",
        "            print(f\"❌ Demo error: {e}\")\n",
        "            logging.error(f\"Pipeline error: {e}\")\n",
        "        else:\n",
        "            # Only claim success when no exception escaped the pipeline run.\n",
        "            print(\"✅ Demo completed successfully!\")"
      ],
      "metadata": {
        "id": "qWyqsee6j78t"
      },
      "execution_count": 12,
      "outputs": []
    },
    {
      "cell_type": "code",
      "execution_count": 13,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "gesOuAxfeBb3",
        "outputId": "13961c87-7c9e-4b06-9bfb-943a15c68bf3"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "🌐 Google Colab detected - Ready to run!\n",
            "\n",
            "============================================================\n",
            "🎬 READY TO RUN!\n",
            "Execute this cell to start the AI conversation demo\n",
            "============================================================\n",
            "\n",
            "🚀 Starting the AI Agent Demo...\n",
            "🎯 Pipecat AI Agent Tutorial\n",
            "📱 Google Colab Compatible\n",
            "🤖 Free HuggingFace Models\n",
            "🔧 Simple & Working Implementation\n",
            "============================================================\n",
            "🔄 Loading HuggingFace text generation model...\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Device set to use cpu\n",
            "\u001b[32m2025-08-13 08:34:31.372\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.processors.frame_processor\u001b[0m:\u001b[36mlink\u001b[0m:\u001b[36m505\u001b[0m - \u001b[34m\u001b[1mLinking PipelineSource#3 -> SimpleChatProcessor#4\u001b[0m\n",
            "\u001b[32m2025-08-13 08:34:31.375\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.processors.frame_processor\u001b[0m:\u001b[36mlink\u001b[0m:\u001b[36m505\u001b[0m - \u001b[34m\u001b[1mLinking SimpleChatProcessor#4 -> TextDisplayProcessor#3\u001b[0m\n",
            "\u001b[32m2025-08-13 08:34:31.377\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.processors.frame_processor\u001b[0m:\u001b[36mlink\u001b[0m:\u001b[36m505\u001b[0m - \u001b[34m\u001b[1mLinking TextDisplayProcessor#3 -> PipelineSink#3\u001b[0m\n",
            "\u001b[32m2025-08-13 08:34:31.381\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.processors.frame_processor\u001b[0m:\u001b[36mlink\u001b[0m:\u001b[36m505\u001b[0m - \u001b[34m\u001b[1mLinking PipelineTaskSource#2 -> Pipeline#3\u001b[0m\n",
            "\u001b[32m2025-08-13 08:34:31.382\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.processors.frame_processor\u001b[0m:\u001b[36mlink\u001b[0m:\u001b[36m505\u001b[0m - \u001b[34m\u001b[1mLinking Pipeline#3 -> PipelineTaskSink#2\u001b[0m\n",
            "\u001b[32m2025-08-13 08:34:31.384\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.pipeline.runner\u001b[0m:\u001b[36mrun\u001b[0m:\u001b[36m71\u001b[0m - \u001b[34m\u001b[1mRunner PipelineRunner#3 started running PipelineTask#2\u001b[0m\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "✅ Chat model loaded successfully!\n",
            "🚀 Simple Pipecat AI Agent Demo\n",
            "🎯 Conversational AI with HuggingFace\n",
            "==================================================\n",
            "🎬 Running conversation demo...\n",
            "\n",
            "🎭 Starting conversation simulation...\n",
            "\n",
            "👤 USER: Hello! How are you doing today?\n",
            "🤖 AI: I don't know.\n",
            "    💭 Exchange 1 complete\n",
            "\n",
            "👤 USER: What's your favorite thing to talk about?\n",
            "🤖 AI: I love you.\n",
            "    💭 Exchange 2 complete\n",
            "\n",
            "👤 USER: Can you tell me something interesting about AI?\n",
            "🤖 AI: How are you doing today?\n",
            "    💭 Exchange 3 complete\n",
            "\n",
            "👤 USER: What makes conversation enjoyable for you?\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\u001b[32m2025-08-13 08:36:09.801\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.pipeline.task\u001b[0m:\u001b[36mstop_when_done\u001b[0m:\u001b[36m423\u001b[0m - \u001b[34m\u001b[1mTask PipelineTask#2 scheduled to stop when done\u001b[0m\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "🤖 AI: It's fun!\n",
            "    💭 Exchange 4 complete\n",
            "\n",
            "👤 USER: Thanks for the great chat!\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\u001b[32m2025-08-13 08:36:51.737\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36mpipecat.pipeline.runner\u001b[0m:\u001b[36mrun\u001b[0m:\u001b[36m88\u001b[0m - \u001b[34m\u001b[1mRunner PipelineRunner#3 finished running PipelineTask#2\u001b[0m\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "🤖 AI: It was a pleasure!\n",
            "    💭 Exchange 5 complete\n",
            "\n",
            "✅ Demo completed successfully!\n",
            "\n",
            "🎉 Tutorial Complete!\n",
            "\n",
            "📚 What You Just Saw:\n",
            "✓ Pipecat pipeline architecture in action\n",
            "✓ Custom FrameProcessor implementations\n",
            "✓ HuggingFace conversational AI integration\n",
            "✓ Real-time text processing pipeline\n",
            "✓ Modular, extensible design\n",
            "\n",
            "🚀 Next Steps:\n",
            "• Add real speech-to-text input\n",
            "• Integrate text-to-speech output\n",
            "• Connect to better language models\n",
            "• Add memory and context management\n",
            "• Deploy as a web service\n"
          ]
        }
      ],
      "source": [
        "async def main():\n",
        "    \"\"\"Entry point: build the agent, run the demo, and print a recap.\"\"\"\n",
        "    logging.basicConfig(level=logging.INFO)\n",
        "    print(\"🎯 Pipecat AI Agent Tutorial\")\n",
        "    print(\"📱 Google Colab Compatible\")\n",
        "    print(\"🤖 Free HuggingFace Models\")\n",
        "    print(\"🔧 Simple & Working Implementation\")\n",
        "    print(\"=\" * 60)\n",
        "    try:\n",
        "        agent = SimpleAIAgent()\n",
        "        await agent.run_demo()\n",
        "        print(\"\\n🎉 Tutorial Complete!\")\n",
        "        print(\"\\n📚 What You Just Saw:\")\n",
        "        print(\"✓ Pipecat pipeline architecture in action\")\n",
        "        print(\"✓ Custom FrameProcessor implementations\")\n",
        "        print(\"✓ HuggingFace conversational AI integration\")\n",
        "        print(\"✓ Real-time text processing pipeline\")\n",
        "        print(\"✓ Modular, extensible design\")\n",
        "        print(\"\\n🚀 Next Steps:\")\n",
        "        print(\"• Add real speech-to-text input\")\n",
        "        print(\"• Integrate text-to-speech output\")\n",
        "        print(\"• Connect to better language models\")\n",
        "        print(\"• Add memory and context management\")\n",
        "        print(\"• Deploy as a web service\")\n",
        "    except Exception as e:\n",
        "        # Keep the notebook alive on failure: report the error with a full\n",
        "        # traceback instead of letting the exception kill the cell.\n",
        "        print(f\"❌ Tutorial failed: {e}\")\n",
        "        import traceback\n",
        "        traceback.print_exc()\n",
        "\n",
        "\n",
        "# Detect the runtime environment: importing google.colab only succeeds on Colab.\n",
        "try:\n",
        "    import google.colab\n",
        "    print(\"🌐 Google Colab detected - Ready to run!\")\n",
        "    ENV = \"colab\"\n",
        "except ImportError:\n",
        "    print(\"💻 Local environment detected\")\n",
        "    ENV = \"local\"\n",
        "\n",
        "print(\"\\n\" + \"=\"*60)\n",
        "print(\"🎬 READY TO RUN!\")\n",
        "print(\"Execute this cell to start the AI conversation demo\")\n",
        "print(\"=\"*60)\n",
        "\n",
        "print(\"\\n🚀 Starting the AI Agent Demo...\")\n",
        "\n",
        "# Jupyter/Colab kernels run inside an event loop, so top-level await is valid.\n",
        "await main()"
      ]
    }
  ]
}