{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "smjwVX0N6FNJ"
   },
   "source": [
     "# Model Fee Retrieval Agents\n",
    "\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MervinPraison/PraisonAI/blob/main/cookbooks/notebooks/model_fee_retreival_agents.ipynb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "qv6SmS6e6FNK"
   },
   "source": [
    "## Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "id": "JwPcM9oh6FNL"
   },
   "outputs": [],
   "source": [
    "# Install dependencies without output\n",
    "%pip install langchain_community > /dev/null\n",
    "%pip install praisonai[crewai] > /dev/null\n",
    "%pip install crawl4ai > /dev/null"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "fLQEc0cJ6FNM"
   },
   "source": [
    "## Tools"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "id": "6kObYPg16FNM"
   },
   "outputs": [],
   "source": [
    "from crawl4ai import AsyncWebCrawler\n",
    "from crawl4ai.extraction_strategy import LLMExtractionStrategy\n",
    "from pydantic import BaseModel, Field\n",
    "from praisonai_tools import BaseTool\n",
    "\n",
    "class ModelFee(BaseModel):\n",
    "    llm_model_name: str = Field(..., description=\"Name of the model.\")\n",
    "    input_fee: str = Field(..., description=\"Fee for input token for the model.\")\n",
    "    output_fee: str = Field(..., description=\"Fee for output token for the model.\")\n",
    "\n",
    "class ModelFeeTool(BaseTool):\n",
    "    name: str = \"ModelFeeTool\"\n",
    "    description: str = \"Extracts model fees for input and output tokens from the given pricing page.\"\n",
    "\n",
    "    def _run(self, url: str):\n",
    "        crawler = AsyncWebCrawler()\n",
    "        crawler.warmup()\n",
    "\n",
    "        result = crawler.run(\n",
    "            url=url,\n",
    "            word_count_threshold=1,\n",
    "            extraction_strategy= LLMExtractionStrategy(\n",
    "                provider=\"openai/gpt-5-nano\",\n",
    "                api_token=os.getenv('OPENAI_API_KEY'),\n",
    "                schema=ModelFee.schema(),\n",
    "                extraction_type=\"schema\",\n",
    "                instruction=\"\"\"From the crawled content, extract all mentioned model names along with their fees for input and output tokens.\n",
    "                Do not miss any models in the entire content. One extracted model JSON format should look like this:\n",
    "                {\"model_name\": \"GPT-4\", \"input_fee\": \"US$10.00 / 1M tokens\", \"output_fee\": \"US$30.00 / 1M tokens\"}.\"\"\"\n",
    "            ),\n",
    "            bypass_cache=True,\n",
    "        )\n",
    "        return result.extracted_content"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "kGlZt5kg6FNM"
   },
   "source": [
    "## YAML Prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "id": "Ih03W4l16FNN"
   },
   "outputs": [],
   "source": [
     "# CrewAI agent definition: a three-stage pipeline (scrape -> clean -> analyze).\n",
     "# Only the web_scraper role is given a tool; the \"ModelFeeTool\" name listed\n",
     "# under tools must match the `name` attribute of the tool class defined above.\n",
     "# The URLs embedded in the scraper's backstory are the pricing pages the agent\n",
     "# is expected to visit.\n",
     "agent_yaml = \"\"\"\n",
     "framework: \"crewai\"\n",
     "topic: \"extract model pricing from websites\"\n",
     "roles:\n",
     "  web_scraper:\n",
     "    role: \"Web Scraper\"\n",
     "    backstory: |\n",
     "      An expert in web scraping with a deep understanding of extracting structured\n",
     "      data from online sources.\n",
     "\n",
     "      https://openai.com/api/pricing/\n",
     "      https://www.anthropic.com/pricing\n",
     "      https://cohere.com/pricing\n",
     "    goal: \"Gather model pricing data from various websites\"\n",
     "    tasks:\n",
     "      scrape_model_pricing:\n",
     "        description: \"Scrape model pricing information from the provided list of websites.\"\n",
     "        expected_output: \"Raw HTML or JSON containing model pricing data.\"\n",
     "    tools:\n",
     "      - \"ModelFeeTool\"\n",
     "  data_cleaner:\n",
     "    role: \"Data Cleaner\"\n",
     "    backstory: \"Specialist in data cleaning, ensuring that all collected data is accurate and properly formatted.\"\n",
     "    goal: \"Clean and organize the scraped pricing data\"\n",
     "    tasks:\n",
     "      clean_pricing_data:\n",
     "        description: \"Process the raw scraped data to remove any duplicates and inconsistencies, and convert it into a structured format.\"\n",
     "        expected_output: \"Cleaned and organized JSON or CSV file with model pricing data.\"\n",
     "    tools: []\n",
     "  data_analyzer:\n",
     "    role: \"Data Analyzer\"\n",
     "    backstory: \"Data analysis expert focused on deriving actionable insights from structured data.\"\n",
     "    goal: \"Analyze the cleaned pricing data to extract insights\"\n",
     "    tasks:\n",
     "      analyze_pricing_data:\n",
     "        description: \"Analyze the cleaned data to extract trends, patterns, and insights on model pricing.\"\n",
     "        expected_output: \"Detailed report summarizing model pricing trends and insights.\"\n",
     "    tools: []\n",
     "dependencies: []\n",
     "\"\"\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "PfNBe0oo6FNN"
   },
   "source": [
    "## Main"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "awrEBlNi6FNN",
    "outputId": "c041a5dd-581a-46ea-9726-688c1fee6471"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m\u001b[95m [2024-11-02 12:52:01][DEBUG]: == Working Agent: Web Scraper\u001b[00m\n",
      "\u001b[1m\u001b[95m [2024-11-02 12:52:01][INFO]: == Starting Task: Scrape model pricing information from the provided list of websites.\u001b[00m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new CrewAgentExecutor chain...\u001b[0m\n",
      "\u001b[32;1m\u001b[1;3mThought: I need to extract model pricing data from the provided websites using the ModelFeeTool for each URL. I will start with the first URL.\n",
      "\n",
      "Action: ModelFeeTool  \n",
      "Action Input: {\"url\": \"https://openai.com/api/pricing/\"}  \n",
      "\u001b[0m"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/pydantic/main.py:214: UserWarning: A custom validator is returning a value other than `self`.\n",
      "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n",
      "See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LOG] 🚀 Initializing LocalSeleniumCrawlerStrategy\n",
      "[LOG] 🌤️  Warming up the AsyncWebCrawler\n",
      "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
      "[LOG] 🚀 Crawling done for https://openai.com/api/pricing/, success: True, time taken: 0.86 seconds\n",
      "[LOG] 🚀 Content extracted for https://openai.com/api/pricing/, success: True, time taken: 0.01 seconds\n",
      "[LOG] 🔥 Extracting semantic blocks for https://openai.com/api/pricing/, Strategy: LLMExtractionStrategy\n",
      "[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/pydantic/main.py:390: UserWarning: Pydantic serializer warnings:\n",
      "  Expected `PromptTokensDetails` but got `dict` with value `{'audio_tokens': None, 'cached_tokens': 0}` - serialized value may not be as expected\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LOG] Extracted 0 blocks from URL: https://openai.com/api/pricing/ block index: 0\n",
      "[LOG] 🚀 Extraction done for https://openai.com/api/pricing/, time taken: 8.94 seconds.\n",
      "\u001b[95m \n",
      "\n",
      "[]\n",
      "\u001b[00m\n",
      "\u001b[32;1m\u001b[1;3mThought: It seems that the first URL did not yield any model pricing data. I will proceed to the second URL to see if I can extract relevant information from there.\n",
      "\n",
      "Action: ModelFeeTool  \n",
      "Action Input: {\"url\": \"https://www.anthropic.com/pricing\"}  \n",
      "\u001b[0m"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/pydantic/main.py:214: UserWarning: A custom validator is returning a value other than `self`.\n",
      "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n",
      "See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LOG] 🚀 Initializing LocalSeleniumCrawlerStrategy\n",
      "[LOG] 🌤️  Warming up the AsyncWebCrawler\n",
      "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
      "[LOG] 🚀 Crawling done for https://www.anthropic.com/pricing, success: True, time taken: 2.93 seconds\n",
      "[LOG] 🚀 Content extracted for https://www.anthropic.com/pricing, success: True, time taken: 0.06 seconds\n",
      "[LOG] 🔥 Extracting semantic blocks for https://www.anthropic.com/pricing, Strategy: LLMExtractionStrategy\n",
      "[LOG] Call LLM for https://www.anthropic.com/pricing - block index: 0\n",
      "[LOG] Extracted 5 blocks from URL: https://www.anthropic.com/pricing block index: 0\n",
      "[LOG] 🚀 Extraction done for https://www.anthropic.com/pricing, time taken: 4.38 seconds.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/pydantic/main.py:390: UserWarning: Pydantic serializer warnings:\n",
      "  Expected `PromptTokensDetails` but got `dict` with value `{'audio_tokens': None, 'cached_tokens': 0}` - serialized value may not be as expected\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[95m \n",
      "\n",
      "[\n",
      "    {\n",
      "        \"llm_model_name\": \"Claude 3.5 Sonnet\",\n",
      "        \"input_fee\": \"$3 / MTok\",\n",
      "        \"output_fee\": \"$15 / MTok\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Claude 3.5 Haiku\",\n",
      "        \"input_fee\": \"$0.25 / MTok\",\n",
      "        \"output_fee\": \"$1.25 / MTok\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Claude 3 Opus\",\n",
      "        \"input_fee\": \"$15 / MTok\",\n",
      "        \"output_fee\": \"$75 / MTok\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Claude 3 Haiku\",\n",
      "        \"input_fee\": \"$0.25 / MTok\",\n",
      "        \"output_fee\": \"$1.25 / MTok\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Claude 3 Sonnet\",\n",
      "        \"input_fee\": \"$3 / MTok\",\n",
      "        \"output_fee\": \"$15 / MTok\",\n",
      "        \"error\": false\n",
      "    }\n",
      "]\n",
      "\u001b[00m\n",
      "\u001b[32;1m\u001b[1;3mAction: ModelFeeTool  \n",
      "Action Input: {\"url\": \"https://cohere.com/pricing\"}  \n",
      "\u001b[0m"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/pydantic/main.py:214: UserWarning: A custom validator is returning a value other than `self`.\n",
      "Returning anything other than `self` from a top level model validator isn't supported when validating via `__init__`.\n",
      "See the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LOG] 🚀 Initializing LocalSeleniumCrawlerStrategy\n",
      "[LOG] 🌤️  Warming up the AsyncWebCrawler\n",
      "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
      "[LOG] 🚀 Crawling done for https://cohere.com/pricing, success: True, time taken: 6.34 seconds\n",
      "[LOG] 🚀 Content extracted for https://cohere.com/pricing, success: True, time taken: 0.14 seconds\n",
      "[LOG] 🔥 Extracting semantic blocks for https://cohere.com/pricing, Strategy: LLMExtractionStrategy\n",
      "[LOG] Call LLM for https://cohere.com/pricing - block index: 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/pydantic/main.py:390: UserWarning: Pydantic serializer warnings:\n",
      "  Expected `PromptTokensDetails` but got `dict` with value `{'audio_tokens': None, 'cached_tokens': 0}` - serialized value may not be as expected\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LOG] Extracted 6 blocks from URL: https://cohere.com/pricing block index: 0\n",
      "[LOG] 🚀 Extraction done for https://cohere.com/pricing, time taken: 3.71 seconds.\n",
      "\u001b[95m \n",
      "\n",
      "[\n",
      "    {\n",
      "        \"llm_model_name\": \"Command R+\",\n",
      "        \"input_fee\": \"$2.50 / 1M tokens\",\n",
      "        \"output_fee\": \"$10.00 / 1M tokens\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Command R\",\n",
      "        \"input_fee\": \"$0.15 / 1M tokens\",\n",
      "        \"output_fee\": \"$0.60 / 1M tokens\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Command R Fine-tuned Model\",\n",
      "        \"input_fee\": \"$0.30 / 1M tokens\",\n",
      "        \"output_fee\": \"$1.20 / 1M tokens\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Command R 03-2024\",\n",
      "        \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "        \"output_fee\": \"$1.50 / 1M tokens\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Command R+ 04-2024\",\n",
      "        \"input_fee\": \"$3.00 / 1M tokens\",\n",
      "        \"output_fee\": \"$15.00 / 1M tokens\",\n",
      "        \"error\": false\n",
      "    },\n",
      "    {\n",
      "        \"llm_model_name\": \"Aya Expanse\",\n",
      "        \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "        \"output_fee\": \"$1.50 / 1M tokens\",\n",
      "        \"error\": false\n",
      "    }\n",
      "]\n",
      "\u001b[00m\n",
      "\u001b[32;1m\u001b[1;3mThought: I have now scraped model pricing data from the second and third URLs. I need to compile all the data I've gathered so far into a structured format.\n",
      "\n",
      "Final Answer: {\n",
      "    \"OpenAI\": [],\n",
      "    \"Anthropic\": [\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3.5 Sonnet\",\n",
      "            \"input_fee\": \"$3 / MTok\",\n",
      "            \"output_fee\": \"$15 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3.5 Haiku\",\n",
      "            \"input_fee\": \"$0.25 / MTok\",\n",
      "            \"output_fee\": \"$1.25 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3 Opus\",\n",
      "            \"input_fee\": \"$15 / MTok\",\n",
      "            \"output_fee\": \"$75 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3 Haiku\",\n",
      "            \"input_fee\": \"$0.25 / MTok\",\n",
      "            \"output_fee\": \"$1.25 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3 Sonnet\",\n",
      "            \"input_fee\": \"$3 / MTok\",\n",
      "            \"output_fee\": \"$15 / MTok\"\n",
      "        }\n",
      "    ],\n",
      "    \"Cohere\": [\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R+\",\n",
      "            \"input_fee\": \"$2.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$10.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R\",\n",
      "            \"input_fee\": \"$0.15 / 1M tokens\",\n",
      "            \"output_fee\": \"$0.60 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R Fine-tuned Model\",\n",
      "            \"input_fee\": \"$0.30 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.20 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R 03-2024\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R+ 04-2024\",\n",
      "            \"input_fee\": \"$3.00 / 1M tokens\",\n",
      "            \"output_fee\": \"$15.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Aya Expanse\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        }\n",
      "    ]\n",
      "}\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\u001b[1m\u001b[92m [2024-11-02 12:52:57][DEBUG]: == [Web Scraper] Task output: {\n",
      "    \"OpenAI\": [],\n",
      "    \"Anthropic\": [\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3.5 Sonnet\",\n",
      "            \"input_fee\": \"$3 / MTok\",\n",
      "            \"output_fee\": \"$15 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3.5 Haiku\",\n",
      "            \"input_fee\": \"$0.25 / MTok\",\n",
      "            \"output_fee\": \"$1.25 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3 Opus\",\n",
      "            \"input_fee\": \"$15 / MTok\",\n",
      "            \"output_fee\": \"$75 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3 Haiku\",\n",
      "            \"input_fee\": \"$0.25 / MTok\",\n",
      "            \"output_fee\": \"$1.25 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Claude 3 Sonnet\",\n",
      "            \"input_fee\": \"$3 / MTok\",\n",
      "            \"output_fee\": \"$15 / MTok\"\n",
      "        }\n",
      "    ],\n",
      "    \"Cohere\": [\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R+\",\n",
      "            \"input_fee\": \"$2.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$10.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R\",\n",
      "            \"input_fee\": \"$0.15 / 1M tokens\",\n",
      "            \"output_fee\": \"$0.60 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R Fine-tuned Model\",\n",
      "            \"input_fee\": \"$0.30 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.20 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R 03-2024\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Command R+ 04-2024\",\n",
      "            \"input_fee\": \"$3.00 / 1M tokens\",\n",
      "            \"output_fee\": \"$15.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"llm_model_name\": \"Aya Expanse\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        }\n",
      "    ]\n",
      "}\n",
      "\n",
      "\u001b[00m\n",
      "\u001b[1m\u001b[95m [2024-11-02 12:52:57][DEBUG]: == Working Agent: Data Cleaner\u001b[00m\n",
      "\u001b[1m\u001b[95m [2024-11-02 12:52:57][INFO]: == Starting Task: Process the raw scraped data to remove any duplicates and inconsistencies, and convert it into a structured format.\u001b[00m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new CrewAgentExecutor chain...\u001b[0m\n",
      "\u001b[32;1m\u001b[1;3mI now can give a great answer  \n",
      "Final Answer: \n",
      "```json\n",
      "{\n",
      "    \"models\": [\n",
      "        {\n",
      "            \"provider\": \"Anthropic\",\n",
      "            \"llm_model_name\": \"Claude 3.5 Sonnet\",\n",
      "            \"input_fee\": \"$3 / MTok\",\n",
      "            \"output_fee\": \"$15 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Anthropic\",\n",
      "            \"llm_model_name\": \"Claude 3.5 Haiku\",\n",
      "            \"input_fee\": \"$0.25 / MTok\",\n",
      "            \"output_fee\": \"$1.25 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Anthropic\",\n",
      "            \"llm_model_name\": \"Claude 3 Opus\",\n",
      "            \"input_fee\": \"$15 / MTok\",\n",
      "            \"output_fee\": \"$75 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R+\",\n",
      "            \"input_fee\": \"$2.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$10.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R\",\n",
      "            \"input_fee\": \"$0.15 / 1M tokens\",\n",
      "            \"output_fee\": \"$0.60 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R Fine-tuned Model\",\n",
      "            \"input_fee\": \"$0.30 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.20 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R 03-2024\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R+ 04-2024\",\n",
      "            \"input_fee\": \"$3.00 / 1M tokens\",\n",
      "            \"output_fee\": \"$15.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Aya Expanse\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        }\n",
      "    ]\n",
      "}\n",
      "```\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\u001b[1m\u001b[92m [2024-11-02 12:53:29][DEBUG]: == [Data Cleaner] Task output: ```json\n",
      "{\n",
      "    \"models\": [\n",
      "        {\n",
      "            \"provider\": \"Anthropic\",\n",
      "            \"llm_model_name\": \"Claude 3.5 Sonnet\",\n",
      "            \"input_fee\": \"$3 / MTok\",\n",
      "            \"output_fee\": \"$15 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Anthropic\",\n",
      "            \"llm_model_name\": \"Claude 3.5 Haiku\",\n",
      "            \"input_fee\": \"$0.25 / MTok\",\n",
      "            \"output_fee\": \"$1.25 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Anthropic\",\n",
      "            \"llm_model_name\": \"Claude 3 Opus\",\n",
      "            \"input_fee\": \"$15 / MTok\",\n",
      "            \"output_fee\": \"$75 / MTok\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R+\",\n",
      "            \"input_fee\": \"$2.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$10.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R\",\n",
      "            \"input_fee\": \"$0.15 / 1M tokens\",\n",
      "            \"output_fee\": \"$0.60 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R Fine-tuned Model\",\n",
      "            \"input_fee\": \"$0.30 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.20 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R 03-2024\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Command R+ 04-2024\",\n",
      "            \"input_fee\": \"$3.00 / 1M tokens\",\n",
      "            \"output_fee\": \"$15.00 / 1M tokens\"\n",
      "        },\n",
      "        {\n",
      "            \"provider\": \"Cohere\",\n",
      "            \"llm_model_name\": \"Aya Expanse\",\n",
      "            \"input_fee\": \"$0.50 / 1M tokens\",\n",
      "            \"output_fee\": \"$1.50 / 1M tokens\"\n",
      "        }\n",
      "    ]\n",
      "}\n",
      "```\n",
      "\n",
      "\u001b[00m\n",
      "\u001b[1m\u001b[95m [2024-11-02 12:53:29][DEBUG]: == Working Agent: Data Analyzer\u001b[00m\n",
      "\u001b[1m\u001b[95m [2024-11-02 12:53:29][INFO]: == Starting Task: Analyze the cleaned data to extract trends, patterns, and insights on model pricing.\u001b[00m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new CrewAgentExecutor chain...\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "from praisonai import PraisonAI\n",
    "from google.colab import userdata\n",
    "\n",
    "# Create a PraisonAI instance with the agent_yaml content\n",
    "praisonai = PraisonAI(agent_yaml=agent_yaml, tools=[ModelFeeTool])\n",
    "\n",
    "# Add OPENAI_API_KEY Secrets to Google Colab on the Left Hand Side 🔑 or Enter Manually Below\n",
    "os.environ[\"OPENAI_API_KEY\"] = userdata.get('OPENAI_API_KEY') or \"ENTER OPENAI_API_KEY HERE\"\n",
    "os.environ[\"OPENAI_MODEL_NAME\"] = \"gpt-5-nano\"\n",
    "\n",
    "# Run PraisonAI\n",
    "result = praisonai.run()\n",
    "\n",
    "# Print the result\n",
    "print(result) # 2/10\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "VtdYTa0a7d_x"
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
