{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "EElURkc9VGpZ"
      },
      "outputs": [],
      "source": [
        "# Copyright 2025 Google LLC\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     https://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "l7hzJLswRlgJ"
      },
      "source": [
        "# Evaluating Third-Party LLMs with the Vertex AI Gen AI Evaluation SDK"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "U_zRPTySRjHX"
      },
      "source": [
        "<table align=\"left\">\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\">\n",
        "      <img width=\"32px\" src=\"https://www.gstatic.com/pantheon/images/bigquery/welcome_page/colab-logo.svg\" alt=\"Google Colaboratory logo\"><br> Open in Colab\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://console.cloud.google.com/vertex-ai/colab/import/https:%2F%2Fraw.githubusercontent.com%2FGoogleCloudPlatform%2Fgenerative-ai%2Fmain%2Fgemini%2Fevaluation%2Fevaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\">\n",
        "      <img width=\"32px\" src=\"https://lh3.googleusercontent.com/JmcxdQi-qOpctIvWKgPtrzZdJJK-J3sWE1RsfjZNwshCFgE_9fULcNpuXYTilIR2hjwN\" alt=\"Google Cloud Colab Enterprise logo\"><br> Open in Colab Enterprise\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/generative-ai/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\">\n",
        "      <img src=\"https://www.gstatic.com/images/branding/gcpiconscolors/vertexai/v1/32px.svg\" alt=\"Vertex AI logo\"><br> Open in Vertex AI Workbench\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\">\n",
        "      <img width=\"32px\" src=\"https://raw.githubusercontent.com/primer/octicons/refs/heads/main/icons/mark-github-24.svg\" alt=\"GitHub logo\"><br> View on GitHub\n",
        "    </a>\n",
        "  </td>\n",
        "</table>\n",
        "\n",
        "<div style=\"clear: both;\"></div>\n",
        "\n",
        "<b>Share to:</b>\n",
        "\n",
        "<a href=\"https://www.linkedin.com/sharing/share-offsite/?url=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/8/81/LinkedIn_icon.svg\" alt=\"LinkedIn logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://bsky.app/intent/compose?text=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/7/7a/Bluesky_Logo.svg\" alt=\"Bluesky logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://twitter.com/intent/tweet?url=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/5/5a/X_icon_2.svg\" alt=\"X logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://reddit.com/submit?url=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://redditinc.com/hubfs/Reddit%20Inc/Brand/Reddit_Logo.png\" alt=\"Reddit logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://www.facebook.com/sharer/sharer.php?u=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/5/51/Facebook_f_logo_%282019%29.svg\" alt=\"Facebook logo\">\n",
        "</a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "YK2R3uVZR1RD"
      },
      "source": [
        "| Author |\n",
        "| --- |\n",
        "| [Jason Dai](https://github.com/jsondai) |"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "1ijw7-AyR344"
      },
      "source": [
        "## Overview\n",
        "\n",
        "This notebook demonstrates how to use the new Vertex Gen AI Evaluation SDK to evaluate various types of third-party models. Whether you're working with open models you've deployed yourself, models hosted on other platforms, or managed model services available in the Model Garden, this SDK provides a unified way to assess their performance.\n",
        "\n",
        "We will explore three main scenarios:\n",
        "\n",
        "1.  **Evaluating Third-Party Models via APIs:** This method is ideal for accessing closed-source models from various third-party (3P) providers such as OpenAI, Anthropic, Cohere, etc. These models are typically accessed via an API key. The Vertex AI GenAI Evaluation SDK integrates with the `litellm` library, which acts as a universal translator, allowing you to call over 100 different LLM APIs using a consistent format. You simply need to provide the appropriate API key for the service you wish to use (e.g., `OPENAI_API_KEY` for OpenAI models like `gpt-5`) and Vertex AI GenAI Evaluation SDK handles the provider-specific API calls. For a full list of supported providers and model string formats, refer to the [LiteLLM Supported Providers](https://docs.litellm.ai/docs/providers) page.\n",
        "   \n",
        "\n",
        "2.   **Model as a Service (MaaS) from Model Garden:** Evaluating partner models, such as `llama-4` from Meta, which are offered as managed services within Vertex AI Model Garden. These models often utilize an OpenAI-compatible API format for inference.\n",
        "\n",
        "\n",
        "3.  **Bring Your Own Model (BYOM) Endpoint:**  This method supports evaluating models that you manage and serve independently. This could be on your own hardware, a different cloud service, or a local machine. To integrate with the Vertex AI Evaluation SDK, you implement a Python function that knows how to communicate with your model's specific serving endpoint to generate responses for given prompts.\n",
        "\n",
        "\n",
        "\n",
        "Throughout this tutorial, we will use the `vertexai` SDK's new `Client` interface, which provides a streamlined way to interact with Vertex AI services, including the GenAI Evaluation Service.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5E1mN9wWT-Ol"
      },
      "source": [
        "### Costs\n",
        "\n",
        "This tutorial uses billable components of Google Cloud:\n",
        "\n",
        "- Vertex AI\n",
        "\n",
        "Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9yt-EPacUD4j"
      },
      "source": [
        "## Getting Started\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-zTjlxYMRAog"
      },
      "outputs": [],
      "source": [
        "# @title ### Install Vertex AI SDK for Gen AI Evaluation Service\n",
        "\n",
        "%pip install --upgrade \"google-cloud-aiplatform[evaluation]>=1.115.0\" litellm --force-reinstall --quiet --no-warn-conflicts"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "AWP0pW9hUOBR"
      },
      "outputs": [],
      "source": [
        "# @title ### Authenticate your notebook environment (Colab only)\n",
        "# @markdown If you're running this notebook on Google Colab, run the cell below to authenticate your environment.\n",
        "\n",
        "import sys\n",
        "\n",
        "if \"google.colab\" in sys.modules:\n",
        "    from google.colab import auth\n",
        "\n",
        "    auth.authenticate_user()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "LHpeZ2pWUQag"
      },
      "outputs": [],
      "source": [
        "# @title ### Set Google Cloud project information\n",
        "# @markdown To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n",
        "# @markdown  Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment).\n",
        "\n",
        "import os\n",
        "\n",
        "# fmt: off\n",
        "PROJECT_ID = \"\"  # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
        "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
        "    PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
        "LOCATION = \"us-central1\"  # @param {type: \"string\", placeholder: \"us-central1\", isTemplate: true}\n",
        "# fmt: on\n",
        "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", LOCATION)\n",
        "\n",
        "\n",
        "from vertexai import Client, types\n",
        "\n",
        "client = Client(project=PROJECT_ID, location=LOCATION)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zafdt1_uCIZ-"
      },
      "source": [
        "## Evaluating Third-Party Models via API\n",
        "\n",
        "The Vertex Gen AI Evaluation SDK allows you to evaluate a wide range of closed-source models from various third-party (3P) providers such as OpenAI, Anthropic, Cohere, and others. To connect to these models, you typically need to provide an API key for the respective service.\n",
        "\n",
        "The SDK simplifies this process by providing a unified way to call these different LLM APIs. You can generally use the same code structure, just changing the model identifier string and ensuring the correct API key is available in your environment.\n",
        "\n",
        "**Key Features:**\n",
        "\n",
        "*   **Broad Provider Support:** Evaluate models from many popular LLM providers.\n",
        "*   **Simplified Workflow:** Use a consistent method within the SDK to run inference, regardless of the 3P provider.\n",
        "*   **API Key Authentication:** Securely authenticate with each provider using their standard API keys.\n",
        "\n",
        "**Note:** You need to set the appropriate API keys as environment variables for the provider you intend to use (e.g., `OPENAI_API_KEY` for OpenAI models, `ANTHROPIC_API_KEY` for Anthropic models, etc.). Consult the specific provider's documentation for details on obtaining and setting API keys. For a list of compatible model names, you can refer to the [LiteLLM Supported Providers](https://docs.litellm.ai/docs/providers) page.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "rXLwn-9oDrzt"
      },
      "outputs": [],
      "source": [
        "# 1. Example using OpenAI\n",
        "LITELLM_MODEL_ID = \"gpt-5-mini\"\n",
        "# Make sure your OPENAI_API_KEY environment variable is set.\n",
        "# fmt: off\n",
        "os.environ[\"OPENAI_API_KEY\"] = \"\"  # @param {type:\"string\", placeholder: \"[your-openai-api-key]\"}\n",
        "# WARNING: Setting API keys directly in code is insecure. Use environment variables or secure storage.\n",
        "\n",
        "# Alternatively, load your OPENAI_API_KEY from the Colab Secrets manager.\n",
        "# Only used when no key was provided above and we are running in Colab,\n",
        "# so the form value is not clobbered and non-Colab runs do not fail.\n",
        "import sys\n",
        "\n",
        "if not os.environ.get(\"OPENAI_API_KEY\") and \"google.colab\" in sys.modules:\n",
        "    from google.colab import userdata\n",
        "\n",
        "    os.environ[\"OPENAI_API_KEY\"] = userdata.get(\"OPENAI_API_KEY\")\n",
        "\n",
        "\n",
        "# 2. Example for Anthropic:\n",
        "# LITELLM_MODEL_ID = \"anthropic/claude-3-5-sonnet-20240620\" # Example using Anthropic\n",
        "# os.environ[\"ANTHROPIC_API_KEY\"] = \"\" # @param {type:\"string\", placeholder: \"[your-anthropic-api-key]\"}\n",
        "# fmt: on\n",
        "\n",
        "\n",
        "# Run the evaluation\n",
        "openai_responses = client.evals.run_inference(\n",
        "    model=LITELLM_MODEL_ID,\n",
        "    src=\"gs://vertex-evaluation-llm-dataset-us-central1/genai_eval_sdk/test_prompts.jsonl\",\n",
        ")\n",
        "openai_responses.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "EGEYPLGcDt0K"
      },
      "outputs": [],
      "source": [
        "eval_result = client.evals.evaluate(\n",
        "    dataset=openai_responses,\n",
        "    metrics=[\n",
        "        types.RubricMetric.GENERAL_QUALITY,\n",
        "        types.RubricMetric.INSTRUCTION_FOLLOWING,\n",
        "        types.Metric(name=\"rouge_1\"),\n",
        "        types.Metric(name=\"bleu\"),\n",
        "    ],\n",
        ")\n",
        "eval_result.show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "yy7tcU_q0bVR"
      },
      "source": [
        "## Model as a Service (MaaS) from Model Garden\n",
        "\n",
        "Vertex AI Model Garden offers partner models, such as Llama from Meta, as fully managed services. These Model as a Service (MaaS) offerings allow you to use these models without needing to deploy or manage the underlying infrastructure. These MaaS endpoints often expose an OpenAI-compatible API for inference.\n",
        "\n",
        "The Vertex Gen AI Evaluation SDK can directly interact with these MaaS endpoints. You will typically need to use a service account with appropriate permissions to authenticate.\n",
        "\n",
        "**Key Features:**\n",
        "\n",
        "*   **Managed Endpoints:** No need to deploy or manage model serving infrastructure.\n",
        "*   **OpenAI-Compatible API:** Many MaaS models use a familiar API structure.\n",
        "*   **Integrated Evaluation:** Seamlessly evaluate these models within your Google Cloud environment using the Vertex AI GenAI Evaluation SDK.\n",
        "\n",
        "**Note:** Ensure your Google Cloud project has enabled the specific MaaS model API from the Model Garden and that your service account has the `Vertex AI User` role.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "d2efe43f"
      },
      "source": [
        "**Steps to set the environment variable `GOOGLE_APPLICATION_CREDENTIALS`:**\n",
        "\n",
        "1. [Create a service account key](https://cloud.google.com/iam/docs/keys-create-delete#creating) inside IAM & Admin from your Google Cloud Console.\n",
        "\n",
        "2. Upload the downloaded key in JSON format to this Colab notebook's runtime by clicking the folder icon on the left sidebar, then the \"Upload\" icon (a sheet of paper with an arrow pointing up). Select the JSON key file from your local computer.\n",
        "\n",
        "3. Once uploaded, right-click on the file and select \"Copy Path\". You can refer to the file by its path in the notebook below."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "jZYWxTZ818Vs"
      },
      "outputs": [],
      "source": [
        "# fmt: off\n",
        "GOOGLE_APP_CRED_PATH = \"\"  # @param {type:\"string\", placeholder: \"[your-google-application-cred-file-path]\"}\n",
        "# fmt: on\n",
        "\n",
        "os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = GOOGLE_APP_CRED_PATH\n",
        "os.environ[\"VERTEXAI_PROJECT\"] = PROJECT_ID\n",
        "os.environ[\"VERTEXAI_LOCATION\"] = LOCATION"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "WzRvLm2kyoG6"
      },
      "outputs": [],
      "source": [
        "# @title Run inference on MaaS models to create eval dataset\n",
        "\n",
        "# Select a MaaS model. Remember to check regional availability!\n",
        "\n",
        "# model = \"deepseek-ai/deepseek-r1-0528-maas\" # Example model\n",
        "# model = \"meta/llama-3.1-70b-instruct-maas\"  # Example model\n",
        "# model = \"meta/llama-4-maverick-17b-128e-instruct-maas\"   # Example model in us-east5\n",
        "# model = \"claude-3-5-haiku\"  # Example model in us-east5\n",
        "# model = \"qwen/qwen3-coder-480b-a35b-instruct-maas\"  # Example model in us-south1\n",
        "\n",
        "# fmt: off\n",
        "MODEL_ID = \"deepseek-ai/deepseek-r1-0528-maas\"  # @param {type:\"string\"}\n",
        "# fmt: on\n",
        "\n",
        "eval_dataset = client.evals.run_inference(\n",
        "    model=MODEL_ID,\n",
        "    src=\"gs://vertex-evaluation-llm-dataset-us-central1/genai_eval_sdk/test_prompts.jsonl\",\n",
        ")\n",
        "eval_dataset.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ErRPHzOx2239"
      },
      "outputs": [],
      "source": [
        "maas_eval_result = client.evals.evaluate(\n",
        "    dataset=eval_dataset,\n",
        "    metrics=[\n",
        "        types.RubricMetric.GENERAL_QUALITY,\n",
        "        types.RubricMetric.INSTRUCTION_FOLLOWING,\n",
        "        types.Metric(name=\"rouge_1\"),\n",
        "        types.Metric(name=\"bleu\"),\n",
        "    ],\n",
        ")\n",
        "maas_eval_result.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "fdkfe9zpyn79"
      },
      "outputs": [],
      "source": [
        "# @title End-to-End Example: Evaluating 3P partner models for MaaS\n",
        "\n",
        "# Ensure GOOGLE_APPLICATION_CREDENTIALS, VERTEXAI_PROJECT, and VERTEXAI_LOCATION are set.\n",
        "os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = GOOGLE_APP_CRED_PATH\n",
        "os.environ[\"VERTEXAI_PROJECT\"] = PROJECT_ID\n",
        "os.environ[\"VERTEXAI_LOCATION\"] = LOCATION\n",
        "\n",
        "# Select a MaaS model. Remember to check regional availability!\n",
        "# model = \"deepseek-ai/deepseek-r1-0528-maas\" # Example model\n",
        "# model = \"meta/llama-3.1-70b-instruct-maas\"  # Example model\n",
        "# model = \"meta/llama-4-maverick-17b-128e-instruct-maas\"   # Example model in us-east5\n",
        "# model = \"claude-3-5-haiku\"  # Example model in us-east5\n",
        "# model = \"qwen/qwen3-coder-480b-a35b-instruct-maas\"  # Example model in us-south1\n",
        "\n",
        "MAAS_MODEL_ID = \"meta/llama-3.1-70b-instruct-maas\"  # Replace with the MaaS model you want to evaluate\n",
        "\n",
        "\n",
        "print(f\"--- Running Inference for MaaS Model: {MAAS_MODEL_ID} ---\")\n",
        "\n",
        "maas_responses = client.evals.run_inference(\n",
        "    model=MAAS_MODEL_ID,\n",
        "    src=\"gs://vertex-evaluation-llm-dataset-us-central1/genai_eval_sdk/test_prompts.jsonl\",\n",
        ")\n",
        "\n",
        "print(f\"\\n--- Running Evaluation for MaaS Model: {MAAS_MODEL_ID} ---\")\n",
        "maas_eval_result = client.evals.evaluate(\n",
        "    dataset=maas_responses,\n",
        "    metrics=[\n",
        "        types.RubricMetric.GENERAL_QUALITY,\n",
        "        types.RubricMetric.INSTRUCTION_FOLLOWING,\n",
        "        types.Metric(name=\"rouge_1\"),\n",
        "        types.Metric(name=\"bleu\"),\n",
        "    ],\n",
        ")\n",
        "print(\"Evaluation complete. Displaying report:\")\n",
        "maas_eval_result.show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5053b88e"
      },
      "source": [
        "## Bring Your Own Model (BYOM) Endpoint\n",
        "\n",
        "The Vertex Gen AI Evaluation SDK allows you to provide a generic Python function as input to specify how the model or application should be invoked for batch inference. This could be done through an endpoint or an SDK. This flexible approach accommodates a wide range of open and closed models, enabling you to evaluate models that you manage and serve independently."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "Eukfi_yV8V6H"
      },
      "outputs": [],
      "source": [
        "# @title #### Define your custom inference function\n",
        "# @markdown Here we use a one-click deployed `llama-4-maverick` model endpoint as an example.\n",
        "# @markdown When creating your own endpoint inference function, make sure to verify that\n",
        "# @markdown the dedicated endpoint is specified correctly in the inference function.\n",
        "\n",
        "\n",
        "import json\n",
        "import subprocess\n",
        "\n",
        "import requests\n",
        "\n",
        "# fmt: off\n",
        "PROJECT_ID_BYOM = \"\"  # @param {type:\"string\", placeholder: \"[your-project-id]\"}\n",
        "ENDPOINT_ID_BYOM = \"\"  # @param {type:\"string\", placeholder: \"[your-endpoint-id]\"}\n",
        "LOCATION_BYOM = \"us-central1\"  # @param {type:\"string\", placeholder: \"[your-location]\"}\n",
        "# fmt: on\n",
        "\n",
        "\n",
        "def custom_model_inference_fn(prompt: str) -> str | None:\n",
        "    \"\"\"Calls the specific Vertex AI endpoint with the given prompt using the requests library.\n",
        "\n",
        "    Args:\n",
        "        prompt (str): The input prompt for the model.\n",
        "\n",
        "    Returns:\n",
        "        str: The model's response content as a string, or None if an error occurs.\n",
        "    \"\"\"\n",
        "    project_id: str = PROJECT_ID_BYOM\n",
        "    endpoint_id: str = ENDPOINT_ID_BYOM\n",
        "    location: str = LOCATION_BYOM\n",
        "\n",
        "    # Dedicated endpoint URL taken from the sample request. NOTE: the project\n",
        "    # number in the hostname below (410429375534) is specific to the project\n",
        "    # that deployed the sample endpoint -- replace it with your own project\n",
        "    # number, since dedicated endpoint hostnames follow the pattern\n",
        "    # {ENDPOINT_ID}.{LOCATION}-{PROJECT_NUMBER}.prediction.vertexai.goog\n",
        "    endpoint_url: str = f\"https://{endpoint_id}.{location}-410429375534.prediction.vertexai.goog/v1/projects/{project_id}/locations/{location}/endpoints/{endpoint_id}:predict\"\n",
        "    try:\n",
        "        token = subprocess.run(\n",
        "            [\"gcloud\", \"auth\", \"print-access-token\"],\n",
        "            capture_output=True,\n",
        "            text=True,\n",
        "            check=True,\n",
        "        ).stdout.strip()\n",
        "    except subprocess.CalledProcessError as e:\n",
        "        print(f\"Error getting gcloud access token: {e}\")\n",
        "        return None\n",
        "    headers = {\"Authorization\": f\"Bearer {token}\", \"Content-Type\": \"application/json\"}\n",
        "\n",
        "    # Construct the JSON payload in the chatCompletions format\n",
        "    payload = {\n",
        "        \"instances\": [\n",
        "            {\n",
        "                \"@requestFormat\": \"chatCompletions\",\n",
        "                \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
        "                \"max_tokens\": 100,\n",
        "            }\n",
        "        ]\n",
        "    }\n",
        "\n",
        "    try:\n",
        "        # Make the POST request\n",
        "        response = requests.post(\n",
        "            endpoint_url, headers=headers, data=json.dumps(payload)\n",
        "        )\n",
        "        response.raise_for_status()\n",
        "        json_response = response.json()\n",
        "\n",
        "        # Extract the content from the response\n",
        "        try:\n",
        "            return json_response[\"predictions\"][\"choices\"][0][\"message\"][\"content\"]\n",
        "        except (KeyError, IndexError, TypeError) as e:\n",
        "            print(\n",
        "                f\"Could not extract content from response: {e}. Response: {json_response}\"\n",
        "            )\n",
        "            return None\n",
        "\n",
        "    except requests.exceptions.RequestException as e:\n",
        "        print(f\"Error calling Vertex AI endpoint: {e}\")\n",
        "        return None"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "v8aLZB4u8gj2"
      },
      "outputs": [],
      "source": [
        "custom_model_inference_fn(\"hello\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1WfCydav52aL"
      },
      "outputs": [],
      "source": [
        "print(\"--- Running Inference for Bring Your Own Model (BYOM) Endpoint ---\")\n",
        "# run_inference generates responses for the prompts in eval_df\n",
        "vertex_endpoint_responses = client.evals.run_inference(\n",
        "    model=custom_model_inference_fn,\n",
        "    src=\"gs://vertex-evaluation-llm-dataset-us-central1/genai_eval_sdk/test_prompts.jsonl\",\n",
        ")\n",
        "vertex_endpoint_responses.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "S32PfUAin_Jd"
      },
      "outputs": [],
      "source": [
        "vertex_endpoint_eval_result = client.evals.evaluate(\n",
        "    dataset=vertex_endpoint_responses,\n",
        "    metrics=[\n",
        "        types.RubricMetric.GENERAL_QUALITY,\n",
        "        types.RubricMetric.INSTRUCTION_FOLLOWING,\n",
        "        types.Metric(name=\"rouge_1\"),\n",
        "        types.Metric(name=\"bleu\"),\n",
        "    ],\n",
        ")\n",
        "vertex_endpoint_eval_result.show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KfspSAhV1CEl"
      },
      "source": [
        "## Example: Comparing Multiple Models\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "yupFwcCX1c1m"
      },
      "source": [
        "The Vertex Gen AI Evaluation SDK makes it easy to compare the performance of multiple models on the same dataset and metrics. You can achieve this by running inference separately for each model and then passing the resulting `EvaluationDataset` objects as a list to the `client.evals.evaluate()` method.\n",
        "\n",
        "The evaluation service will then compute the specified metrics for each model's responses, allowing for a side-by-side comparison.\n",
        "\n",
        "**Note:** The following example uses a small subset of the data (10 rows) for demonstration purposes. This is not intended for rigorous benchmarking but to illustrate the comparison functionality.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "wQF7SucFoBPk"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "\n",
        "# Refer to instructions in MaaS section above for setting up credentials.\n",
        "# fmt: off\n",
        "GOOGLE_APP_CRED_PATH = \"\"  # @param {type:\"string\", placeholder: \"[your-google-application-cred-file-path]\"}\n",
        "# fmt: on\n",
        "\n",
        "os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = GOOGLE_APP_CRED_PATH\n",
        "os.environ[\"VERTEXAI_PROJECT\"] = PROJECT_ID\n",
        "os.environ[\"VERTEXAI_LOCATION\"] = LOCATION"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1XbtjOSriwpo"
      },
      "outputs": [],
      "source": [
        "import pandas as pd\n",
        "\n",
        "prompts_df = pd.DataFrame(\n",
        "    {\n",
        "        \"prompt\": [\n",
        "            \"Explain the difference between correlation and causation, and provide a real-world example where confusing the two could lead to poor decision-making.\",\n",
        "            \"Write a Python function that finds the longest palindromic substring in a given string. Include comments explaining your approach and time complexity.\",\n",
        "            \"A train leaves Station A at 9:00 AM traveling at 60 mph toward Station B. Another train leaves Station B at 10:00 AM traveling at 80 mph toward Station A. If the stations are 280 miles apart, at what time do the trains meet?\",\n",
        "            \"Analyze the ethical implications of using AI in hiring decisions. Present arguments from multiple perspectives and discuss potential safeguards.\",\n",
        "            \"Translate the following sentence to French, Spanish, and German, then explain any cultural nuances that might affect the translation: 'The early bird catches the worm, but the second mouse gets the cheese.'\",\n",
        "            \"Create a short story (200 words) that includes these elements: a mysterious package, a lighthouse keeper, and a revelation that changes everything. The story should have a clear beginning, middle, and end.\",\n",
        "            \"Compare and contrast the economic theories of Adam Smith and Karl Marx. How would each theorist likely view modern gig economy platforms like Uber?\",\n",
        "            \"Debug this code and explain what's wrong: def fibonacci(n): if n <= 1: return n else: return fibonacci(n-1) + fibonacci(n-2) + fibonacci(n-3)\",\n",
        "            \"You're a manager and an employee consistently delivers excellent work but is frequently late to meetings. Write a constructive feedback message addressing this issue while maintaining morale.\",\n",
        "            \"Explain how transformer architecture works in machine learning to someone with basic programming knowledge but no ML background. Use an analogy to clarify the concept of attention mechanisms.\",\n",
        "        ]\n",
        "    }\n",
        ")\n",
        "\n",
        "data_with_rubrics = client.evals.generate_rubrics(\n",
        "    src=prompts_df,\n",
        "    rubric_group_name=\"general_quality_rubrics\",\n",
        "    predefined_spec_name=types.RubricMetric.GENERAL_QUALITY,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "7ZznuG5TNFep"
      },
      "outputs": [],
      "source": [
        "# --- Model 1: Gemini 2.5 Flash ---\n",
        "gemini_dataset = client.evals.run_inference(\n",
        "    model=\"gemini-2.5-flash\",\n",
        "    src=data_with_rubrics,\n",
        ")\n",
        "\n",
        "# --- Model 2: DeepSeek MAAS Model ---\n",
        "deepseek_dataset = client.evals.run_inference(\n",
        "    model=\"deepseek-ai/deepseek-r1-0528-maas\",\n",
        "    src=data_with_rubrics,\n",
        ")\n",
        "\n",
        "# --- Model 3: Llama 3.1 MAAS Model ---\n",
        "llama_dataset = client.evals.run_inference(\n",
        "    model=\"meta/llama-3.1-70b-instruct-maas\",\n",
        "    src=data_with_rubrics,\n",
        ")\n",
        "\n",
        "# --- Run Comparison Evaluation ---\n",
        "comparison_eval_result = client.evals.evaluate(\n",
        "    dataset=[gemini_dataset, deepseek_dataset, llama_dataset],\n",
        "    metrics=[\n",
        "        types.RubricMetric.GENERAL_QUALITY(rubric_group_name=\"general_quality_rubrics\")\n",
        "    ],\n",
        ")\n",
        "comparison_eval_result.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oPktKo7jFw4w"
      },
      "outputs": [],
      "source": [
        "# --- Model 1: Gemini 2.5 Pro ---\n",
        "gemini_dataset = client.evals.run_inference(\n",
        "    model=\"gemini-2.5-pro\",\n",
        "    src=prompts_df,\n",
        ")\n",
        "\n",
        "# --- Model 2: OpenAI GPT Model ---\n",
        "openai_dataset = client.evals.run_inference(\n",
        "    model=\"gpt-5-mini\",\n",
        "    src=prompts_df,\n",
        ")\n",
        "\n",
        "# --- Model 3: DeepSeek MAAS Model ---\n",
        "deepseek_dataset = client.evals.run_inference(\n",
        "    model=\"deepseek-ai/deepseek-r1-0528-maas\",\n",
        "    src=prompts_df,\n",
        ")\n",
        "\n",
        "# --- Run Comparison Evaluation ---\n",
        "comparison_eval_result = client.evals.evaluate(\n",
        "    dataset=[gemini_dataset, openai_dataset, deepseek_dataset],\n",
        ")\n",
        "comparison_eval_result.show()"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "name": "evaluating_third_party_llms_vertex_ai_gen_ai_eval_sdk.ipynb",
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
