{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ur8xi4C7S06n"
      },
      "outputs": [],
      "source": [
        "# Copyright 2025 Google LLC\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     https://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JAPoU8Sm5E6e"
      },
      "source": [
        "# Vertex AI SFT Gemini Migration Recipe\n",
        "\n",
        "\n",
        "<table align=\"left\">\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\">\n",
        "      <img width=\"32px\" src=\"https://www.gstatic.com/pantheon/images/bigquery/welcome_page/colab-logo.svg\" alt=\"Google Colaboratory logo\"><br> Open in Colab\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://console.cloud.google.com/vertex-ai/colab/import/https:%2F%2Fraw.githubusercontent.com%2FGoogleCloudPlatform%2Fgenerative-ai%2Fmain%2Fgemini%2Ftuning%2Fsft_gemini_migration_recipe.ipynb\">\n",
        "      <img width=\"32px\" src=\"https://lh3.googleusercontent.com/JmcxdQi-qOpctIvWKgPtrzZdJJK-J3sWE1RsfjZNwshCFgE_9fULcNpuXYTilIR2hjwN\" alt=\"Google Cloud Colab Enterprise logo\"><br> Open in Colab Enterprise\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/generative-ai/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\">\n",
        "      <img src=\"https://www.gstatic.com/images/branding/gcpiconscolors/vertexai/v1/32px.svg\" alt=\"Vertex AI logo\"><br> Open in Vertex AI Workbench\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://console.cloud.google.com/bigquery/import?url=https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\">\n",
        "      <img src=\"https://www.gstatic.com/images/branding/gcpiconscolors/bigquery/v1/32px.svg\" alt=\"BigQuery Studio logo\"><br> Open in BigQuery Studio\n",
        "    </a>\n",
        "  </td>\n",
        "  <td style=\"text-align: center\">\n",
        "    <a href=\"https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\">\n",
        "      <img width=\"32px\" src=\"https://raw.githubusercontent.com/primer/octicons/refs/heads/main/icons/mark-github-24.svg\" alt=\"GitHub logo\"><br> View on GitHub\n",
        "    </a>\n",
        "  </td>\n",
        "</table>\n",
        "\n",
        "<div style=\"clear: both;\"></div>\n",
        "\n",
        "<b>Share to:</b>\n",
        "\n",
        "<a href=\"https://www.linkedin.com/sharing/share-offsite/?url=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/8/81/LinkedIn_icon.svg\" alt=\"LinkedIn logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://bsky.app/intent/compose?text=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/7/7a/Bluesky_Logo.svg\" alt=\"Bluesky logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://twitter.com/intent/tweet?url=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/5/5a/X_icon_2.svg\" alt=\"X logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://reddit.com/submit?url=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://redditinc.com/hubfs/Reddit%20Inc/Brand/Reddit_Logo.png\" alt=\"Reddit logo\">\n",
        "</a>\n",
        "\n",
        "<a href=\"https://www.facebook.com/sharer/sharer.php?u=https%3A//github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/tuning/sft_gemini_migration_recipe.ipynb\" target=\"_blank\">\n",
        "  <img width=\"20px\" src=\"https://upload.wikimedia.org/wikipedia/commons/5/51/Facebook_f_logo_%282019%29.svg\" alt=\"Facebook logo\">\n",
        "</a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "84f0f73a0f76"
      },
      "source": [
        "| Author(s) |\n",
        "| --- |\n",
        "| [Jeremy Hao](https://github.com/zh272) |\n",
        "| [Erwin Huizenga](https://github.com/erwinh85) |"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "tvgnzT1CKxrO"
      },
      "source": [
        "## Overview\n",
        "\n",
        "This colab provides a migration recipe for Gemini managed tuning on the following migration pairs:\n",
        "\n",
        "- `gemini-1.5-flash-002` -> `gemini-2.5-flash`\n",
        "\n",
        "Gemini tuning supports the following hyperparameters:\n",
        "\n",
        "- **Epochs**: The number of complete passes the model makes over the entire training dataset during training.\n",
        "- **Adapter size**: The Adapter size to use for the tuning job. The adapter size influences the number of trainable parameters for the tuning job. A larger adapter size implies that the model can learn more complex tasks, but it requires a larger training dataset and longer training times.\n",
        "- **Learning Rate Multiplier**: A multiplier to apply to the recommended learning rate. You can increase the value to converge faster, or decrease the value to avoid overfitting.\n",
        "\n",
        "The values for these hyperparameters impact the quality of tuned models. Because of model architecture and tuning infrastructure changes, we do NOT recommend applying the same hyperparameters from legacy Gemini models to the latest Gemini models.\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "Note that the provided migration recipe below is for those tuning jobs whose hyperparameters were set EXPLICITLY on legacy models. In cases where the tuning job to be migrated used default hyperparameters (i.e. you did not specify hyperparameters explicitly), you can rely on the API or SDK to populate new default values for you on new models."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "61RBz8LLbxCR"
      },
      "source": [
        "## Get started"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "No17Cw5hgx12"
      },
      "source": [
        "### Install Google Gen AI SDK and other required packages\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "tFy3H3aPgx12"
      },
      "outputs": [],
      "source": [
        "%pip install --upgrade --quiet google-genai"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dmWOrTJ3gx13"
      },
      "source": [
        "### Authenticate your notebook environment (Colab only)\n",
        "\n",
        "If you're running this notebook on Google Colab, run the cell below to authenticate your environment."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "NyKGtVQjgx13"
      },
      "outputs": [],
      "source": [
        "import sys\n",
        "\n",
        "# Colab only: request interactive user credentials for Google Cloud APIs.\n",
        "# This cell is a no-op outside of Google Colab.\n",
        "if \"google.colab\" in sys.modules:\n",
        "    from google.colab import auth\n",
        "\n",
        "    auth.authenticate_user()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "DF4l8DTdWgPY"
      },
      "source": [
        "### Set Google Cloud project information\n",
        "\n",
        "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n",
        "\n",
        "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Nqwi-5ufWp_B"
      },
      "outputs": [],
      "source": [
        "# Use the environment variable if the user doesn't provide Project ID.\n",
        "import os\n",
        "\n",
        "PROJECT_ID = \"[your-project-id]\"  # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
        "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
        "    # NOTE(review): becomes the literal string \"None\" if the env var is unset — confirm intended.\n",
        "    PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
        "\n",
        "# NOTE(review): defaults to the \"global\" endpoint — confirm managed tuning is\n",
        "# supported there, or set GOOGLE_CLOUD_REGION to a regional location.\n",
        "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"global\")\n",
        "\n",
        "from google import genai\n",
        "\n",
        "# Gen AI SDK client routed to Vertex AI (not the Gemini Developer API).\n",
        "client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "### Import libraries"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "from IPython.display import Markdown, display"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "6fc324893334"
      },
      "outputs": [],
      "source": [
        "# @title library\n",
        "import math\n",
        "import json\n",
        "from google.genai import types\n",
        "\n",
        "def get_tuning_job_hparams(tuning_job):\n",
        "  \"\"\"Flattens a supervised tuning job into a plain dict of specs.\n",
        "\n",
        "  Collects the base model, dataset URIs, hyperparameters, and dataset\n",
        "  statistics that the migration functions below consume.\n",
        "  \"\"\"\n",
        "  job_specs = {}\n",
        "  job_specs['state'] = tuning_job.state\n",
        "  job_specs['base_model'] = tuning_job.base_model\n",
        "  job_specs['tuned_model_display_name'] = tuning_job.tuned_model_display_name\n",
        "\n",
        "  supervised_tuning_spec = tuning_job.supervised_tuning_spec\n",
        "  job_specs['training_dataset_uri'] = supervised_tuning_spec.training_dataset_uri\n",
        "  job_specs['validation_dataset_uri'] = supervised_tuning_spec.validation_dataset_uri\n",
        "\n",
        "  hparams = supervised_tuning_spec.hyper_parameters\n",
        "  job_specs['adapter_size'] = hparams.adapter_size\n",
        "  job_specs['epoch_count'] = hparams.epoch_count\n",
        "  job_specs['learning_rate_multiplier'] = hparams.learning_rate_multiplier\n",
        "\n",
        "  sft_data_stats = tuning_job.tuning_data_stats.supervised_tuning_data_stats\n",
        "  job_specs['sft_total_tokens'] = sft_data_stats.total_billable_token_count\n",
        "  job_specs['sft_num_examples'] = sft_data_stats.tuning_dataset_example_count\n",
        "  # Average tokens per example across the tuning dataset.\n",
        "  job_specs['sft_avg_seq_len'] = job_specs['sft_total_tokens'] / job_specs['sft_num_examples']\n",
        "  # WARNING: API does not give max_seq_len, so using max_input_len+max_output_len as a surrogate, but this may be inaccurate.\n",
        "  job_specs['sft_max_seq_len'] = sft_data_stats.user_input_token_distribution.max + sft_data_stats.user_output_token_distribution.max\n",
        "\n",
        "  return job_specs\n",
        "\n",
        "\n",
        "def epoch_1_5_flash_to_2_5_flash_short_context(old_epochs: int):\n",
        "  \"\"\"Maps a 1.5-flash epoch count to a 2.5-flash one (short-context regime).\n",
        "\n",
        "  Piecewise mapping: counts <= 10 pass through unchanged; counts in (10, 80)\n",
        "  are compressed into the 11..16 range; counts >= 80 are scaled to 20%.\n",
        "  Always returns an integer (rounded up).\n",
        "  \"\"\"\n",
        "  new_epochs = old_epochs\n",
        "  if 10< old_epochs < 80:\n",
        "    new_epochs = math.ceil(10 + 6 * (old_epochs-10) / 70)\n",
        "  elif old_epochs >= 80:\n",
        "    new_epochs = 0.2 * old_epochs\n",
        "  return math.ceil(new_epochs)\n",
        "\n",
        "def lrm_1_5_flash_to_2_5_flash(old_lrm: float):\n",
        "  \"\"\"Maps a 1.5-flash learning rate multiplier to a 2.5-flash one.\n",
        "\n",
        "  Computes log2(1 + old_lrm), capped at 10, rounded to one decimal place.\n",
        "  \"\"\"\n",
        "  max_lrm = 10\n",
        "  new_lrm = math.log(1+old_lrm)/math.log(2)\n",
        "  new_lrm = min(max_lrm, new_lrm)\n",
        "  return round(new_lrm, 1)\n",
        "\n",
        "def gemini_1_5_flash_to_gemini_2_5_flash(old_specs: dict):\n",
        "  \"\"\"Translates gemini-1.5-flash-002 tuning specs to gemini-2.5-flash.\n",
        "\n",
        "  Takes the dict produced by get_tuning_job_hparams() and returns a new spec\n",
        "  dict targeting gemini-2.5-flash, or None when the legacy base model is not\n",
        "  gemini-1.5-flash-002. Hyperparameter keys are translated only when present\n",
        "  in old_specs; absent keys are left for the API to default.\n",
        "  \"\"\"\n",
        "  if old_specs.get('base_model') != 'gemini-1.5-flash-002':\n",
        "    return None\n",
        "  new_specs = {\n",
        "      'base_model': 'gemini-2.5-flash',\n",
        "      'tuned_model_display_name': old_specs['tuned_model_display_name'] + '_migrated_gemini_2_5_flash',\n",
        "      'training_dataset_uri': old_specs['training_dataset_uri'],\n",
        "      'validation_dataset_uri': old_specs['validation_dataset_uri'],\n",
        "  }\n",
        "  if 'adapter_size' in old_specs:\n",
        "    new_specs['adapter_size'] = old_specs['adapter_size']\n",
        "\n",
        "  if 'epoch_count' in old_specs:\n",
        "    old_epochs = old_specs['epoch_count']\n",
        "    # Remap epochs only in the short-context regimes below; every other\n",
        "    # regime keeps the legacy epoch count unchanged.\n",
        "    if old_specs['sft_avg_seq_len'] <= 700 and old_specs['sft_max_seq_len'] <= 8192 and old_specs['sft_num_examples'] < 10000:\n",
        "      new_epochs = epoch_1_5_flash_to_2_5_flash_short_context(old_epochs)\n",
        "    elif old_specs['sft_avg_seq_len'] <= 3000 and old_specs['sft_max_seq_len'] > 8192:\n",
        "      new_epochs = epoch_1_5_flash_to_2_5_flash_short_context(old_epochs)\n",
        "    else:\n",
        "      new_epochs = old_epochs\n",
        "    new_specs['epoch_count'] = new_epochs\n",
        "\n",
        "  if 'learning_rate_multiplier' in old_specs:\n",
        "    new_specs['learning_rate_multiplier'] = lrm_1_5_flash_to_2_5_flash(\n",
        "        old_specs['learning_rate_multiplier']\n",
        "    )\n",
        "  return new_specs\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EdvJRUWRNGHE"
      },
      "source": [
        "## Example"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "cf93d5f0ce00"
      },
      "outputs": [],
      "source": [
        "# @title Load legacy tuning job\n",
        "\n",
        "legacy_tuning_job_number = \"[your-tuning-job-number]\"  # @param {type:\"string\"}\n",
        "if not legacy_tuning_job_number or legacy_tuning_job_number == \"[your-tuning-job-number]\":\n",
        "  raise Exception(\"Please provide a tuning job number\")\n",
        "\n",
        "# Fix: the original used an undefined PROJECT_NUMBER variable; the tuning\n",
        "# job resource name accepts the project ID configured above.\n",
        "legacy_tuning_job_name = f'projects/{PROJECT_ID}/locations/{LOCATION}/tuningJobs/{legacy_tuning_job_number}'\n",
        "print('Double check tuning job name:', legacy_tuning_job_name)\n",
        "legacy_tuning_job = client.tunings.get(name=legacy_tuning_job_name)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ERUq7IrBOSKn"
      },
      "outputs": [],
      "source": [
        "# @title gemini-1.5-flash-002 --> gemini-2.5-flash\n",
        "\n",
        "# Translate the legacy job's hyperparameters and print both spec dicts\n",
        "# side by side for review before creating the new tuning job.\n",
        "legacy_job_specs = get_tuning_job_hparams(legacy_tuning_job)\n",
        "new_job_specs = gemini_1_5_flash_to_gemini_2_5_flash(legacy_job_specs)\n",
        "print('[1.5-flash tuning hparams]:')\n",
        "print(json.dumps(legacy_job_specs, indent=2, default=str))\n",
        "print('[2.5-flash tuning hparams]:')\n",
        "print(json.dumps(new_job_specs, indent=2, default=str))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "LGtyr2d3Omr-"
      },
      "outputs": [],
      "source": [
        "# @title Create tuning job with new base_model\n",
        "\n",
        "# gemini_1_5_flash_to_gemini_2_5_flash() returns None when the legacy job's\n",
        "# base model is not gemini-1.5-flash-002 — fail early with a clear message\n",
        "# instead of an opaque TypeError below.\n",
        "if new_job_specs is None:\n",
        "    raise ValueError('Legacy job base model is not supported by this recipe.')\n",
        "\n",
        "# epoch_count / adapter_size / learning_rate_multiplier are only present when\n",
        "# the legacy job set them explicitly; .get() yields None for absent keys so\n",
        "# the API applies its own defaults for the new base model (see Overview).\n",
        "tuning_job = client.tunings.tune(\n",
        "    base_model=new_job_specs['base_model'],\n",
        "    training_dataset=types.TuningDataset(\n",
        "        gcs_uri=new_job_specs['training_dataset_uri'],\n",
        "    ),\n",
        "    config=types.CreateTuningJobConfig(\n",
        "        epoch_count=new_job_specs.get('epoch_count'),\n",
        "        adapter_size=new_job_specs.get('adapter_size'),\n",
        "        learning_rate_multiplier=new_job_specs.get('learning_rate_multiplier'),\n",
        "        tuned_model_display_name=new_job_specs['tuned_model_display_name'],\n",
        "        validation_dataset=types.TuningValidationDataset(\n",
        "            gcs_uri=new_job_specs['validation_dataset_uri'],\n",
        "        ),\n",
        "    ),\n",
        ")\n",
        "print(tuning_job)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
