{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 31,
      "id": "cS9eLXxJMfgS",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 6448,
          "status": "ok",
          "timestamp": 1731428757689,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "cS9eLXxJMfgS",
        "outputId": "eeafe387-3d87-4bb6-bc0e-8a8b4fece6b6"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Collecting rapidfuzz\n",
            "  Downloading rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n",
            "Downloading rapidfuzz-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.1 MB)\n",
            "\u001b[?25l   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/3.1 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K   \u001b[91m━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.3/3.1 MB\u001b[0m \u001b[31m8.9 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K   \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━\u001b[0m \u001b[32m2.6/3.1 MB\u001b[0m \u001b[31m38.9 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m35.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hInstalling collected packages: rapidfuzz\n",
            "Successfully installed rapidfuzz-3.10.1\n"
          ]
        }
      ],
      "source": [
        "%pip install -q rapidfuzz"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "aG6W5wWi8yv1",
      "metadata": {
        "id": "aG6W5wWi8yv1"
      },
      "source": [
        "First, please visit the [LangSmith](https://smith.langchain.com/) web page, register, and create an API key. The free tier will be enough."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "WbZXO26jKx-b",
      "metadata": {
        "id": "WbZXO26jKx-b"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "\n",
        "# LangSmith reads its configuration from *environment variables*;\n",
        "# assigning plain Python variables (LANGCHAIN_TRACING_V2 = True, ...)\n",
        "# has no effect and tracing would never be enabled.\n",
        "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
        "os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.smith.langchain.com\"\n",
        "os.environ[\"LANGCHAIN_PROJECT\"] = \"test\"\n",
        "os.environ[\"LANGCHAIN_API_KEY\"] = \"YOUR_API_KEY\"  # never commit a real key"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "_hvNoD5F8eis",
      "metadata": {
        "id": "_hvNoD5F8eis"
      },
      "source": [
        "## RAG with LangSmith"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "YycPytQhLBj6",
      "metadata": {
        "id": "YycPytQhLBj6"
      },
      "source": [
        "Let's use the vectorstore we created in Chapter 6:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "hEXLA20oK1ug",
      "metadata": {
        "id": "hEXLA20oK1ug"
      },
      "outputs": [],
      "source": [
        "project = \"YOUR_PROJECT\"\n",
        "location = \"us-central1\"\n",
        "bucket_name = \"YOUR_BUCKET_NAME_FOR_VECTORSTORE\""
      ]
    },
    {
      "cell_type": "markdown",
      "id": "Bois-OkIKu-6",
      "metadata": {
        "id": "Bois-OkIKu-6"
      },
      "source": [
        "First, let's get the corresponding Vertex Vector Search index (feel free to change the name):"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "id": "eLivqHPG8gSY",
      "metadata": {
        "executionInfo": {
          "elapsed": 310,
          "status": "ok",
          "timestamp": 1731428502582,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "eLivqHPG8gSY"
      },
      "outputs": [],
      "source": [
        "project = \"kuligin-sandbox1\"\n",
        "location = \"us-central1\"\n",
        "bucket_name = \"kuligin-sandbox1\""
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "id": "rj7cjRY88ygN",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2283,
          "status": "ok",
          "timestamp": 1731428508283,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "rj7cjRY88ygN",
        "outputId": "4b940388-bc7c-422b-90ad-d9f42d5eb434"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "multimodal_example_endpoint\n",
            "multimodal_example_lc\n"
          ]
        }
      ],
      "source": [
        "from google.cloud import aiplatform\n",
        "\n",
        "# Look up the endpoint by display name and fail loudly on a miss.\n",
        "# (A for/break loop would silently keep the *last* endpoint when the\n",
        "# name is not found, causing a confusing failure further down.)\n",
        "index_endpoint = next(\n",
        "    (e for e in aiplatform.MatchingEngineIndexEndpoint.list()\n",
        "     if e.display_name == \"multimodal_example_endpoint\"),\n",
        "    None,\n",
        ")\n",
        "assert index_endpoint is not None, \"Endpoint 'multimodal_example_endpoint' not found\"\n",
        "print(index_endpoint.display_name)\n",
        "index_endpoint_name = index_endpoint.name\n",
        "\n",
        "# Same pattern for the deployed index on that endpoint.\n",
        "index = next(\n",
        "    (i for i in index_endpoint.deployed_indexes\n",
        "     if i.id == \"multimodal_example_lc\"),\n",
        "    None,\n",
        ")\n",
        "assert index is not None, \"Deployed index 'multimodal_example_lc' not found\"\n",
        "print(index.id)\n",
        "\n",
        "index_name = index.index"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "Y-yCBEtRLQHA",
      "metadata": {
        "id": "Y-yCBEtRLQHA"
      },
      "source": [
        "Now let's create a RAG, run it and visit Langsmith UI to explore the tracing collected:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "id": "4FrQZPqi8iw2",
      "metadata": {
        "executionInfo": {
          "elapsed": 2063,
          "status": "ok",
          "timestamp": 1731428511566,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "4FrQZPqi8iw2"
      },
      "outputs": [],
      "source": [
        "from langchain_google_vertexai import VectorSearchVectorStore\n",
        "from langchain_google_vertexai import VertexAIEmbeddings, ChatVertexAI\n",
        "from langchain.prompts import PromptTemplate\n",
        "from langchain_core.runnables import RunnablePassthrough\n",
        "from langchain_core.output_parsers import StrOutputParser\n",
        "\n",
        "\n",
        "prompt = PromptTemplate(\n",
        "    input_variables=[\"question\", \"context\"],\n",
        "    template=\"Answer the question\\n{question}\\ngiven the following context:\\n{context}\\n.\",\n",
        ")\n",
        "\n",
        "vectorstore = VectorSearchVectorStore.from_components(\n",
        "    project_id=project,\n",
        "    region=location,\n",
        "    gcs_bucket_name=bucket_name,\n",
        "    index_id=index_name,\n",
        "    endpoint_id=index_endpoint.name,\n",
        "    embedding=VertexAIEmbeddings(model_name=\"textembedding-gecko@003\"),\n",
        "    stream_update=True\n",
        ")\n",
        "\n",
        "retriever = vectorstore.as_retriever(search_kwargs={\"k\": 10})\n",
        "\n",
        "def format_docs(docs):\n",
        "  \"\"\"Join retrieved docs (with page numbers) into one context string.\"\"\"\n",
        "  if docs:\n",
        "    # Debug aid: show where the top-ranked chunk came from.\n",
        "    # Guarded so an empty retrieval result no longer raises IndexError.\n",
        "    print(docs[0].metadata)\n",
        "  return \"\\n\".join(\n",
        "      \"page: {0}\\n{1}\".format(doc.metadata.get(\"page\", 0), doc.page_content)\n",
        "      for doc in docs)\n",
        "\n",
        "\n",
        "chain_rag = (\n",
        "    {\n",
        "        \"context\": retriever | format_docs,\n",
        "        \"question\": RunnablePassthrough(),\n",
        "    }\n",
        "    | prompt\n",
        "    | ChatVertexAI(\n",
        "        temperature=0, model_name=\"gemini-pro\", max_output_tokens=1024)\n",
        "    | StrOutputParser()\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "id": "zXYhrNr-9Pak",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 53
        },
        "executionInfo": {
          "elapsed": 1642,
          "status": "ok",
          "timestamp": 1731428514227,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "zXYhrNr-9Pak",
        "outputId": "be1a0b5c-822c-4119-aef2-e4f98761986b"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'source': 'gen-app-builder/search/alphabet-investor-pdfs/2022_alphabet_annual_report.pdf', 'page': 66, 'element': 'text'}\n"
          ]
        },
        {
          "data": {
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            },
            "text/plain": [
              "\"The answer to your question is not directly provided in the text. However, the text does state that Alphabet's revenue in 2022 was $282.8 billion.\""
            ]
          },
          "execution_count": 5,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "chain_rag.invoke(\"What was Alphabet's revenue in 2022?\")"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "Ca2V8QOVLUfC",
      "metadata": {
        "id": "Ca2V8QOVLUfC"
      },
      "source": [
        "Visit Langsmith and explore how the traces look!"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "6mOxdRE-9kgc",
      "metadata": {
        "id": "6mOxdRE-9kgc"
      },
      "source": [
        "## Using LangchainHub"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "5eb60f50",
      "metadata": {},
      "source": [
        "We can now do the same using RAG from LangchainHub:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "tt9dBS7I9mZ9",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        },
        "executionInfo": {
          "elapsed": 2361,
          "status": "ok",
          "timestamp": 1731428675976,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "tt9dBS7I9mZ9",
        "outputId": "e7e74901-d413-49ab-cd0f-834dbbb40266"
      },
      "outputs": [],
      "source": [
        "from langchain import hub\n",
        "prompt_v1 = hub.pull(\"rlm/rag-prompt\")\n",
        "\n",
        "chain_rag_v1 = (\n",
        "    {\n",
        "        \"context\": retriever | format_docs,\n",
        "        \"question\": RunnablePassthrough(),\n",
        "    }\n",
        "    | prompt_v1\n",
        "    | ChatVertexAI(\n",
        "        temperature=0, model_name=\"gemini-pro\", max_output_tokens=1024)\n",
        "    | StrOutputParser()\n",
        ")\n",
        "chain_rag_v1.invoke(\"What was Alphabet's revenue in 2022?\")"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "wzC9Me-LKvll",
      "metadata": {
        "id": "wzC9Me-LKvll"
      },
      "source": [
        "# Pointwise evaluators"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "e3996319",
      "metadata": {},
      "source": [
        "Let's explore pointwise evaluators available on LangChain and how to use them with Gemini."
      ]
    },
    {
      "cell_type": "markdown",
      "id": "xz_O2nNQj3N3",
      "metadata": {
        "id": "xz_O2nNQj3N3"
      },
      "source": [
        "### QA evaluators"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "fbac527d",
      "metadata": {},
      "source": [
        "We'll start with evaluators for Question & Answering:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "id": "sOfesLBJczjk",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 5027,
          "status": "ok",
          "timestamp": 1731428681786,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "sOfesLBJczjk",
        "outputId": "d773e99c-c0cc-45c0-f58f-53a82f5cdee9"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'results': 'GRADE: CORRECT \\n'}\n"
          ]
        }
      ],
      "source": [
        "from langchain.evaluation import load_evaluator\n",
        "from langchain_google_vertexai import ChatVertexAI\n",
        "llm = ChatVertexAI(model_name=\"gemini-1.5-pro-001\")\n",
        "\n",
        "evaluator_qa = load_evaluator(\"qa\", llm=llm)\n",
        "example = {\n",
        "    \"query\": \"What is the capital of Germany?\",\n",
        "    \"answer\": \"Berlin is the capital of Germany.\"}\n",
        "prediction = {\"result\": \"Berlin.\"}\n",
        "results = evaluator_qa.evaluate(\n",
        "    examples=[example],\n",
        "    predictions=[prediction]\n",
        "\n",
        ")\n",
        "print(results[0])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 8,
      "id": "Gw5zVLPG4uyE",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 441,
          "status": "ok",
          "timestamp": 1731428682225,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "Gw5zVLPG4uyE",
        "outputId": "1f2f29bb-d8b2-4c73-8f6a-5f6566269c38"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "LLMResult(generations=[[ChatGeneration(text='GRADE: CORRECT \\n', generation_info={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}], 'usage_metadata': {'prompt_token_count': 159, 'candidates_token_count': 5, 'total_token_count': 164, 'cached_content_token_count': 0}, 'finish_reason': 'STOP', 'avg_logprobs': -0.00013375983107835054, 'logprobs_result': {'top_candidates': [], 'chosen_candidates': []}}, message=AIMessage(content='GRADE: CORRECT \\n', additional_kwargs={}, response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False, 'severity': 'HARM_SEVERITY_NEGLIGIBLE'}], 'usage_metadata': {'prompt_token_count': 159, 'candidates_token_count': 5, 'total_token_count': 164, 'cached_content_token_count': 0}, 'finish_reason': 'STOP', 'avg_logprobs': -0.00013375983107835054, 'logprobs_result': {'top_candidates': [], 'chosen_candidates': []}}, id='run-2f57c5f7-7ae0-42d0-8a6b-a46eb5c8b652-0', usage_metadata={'input_tokens': 
159, 'output_tokens': 5, 'total_tokens': 164}))]], llm_output={}, run=[RunInfo(run_id=UUID('2f57c5f7-7ae0-42d0-8a6b-a46eb5c8b652'))], type='LLMResult')"
            ]
          },
          "execution_count": 8,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "inputs = [\n",
        "  {\n",
        "    \"query\": \"What is the capital of Germany?\",\n",
        "    \"answer\": \"Berlin is the capital of Germany.\",\n",
        "    \"result\": \"Berlin.\"\n",
        "    }]\n",
        "evaluator_qa.generate(inputs)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 9,
      "id": "iHJ98btz2rkr",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 655,
          "status": "ok",
          "timestamp": 1731428682879,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "iHJ98btz2rkr",
        "outputId": "6d512b05-49e9-4fea-d466-f56c6d32d644"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'reasoning': 'GRADE: CORRECT', 'value': 'CORRECT', 'score': 1}\n"
          ]
        }
      ],
      "source": [
        "result = evaluator_qa.evaluate_strings(\n",
        "    input=\"What is the capital of Germany?\",\n",
        "    reference=\"Berlin is the capital of Germany.\",\n",
        "    prediction=\"Berlin.\"\n",
        "\n",
        ")\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "99af816b",
      "metadata": {},
      "source": [
        "We can specify which evaluator to use:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 10,
      "id": "6Kz9V7dD1JxR",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 841,
          "status": "ok",
          "timestamp": 1731428683719,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "6Kz9V7dD1JxR",
        "outputId": "04047575-8f1e-47c3-f55a-0ad6359da7ea"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'reasoning': 'EXPLANATION: The student answer matches the capital provided in the context. \\n\\nGRADE: CORRECT', 'value': 'CORRECT', 'score': 1}\n"
          ]
        }
      ],
      "source": [
        "evaluator_cotqa = load_evaluator(\"cot_qa\", llm=llm)\n",
        "result_cot = evaluator_cotqa.evaluate_strings(\n",
        "    input=\"What is the capital of Germany?\",\n",
        "    reference=\"Berlin is the capital of Germany.\",\n",
        "    prediction=\"Berlin.\"\n",
        "\n",
        ")\n",
        "print(result_cot)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 11,
      "id": "BVmSpRBT1TP-",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 325,
          "status": "ok",
          "timestamp": 1731428684042,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "BVmSpRBT1TP-",
        "outputId": "6a806ea3-0e75-4ca0-da4f-b0aa78868378"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'text': 'GRADE: CORRECT \\n'}\n"
          ]
        }
      ],
      "source": [
        "evaluator_contextqa = load_evaluator(\"context_qa\", llm=llm)\n",
        "example = {\n",
        "    \"query\": \"What is the capital?\",\n",
        "    \"context\": \"The question is about Germany.\"}\n",
        "prediction = {\"result\": \"Berlin.\"}\n",
        "results = evaluator_contextqa.evaluate(\n",
        "    examples=[example],\n",
        "    predictions=[prediction]\n",
        "\n",
        ")\n",
        "print(results[0])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 12,
      "id": "sBF6BnijOBdy",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 748,
          "status": "ok",
          "timestamp": 1731428684789,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "sBF6BnijOBdy",
        "outputId": "7fb7a550-2114-4397-ff6a-a89d31383a37"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'reasoning': 'GRADE: CORRECT', 'value': 'CORRECT', 'score': 1}\n"
          ]
        }
      ],
      "source": [
        "result_context = evaluator_contextqa.evaluate_strings(\n",
        "    input=\"What is the capital?\",\n",
        "    reference=\"The question is about Germany.\",\n",
        "    prediction=\"Berlin.\"\n",
        "\n",
        ")\n",
        "print(result_context)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "fde8e703",
      "metadata": {},
      "source": [
        "And we can explore the prompt that is used by the corresponding evaluator:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 13,
      "id": "q3T87adzlGFH",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2,
          "status": "ok",
          "timestamp": 1731428684790,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "q3T87adzlGFH",
        "outputId": "13d86f4c-8395-4035-bb54-67e958c0b93c"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "PromptTemplate(input_variables=['context', 'query', 'result'], input_types={}, partial_variables={}, template=\"You are a teacher grading a quiz.\\nYou are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.\\n\\nExample Format:\\nQUESTION: question here\\nCONTEXT: context the question is about here\\nSTUDENT ANSWER: student's answer here\\nGRADE: CORRECT or INCORRECT here\\n\\nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \\n\\nQUESTION: {query}\\nCONTEXT: {context}\\nSTUDENT ANSWER: {result}\\nGRADE:\")"
            ]
          },
          "execution_count": 13,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "evaluator_contextqa.prompt"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "kDgNPCfeaUq5",
      "metadata": {
        "id": "kDgNPCfeaUq5"
      },
      "source": [
        "### score_string evaluator"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "186b4c14",
      "metadata": {},
      "source": [
        "Let's look at evaluator that computes scores between reference and prediction strings:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "f59hSrOmaTo5",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 1657,
          "status": "ok",
          "timestamp": 1731428690766,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "f59hSrOmaTo5",
        "outputId": "dd30e691-95fc-4498-ddc4-857c47553489"
      },
      "outputs": [],
      "source": [
        "evaluator_scorestr = load_evaluator(\"score_string\", llm=llm)\n",
        "\n",
        "result = evaluator_scorestr.evaluate_strings(\n",
        "    input=\"What is the capital of Germany?\",\n",
        "    reference=\"Berlin is the capital of Germany.\",\n",
        "    prediction=\"Berlin.\"\n",
        "\n",
        ")\n",
        "print(result[\"score\"])"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "658341ac",
      "metadata": {},
      "source": [
        "Let's take a look at the criteria available:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 15,
      "id": "YTmK3KK_Ydyk",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 315,
          "status": "ok",
          "timestamp": 1731428693322,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "YTmK3KK_Ydyk",
        "outputId": "2f8bcb17-727f-495b-ebbe-55c69616aa09"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "conciseness\n",
            "relevance\n",
            "correctness\n",
            "coherence\n",
            "harmfulness\n",
            "maliciousness\n",
            "helpfulness\n",
            "controversiality\n",
            "misogyny\n",
            "criminality\n",
            "insensitivity\n",
            "depth\n",
            "creativity\n",
            "detail\n"
          ]
        }
      ],
      "source": [
        "from langchain.evaluation import Criteria\n",
        "\n",
        "for c in Criteria:\n",
        "  print(c.value)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 16,
      "id": "NH_NU2KLX_dJ",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 318,
          "status": "ok",
          "timestamp": 1731428696024,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "NH_NU2KLX_dJ",
        "outputId": "ac1f5e06-25bf-48ae-eab8-c1d96bf9542f"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'creativity': 'Does the submission demonstrate novelty or unique ideas?'}\n"
          ]
        }
      ],
      "source": [
        "from langchain.evaluation.scoring.eval_chain import resolve_criteria\n",
        "\n",
        "print(resolve_criteria(\"creativity\"))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 17,
      "id": "-e5c67phYNld",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 3,
          "status": "ok",
          "timestamp": 1731428697001,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "-e5c67phYNld",
        "outputId": "bc88938c-b294-4145-a5b0-1cb3494cbf9b"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'helpfulness': 'Is the submission helpful, insightful, and appropriate?', 'relevance': 'Is the submission referring to a real quote from the text?', 'correctness': 'Is the submission correct, accurate, and factual?', 'depth': 'Does the submission demonstrate depth of thought?'}\n"
          ]
        }
      ],
      "source": [
        "print(resolve_criteria(None))"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "OgJKmDu8xNBi",
      "metadata": {
        "id": "OgJKmDu8xNBi"
      },
      "source": [
        "### criteria evaluator"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "q9sQuhYxaZ7y",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 1866,
          "status": "ok",
          "timestamp": 1731428701034,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "q9sQuhYxaZ7y",
        "outputId": "170c5e58-c8e8-4806-aa4a-33ca6db2760e"
      },
      "outputs": [],
      "source": [
        "evaluator_cr = load_evaluator(\"criteria\", llm=llm)\n",
        "\n",
        "result = evaluator_cr.evaluate_strings(\n",
        "    input=\"What is the capital of Germany?\",\n",
        "    reference=\"Berlin is the capital of Germany.\",\n",
        "    prediction=\"Berlin.\"\n",
        "\n",
        ")\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 19,
      "id": "s1XhemqexYDf",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2,
          "status": "ok",
          "timestamp": 1731428701034,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "s1XhemqexYDf",
        "outputId": "f5a3875f-4768-43c3-a0e8-9d94d18ba34d"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "PromptTemplate(input_variables=['input', 'output'], input_types={}, partial_variables={'criteria': 'helpfulness: Is the submission helpful, insightful, and appropriate? If so, respond Y. If not, respond N.'}, template='You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data:\\n[BEGIN DATA]\\n***\\n[Input]: {input}\\n***\\n[Submission]: {output}\\n***\\n[Criteria]: {criteria}\\n***\\n[END DATA]\\nDoes the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character \"Y\" or \"N\" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. At the end, repeat just the letter again by itself on a new line.')"
            ]
          },
          "execution_count": 19,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "evaluator_cr.prompt"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 20,
      "id": "5yxAvwF_TcB7",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 3,
          "status": "ok",
          "timestamp": 1731428701971,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "5yxAvwF_TcB7",
        "outputId": "4a3d052d-f3c5-4095-d35c-7dc99843e820"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "ChatPromptTemplate(input_variables=['input', 'prediction'], input_types={}, partial_variables={'reference': '', 'criteria': 'For this evaluation, you should primarily consider the following criteria:\\nhelpfulness: Is the submission helpful, insightful, and appropriate?\\nrelevance: Is the submission referring to a real quote from the text?\\ncorrectness: Is the submission correct, accurate, and factual?\\ndepth: Does the submission demonstrate depth of thought?\\n'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant.'), additional_kwargs={}), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['criteria', 'input', 'prediction'], input_types={}, partial_variables={}, template='[Instruction]\\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. {criteria}Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\\n\\n[Question]\\n{input}\\n\\n[The Start of Assistant\\'s Answer]\\n{prediction}\\n[The End of Assistant\\'s Answer]'), additional_kwargs={})])"
            ]
          },
          "execution_count": 20,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "evaluator_scorestr.prompt"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "9EU0pPxsJ5bl",
      "metadata": {
        "id": "9EU0pPxsJ5bl"
      },
      "source": [
        "### Vertex evaluators"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "19f2f596",
      "metadata": {},
      "source": [
        "Set up the numerical project id:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 21,
      "id": "nKczOjjOKuTy",
      "metadata": {
        "executionInfo": {
          "elapsed": 358,
          "status": "ok",
          "timestamp": 1731428703896,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "nKczOjjOKuTy"
      },
      "outputs": [],
      "source": [
        "PROJECT_ID = \"YOUR_PROJECT_ID\""
      ]
    },
    {
      "cell_type": "markdown",
      "id": "e63ba005",
      "metadata": {},
      "source": [
        "Let's start with a simple evaluator that computes a score:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 22,
      "id": "RF-GDzgRKV4S",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 303,
          "status": "ok",
          "timestamp": 1731428705525,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "RF-GDzgRKV4S",
        "outputId": "350b5e95-960b-40b5-e5ce-58bc3c536540"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "[{'score': 0.29071537}, {'score': 1.0}, {'score': 0.488923}]\n"
          ]
        }
      ],
      "source": [
        "from langchain_google_vertexai import VertexStringEvaluator\n",
        "\n",
        "evaluator = VertexStringEvaluator(\n",
        "        metric=\"bleu\", project_id=PROJECT_ID\n",
        "    )\n",
        "result = evaluator.evaluate(\n",
        "    examples=[\n",
        "        {\"reference\": \"Berlin is the capital of Germany.\"},\n",
        "        {\"reference\": \"London is the capital of Britain.\"},\n",
        "        {\"reference\": \"London is the capital of Britain.\"},\n",
        "    ],\n",
        "    predictions=[\n",
        "        {\"prediction\": \"The capital of Germany is Berlin.\"},\n",
        "        {\"prediction\": \"London is the capital of Britain.\"},\n",
        "        {\"prediction\": \"London is a capital of Britain.\"},\n",
        "    ],\n",
        ")\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "610e03e5",
      "metadata": {},
      "source": [
        "Now let's compute question-answering relevance using VertexAI Evaluation service:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 23,
      "id": "h3NAhqdiAAgP",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2899,
          "status": "ok",
          "timestamp": 1731428710084,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "h3NAhqdiAAgP",
        "outputId": "4c910181-779a-44b5-8f5f-36c1d0ef0cd6"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'score': 1.0, 'explanation': 'STEP 1: Assess relevance: the response does not address the instruction directly. The instruction asks about the Pixel 8 processor, but the response provides an incorrect processor.\\nSTEP 2: Score based on the criteria and rubrics: the response is irrelevant to the instruction. Thus, the score is 1.', 'confidence': 1.0}\n"
          ]
        }
      ],
      "source": [
        "evaluator_qa = VertexStringEvaluator(\n",
        "        metric=\"question_answering_relevance\", project_id=PROJECT_ID\n",
        "    )\n",
        "result = evaluator_qa.evaluate_strings(\n",
        "   instruction=\"Which processor does Pixel 8 has?\",\n",
        "   prediction=\"Qualcomm Snapdragon 765G \",\n",
        "   context=\"Google Tensor G3 works with the Titan M2 security chip to protect personal information and make your Pixel more resilient to sophisticated attacks. And now, Face Unlock on Pixel 8 meets the highest Android biometric class, allowing you to access compatible banking and payment apps like Google Wallet.\",\n",
        ")\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 24,
      "id": "VjocP0kAAV4K",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2806,
          "status": "ok",
          "timestamp": 1731428713219,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "VjocP0kAAV4K",
        "outputId": "aa5659f6-4de3-4095-b903-de0a88e6e7c5"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'score': 0.0, 'explanation': \"The reference does not contain claims regarding the Pixel 8's processor. Therefore, I am not able to assess reference claim alignment. Since the instruction requests information on the Pixel 8 and the response provides information about Qualcomm Snapdragon 765G processor, there is no information to assess the correctness based on the criteria. Thus, I'm defaulting the score to 0 based on the rubric.\", 'confidence': 1.0}\n"
          ]
        }
      ],
      "source": [
        "evaluator_c = VertexStringEvaluator(\n",
        "        metric=\"question_answering_correctness\", project_id=PROJECT_ID\n",
        "    )\n",
        "result = evaluator_c.evaluate_strings(\n",
        "   instruction=\"Which processor does Pixel 8 has?\",\n",
        "   prediction=\"Qualcomm Snapdragon 765G \",\n",
        "   context=\"Google Tensor G3 works with the Titan M2 security chip to protect personal information and make your Pixel more resilient to sophisticated attacks. And now, Face Unlock on Pixel 8 meets the highest Android biometric class, allowing you to access compatible banking and payment apps like Google Wallet.\",\n",
        ")\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 25,
      "id": "XypJSD_F8G3f",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2879,
          "status": "ok",
          "timestamp": 1731428716097,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "XypJSD_F8G3f",
        "outputId": "553dc4f2-3381-4136-cfbb-597fc331d220"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'score': 1.0, 'explanation': \"STEP 1: Assess relevance: the response does not address the instruction.\\nThe instruction asks about Pixel 8's processor, but the response provides information about Qualcomm Snapdragon 765G, which is not related to Pixel 8. \\nSTEP 2: Score based on the criteria and rubrics.\\nAccording to the rubric, a score of 1 is given when the response is completely irrelevant to the instruction. Thus, the response receives a score of 1.\", 'confidence': 1.0}\n"
          ]
        }
      ],
      "source": [
        "evaluator_qa = VertexStringEvaluator(\n",
        "        metric=\"question_answering_relevance\", project_id=PROJECT_ID\n",
        "    )\n",
        "result = evaluator_qa.evaluate_strings(\n",
        "   instruction=\"Which processor does Pixel 8 has?\",\n",
        "   #prediction=\"Tensor G3\",\n",
        "   prediction=\"Qualcomm Snapdragon 765G\",\n",
        "   context=\"Google Tensor G3 works with the Titan M2 security chip to protect personal information and make your Pixel more resilient to sophisticated attacks. And now, Face Unlock on Pixel 8 meets the highest Android biometric class, allowing you to access compatible banking and payment apps like Google Wallet.\",\n",
        "   #reference=\"Bundestag is located in Munich\"\n",
        ")\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 26,
      "id": "4D20bdfU_JeC",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 1819,
          "status": "ok",
          "timestamp": 1731428717912,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "4D20bdfU_JeC",
        "outputId": "c5575025-64ff-49d9-f75a-693074cbf0ca"
      },
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.10/dist-packages/langchain_google_vertexai/evaluators/_core.py:60: UserWarning: Ignoring input in VertexPairWiseStringEvaluator, as it is not expected.\n",
            "  warn(self._skip_input_warning)\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'pairwise_choice': 'BASELINE', 'explanation': 'BASELINE response is correct while CANDIDATE response is incorrect. The capital of Great Britain is London.', 'confidence': 1.0}\n"
          ]
        }
      ],
      "source": [
        "from langchain_google_vertexai import VertexPairWiseStringEvaluator\n",
        "\n",
        "evaluator_pw = VertexPairWiseStringEvaluator(\n",
        "        metric=\"pairwise_question_answering_quality\",\n",
        "        project_id=PROJECT_ID\n",
        "    )\n",
        "result = evaluator_pw.evaluate_string_pairs(\n",
        "        prediction=\"London\",\n",
        "        prediction_b=\"Berlin\",\n",
        "        input=\"What is the capital of Great Britain?\",\n",
        "        instruction=\"Be concise\",\n",
        "    )\n",
        "\n",
        "print(result)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "yjlILYa1Ky-a",
      "metadata": {
        "id": "yjlILYa1Ky-a"
      },
      "source": [
        "# Pairwise evaluators"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "60d3e727",
      "metadata": {},
      "source": [
        "With Vertex AI Evaluation service, you can also run pairwise evaluators. Let's look at an example:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 32,
      "id": "-bfi6I8hK0ul",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 3166,
          "status": "ok",
          "timestamp": 1731428779352,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "-bfi6I8hK0ul",
        "outputId": "2e425507-90ac-4fa2-d20f-088efb6a32e6"
      },
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "WARNING:langchain.evaluation.comparison.eval_chain:This chain was only tested with GPT-4. Performance may be significantly worse with other models.\n"
          ]
        }
      ],
      "source": [
        "from langchain.evaluation import load_evaluator\n",
        "\n",
        "evaluator_ps = load_evaluator(\n",
        "    \"pairwise_string\", llm=llm)\n",
        "\n",
        "result = evaluator_ps.evaluate_string_pairs(\n",
        "    prediction=\"Berlin.\",\n",
        "    prediction_b=\"Berlin is a capital of Germany.\",\n",
        "    input=\"What is the capital of Germany?\",\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 33,
      "id": "RZ4QZIKROVmr",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 2,
          "status": "ok",
          "timestamp": 1731428779353,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "RZ4QZIKROVmr",
        "outputId": "bbb61971-8ae3-424e-ff57-ad68777f00f8"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'reasoning': 'Both assistants provide the correct answer to the user\\'s question, which is that the capital of Germany is Berlin. However, assistant A is more concise and directly answers the question without any extraneous wording. Assistant B\\'s response is grammatically incorrect as it states \"a capital\" instead of \"the capital.\" \\n\\nOverall, assistant A provides a slightly better response due to its conciseness and grammatically correct structure.\\n\\n[[A]] \\n', 'value': 'A', 'score': 1}\n"
          ]
        }
      ],
      "source": [
        "print(result)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "YUb3SnsO_74D",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 1,
          "status": "ok",
          "timestamp": 1731428779671,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "YUb3SnsO_74D",
        "outputId": "f82c788a-41dd-4efb-dcf9-813d91fc0fba"
      },
      "outputs": [],
      "source": [
        "evaluator_ps = load_evaluator(\n",
        "    \"pairwise_string_distance\", llm=llm)\n",
        "\n",
        "result = evaluator_ps.evaluate_string_pairs(\n",
        "    prediction=\"Berlin.\",\n",
        "    prediction_b=\"Berlin is a capital of Germany.\",\n",
        "    input=\"What is the capital of Germany?\",\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 35,
      "id": "LnQK4Zha_-FE",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "executionInfo": {
          "elapsed": 324,
          "status": "ok",
          "timestamp": 1731428782861,
          "user": {
            "displayName": "",
            "userId": ""
          },
          "user_tz": -60
        },
        "id": "LnQK4Zha_-FE",
        "outputId": "d2b2c353-e21d-472d-9171-75498cdc4935"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "{'score': 0.3164362519201229}"
            ]
          },
          "execution_count": 35,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "result"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "name": "Chapter 14. Evaluation a",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.10.10"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 5
}
