# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Code generated by the Google Gen AI SDK generator DO NOT EDIT.

from abc import ABC, abstractmethod
import datetime
from enum import Enum, EnumMeta
import inspect
import io
import json
import logging
import sys
import types as builtin_types
import typing
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Union, _UnionGenericAlias  # type: ignore
import pydantic
from pydantic import ConfigDict, Field, PrivateAttr, model_validator
from typing_extensions import Self, TypedDict
from . import _common
from ._operations_converters import (
    _GenerateVideosOperation_from_mldev,
    _GenerateVideosOperation_from_vertex,
    _ImportFileOperation_from_mldev,
    _UploadToFileSearchStoreOperation_from_mldev,
)


# Union-type aliases used by schema introspection code. The `X | Y` syntax
# (types.UnionType) only exists on Python 3.10+, so older interpreters fall
# back to typing.Union only.
if sys.version_info >= (3, 10):
  # Supports both Union[t1, t2] and t1 | t2
  VersionedUnionType = Union[builtin_types.UnionType, _UnionGenericAlias]
  _UNION_TYPES = (typing.Union, builtin_types.UnionType)
else:
  # Supports only Union[t1, t2]
  VersionedUnionType = _UnionGenericAlias
  _UNION_TYPES = (typing.Union,)

# Pillow is an optional dependency: when it is not installed, PIL_Image is
# None at runtime and _is_pillow_image_imported stays False so callers can
# feature-detect image support.
_is_pillow_image_imported = False
if typing.TYPE_CHECKING:
  from ._api_client import BaseApiClient
  import PIL.Image

  PIL_Image = PIL.Image.Image
  _is_pillow_image_imported = True
else:
  # At runtime, default the alias to Any so annotations still resolve even
  # when Pillow is absent.
  PIL_Image: typing.Type = Any
  try:
    import PIL.Image

    PIL_Image = PIL.Image.Image
    _is_pillow_image_imported = True
  except ImportError:
    PIL_Image = None

# The Model Context Protocol (mcp) package is optional. When it is missing,
# the session/result aliases are None at runtime and _is_mcp_imported stays
# False so MCP-dependent code paths can be skipped.
_is_mcp_imported = False
if typing.TYPE_CHECKING:
  from mcp import types as mcp_types
  from mcp import ClientSession as McpClientSession
  from mcp.types import CallToolResult as McpCallToolResult

  _is_mcp_imported = True
else:
  # Runtime defaults keep annotations resolvable when mcp is absent.
  McpClientSession: typing.Type = Any
  McpCallToolResult: typing.Type = Any
  try:
    from mcp import types as mcp_types
    from mcp import ClientSession as McpClientSession
    from mcp.types import CallToolResult as McpCallToolResult

    _is_mcp_imported = True
  except ImportError:
    McpClientSession = None
    McpCallToolResult = None

# PyYAML is optional: `yaml` is None at runtime when it is not installed, so
# users of this module must check `yaml is not None` before calling into it.
if typing.TYPE_CHECKING:
  import yaml
else:
  try:
    import yaml
  except ImportError:
    yaml = None

# httpx is optional. When it is missing, the client aliases are None at
# runtime and _is_httpx_imported stays False so httpx-backed transports can
# be feature-detected.
_is_httpx_imported = False
if typing.TYPE_CHECKING:
  import httpx

  HttpxClient = httpx.Client
  HttpxAsyncClient = httpx.AsyncClient
  _is_httpx_imported = True
else:
  # Runtime defaults keep annotations resolvable when httpx is absent.
  HttpxClient: typing.Type = Any
  HttpxAsyncClient: typing.Type = Any

  try:
    import httpx

    HttpxClient = httpx.Client
    HttpxAsyncClient = httpx.AsyncClient
    _is_httpx_imported = True
  except ImportError:
    HttpxClient = None
    HttpxAsyncClient = None

logger = logging.getLogger('google_genai.types')
# One-shot flags: each ensures the corresponding warning elsewhere in this
# module is emitted at most once per process instead of on every call.
_from_json_schema_warning_logged = False
_json_schema_warning_logged = False
_response_text_warning_logged = False
_response_text_non_text_warning_logged = False
_response_parts_warning_logged = False
_response_function_calls_warning_logged = False
_response_executable_code_warning_logged = False
_response_code_execution_warning_logged = False
_live_server_text_warning_logged = False
_live_server_data_warning_logged = False


# Type variables for generic helpers; bounds are forward references to classes
# defined later in this module.
T = typing.TypeVar('T', bound='GenerateContentResponse')

MetricSubclass = typing.TypeVar('MetricSubclass', bound='Metric')


class Outcome(_common.CaseInSensitiveEnum):
  """Outcome of the code execution."""

  OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED'
  """Unspecified status. This value should not be used."""
  OUTCOME_OK = 'OUTCOME_OK'
  """Code execution completed successfully."""
  OUTCOME_FAILED = 'OUTCOME_FAILED'
  """Code execution finished but with a failure. `stderr` should contain the reason."""
  OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED'
  """Code execution ran for too long, and was cancelled. There may or may not be a partial output present."""


class Language(_common.CaseInSensitiveEnum):
  """Programming language of the `code`."""

  LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED'
  """Unspecified language. This value should not be used."""
  PYTHON = 'PYTHON'
  """Python >= 3.10, with numpy and simpy available."""


class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
  """Specifies how the response should be scheduled in the conversation."""

  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
  """This value is unused."""
  SILENT = 'SILENT'
  """Only add the result to the conversation context, do not interrupt or trigger generation."""
  WHEN_IDLE = 'WHEN_IDLE'
  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
  INTERRUPT = 'INTERRUPT'
  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""


class Type(_common.CaseInSensitiveEnum):
  """The type of the data."""

  TYPE_UNSPECIFIED = 'TYPE_UNSPECIFIED'
  """Not specified, should not be used."""
  STRING = 'STRING'
  """OpenAPI string type"""
  NUMBER = 'NUMBER'
  """OpenAPI number type"""
  INTEGER = 'INTEGER'
  """OpenAPI integer type"""
  BOOLEAN = 'BOOLEAN'
  """OpenAPI boolean type"""
  ARRAY = 'ARRAY'
  """OpenAPI array type"""
  OBJECT = 'OBJECT'
  """OpenAPI object type"""
  NULL = 'NULL'
  """Null type"""


class Mode(_common.CaseInSensitiveEnum):
  """The mode of the predictor to be used in dynamic retrieval."""

  MODE_UNSPECIFIED = 'MODE_UNSPECIFIED'
  """Always trigger retrieval."""
  MODE_DYNAMIC = 'MODE_DYNAMIC'
  """Run retrieval only when system decides it is necessary."""


class ApiSpec(_common.CaseInSensitiveEnum):
  """The API spec that the external API implements.

  This enum is not supported in Gemini API.
  """

  API_SPEC_UNSPECIFIED = 'API_SPEC_UNSPECIFIED'
  """Unspecified API spec. This value should not be used."""
  SIMPLE_SEARCH = 'SIMPLE_SEARCH'
  """Simple search API spec."""
  ELASTIC_SEARCH = 'ELASTIC_SEARCH'
  """Elastic search API spec."""


class AuthType(_common.CaseInSensitiveEnum):
  """Type of auth scheme. This enum is not supported in Gemini API."""

  # Unspecified auth type; the default sentinel value.
  AUTH_TYPE_UNSPECIFIED = 'AUTH_TYPE_UNSPECIFIED'
  NO_AUTH = 'NO_AUTH'
  """No Auth."""
  API_KEY_AUTH = 'API_KEY_AUTH'
  """API Key Auth."""
  HTTP_BASIC_AUTH = 'HTTP_BASIC_AUTH'
  """HTTP Basic Auth."""
  GOOGLE_SERVICE_ACCOUNT_AUTH = 'GOOGLE_SERVICE_ACCOUNT_AUTH'
  """Google Service Account Auth."""
  OAUTH = 'OAUTH'
  """OAuth auth."""
  OIDC_AUTH = 'OIDC_AUTH'
  """OpenID Connect (OIDC) Auth."""


class HttpElementLocation(_common.CaseInSensitiveEnum):
  """The location of the API key. This enum is not supported in Gemini API."""

  # Unspecified location; the default sentinel value.
  HTTP_IN_UNSPECIFIED = 'HTTP_IN_UNSPECIFIED'
  HTTP_IN_QUERY = 'HTTP_IN_QUERY'
  """Element is in the HTTP request query."""
  HTTP_IN_HEADER = 'HTTP_IN_HEADER'
  """Element is in the HTTP request header."""
  HTTP_IN_PATH = 'HTTP_IN_PATH'
  """Element is in the HTTP request path."""
  HTTP_IN_BODY = 'HTTP_IN_BODY'
  """Element is in the HTTP request body."""
  HTTP_IN_COOKIE = 'HTTP_IN_COOKIE'
  """Element is in the HTTP request cookie."""


class PhishBlockThreshold(_common.CaseInSensitiveEnum):
  """Sites with confidence level chosen & above this value will be blocked from the search results.

  This enum is not supported in Gemini API.
  """

  PHISH_BLOCK_THRESHOLD_UNSPECIFIED = 'PHISH_BLOCK_THRESHOLD_UNSPECIFIED'
  """Defaults to unspecified."""
  BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE'
  """Blocks Low and above confidence URL that is risky."""
  BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE'
  """Blocks Medium and above confidence URL that is risky."""
  BLOCK_HIGH_AND_ABOVE = 'BLOCK_HIGH_AND_ABOVE'
  """Blocks High and above confidence URL that is risky."""
  BLOCK_HIGHER_AND_ABOVE = 'BLOCK_HIGHER_AND_ABOVE'
  """Blocks Higher and above confidence URL that is risky."""
  BLOCK_VERY_HIGH_AND_ABOVE = 'BLOCK_VERY_HIGH_AND_ABOVE'
  """Blocks Very high and above confidence URL that is risky."""
  BLOCK_ONLY_EXTREMELY_HIGH = 'BLOCK_ONLY_EXTREMELY_HIGH'
  """Blocks Extremely high confidence URL that is risky."""


class ThinkingLevel(_common.CaseInSensitiveEnum):
  """The level of thoughts tokens that the model should generate."""

  THINKING_LEVEL_UNSPECIFIED = 'THINKING_LEVEL_UNSPECIFIED'
  """Default value."""
  LOW = 'LOW'
  """Low thinking level."""
  HIGH = 'HIGH'
  """High thinking level."""


class HarmCategory(_common.CaseInSensitiveEnum):
  """Harm category."""

  HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED'
  """The harm category is unspecified."""
  HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT'
  """The harm category is harassment."""
  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
  """The harm category is hate speech."""
  HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT'
  """The harm category is sexually explicit content."""
  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
  """The harm category is dangerous content."""
  HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY'
  """Deprecated: Election filter is not longer supported. The harm category is civic integrity."""
  HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE'
  """The harm category is image hate. This enum value is not supported in Gemini API."""
  HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT = (
      'HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT'
  )
  """The harm category is image dangerous content. This enum value is not supported in Gemini API."""
  HARM_CATEGORY_IMAGE_HARASSMENT = 'HARM_CATEGORY_IMAGE_HARASSMENT'
  """The harm category is image harassment. This enum value is not supported in Gemini API."""
  HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT = (
      'HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT'
  )
  """The harm category is image sexually explicit content. This enum value is not supported in Gemini API."""
  HARM_CATEGORY_JAILBREAK = 'HARM_CATEGORY_JAILBREAK'
  """The harm category is for jailbreak prompts. This enum value is not supported in Gemini API."""


class HarmBlockMethod(_common.CaseInSensitiveEnum):
  """Specify if the threshold is used for probability or severity score.

  If not specified, the threshold is used for probability score. This enum is
  not supported in Gemini API.
  """

  HARM_BLOCK_METHOD_UNSPECIFIED = 'HARM_BLOCK_METHOD_UNSPECIFIED'
  """The harm block method is unspecified."""
  SEVERITY = 'SEVERITY'
  """The harm block method uses both probability and severity scores."""
  PROBABILITY = 'PROBABILITY'
  """The harm block method uses the probability score."""


class HarmBlockThreshold(_common.CaseInSensitiveEnum):
  """The harm block threshold."""

  HARM_BLOCK_THRESHOLD_UNSPECIFIED = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'
  """Unspecified harm block threshold."""
  BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE'
  """Block low threshold and above (i.e. block more)."""
  BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE'
  """Block medium threshold and above."""
  BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH'
  """Block only high threshold (i.e. block less)."""
  BLOCK_NONE = 'BLOCK_NONE'
  """Block none."""
  OFF = 'OFF'
  """Turn off the safety filter."""


class FinishReason(_common.CaseInSensitiveEnum):
  """Output only. The reason why the model stopped generating tokens.

  If empty, the model has not stopped generating the tokens.
  """

  FINISH_REASON_UNSPECIFIED = 'FINISH_REASON_UNSPECIFIED'
  """The finish reason is unspecified."""
  STOP = 'STOP'
  """Token generation reached a natural stopping point or a configured stop sequence."""
  MAX_TOKENS = 'MAX_TOKENS'
  """Token generation reached the configured maximum output tokens."""
  SAFETY = 'SAFETY'
  """Token generation stopped because the content potentially contains safety violations. NOTE: When streaming, [content][] is empty if content filters blocks the output."""
  RECITATION = 'RECITATION'
  """The token generation stopped because of potential recitation."""
  LANGUAGE = 'LANGUAGE'
  """The token generation stopped because of using an unsupported language."""
  OTHER = 'OTHER'
  """All other reasons that stopped the token generation."""
  BLOCKLIST = 'BLOCKLIST'
  """Token generation stopped because the content contains forbidden terms."""
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
  """Token generation stopped for potentially containing prohibited content."""
  SPII = 'SPII'
  """Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information (SPII)."""
  MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL'
  """The function call generated by the model is invalid."""
  IMAGE_SAFETY = 'IMAGE_SAFETY'
  """Token generation stopped because generated images have safety violations."""
  UNEXPECTED_TOOL_CALL = 'UNEXPECTED_TOOL_CALL'
  """The tool call generated by the model is invalid."""
  IMAGE_PROHIBITED_CONTENT = 'IMAGE_PROHIBITED_CONTENT'
  """Image generation stopped because the generated images have prohibited content."""
  NO_IMAGE = 'NO_IMAGE'
  """The model was expected to generate an image, but none was generated."""


class HarmProbability(_common.CaseInSensitiveEnum):
  """Output only. Harm probability levels in the content."""

  HARM_PROBABILITY_UNSPECIFIED = 'HARM_PROBABILITY_UNSPECIFIED'
  """Harm probability unspecified."""
  NEGLIGIBLE = 'NEGLIGIBLE'
  """Negligible level of harm."""
  LOW = 'LOW'
  """Low level of harm."""
  MEDIUM = 'MEDIUM'
  """Medium level of harm."""
  HIGH = 'HIGH'
  """High level of harm."""


class HarmSeverity(_common.CaseInSensitiveEnum):
  """Output only.

  Harm severity levels in the content. This enum is not supported in Gemini API.
  """

  HARM_SEVERITY_UNSPECIFIED = 'HARM_SEVERITY_UNSPECIFIED'
  """Harm severity unspecified."""
  HARM_SEVERITY_NEGLIGIBLE = 'HARM_SEVERITY_NEGLIGIBLE'
  """Negligible level of harm severity."""
  HARM_SEVERITY_LOW = 'HARM_SEVERITY_LOW'
  """Low level of harm severity."""
  HARM_SEVERITY_MEDIUM = 'HARM_SEVERITY_MEDIUM'
  """Medium level of harm severity."""
  HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH'
  """High level of harm severity."""


class UrlRetrievalStatus(_common.CaseInSensitiveEnum):
  """Status of the url retrieval."""

  URL_RETRIEVAL_STATUS_UNSPECIFIED = 'URL_RETRIEVAL_STATUS_UNSPECIFIED'
  """Default value. This value is unused."""
  URL_RETRIEVAL_STATUS_SUCCESS = 'URL_RETRIEVAL_STATUS_SUCCESS'
  """Url retrieval is successful."""
  URL_RETRIEVAL_STATUS_ERROR = 'URL_RETRIEVAL_STATUS_ERROR'
  """Url retrieval is failed due to error."""
  URL_RETRIEVAL_STATUS_PAYWALL = 'URL_RETRIEVAL_STATUS_PAYWALL'
  """Url retrieval is failed because the content is behind paywall. This enum value is not supported in Vertex AI."""
  URL_RETRIEVAL_STATUS_UNSAFE = 'URL_RETRIEVAL_STATUS_UNSAFE'
  """Url retrieval is failed because the content is unsafe. This enum value is not supported in Vertex AI."""


class BlockedReason(_common.CaseInSensitiveEnum):
  """Output only. The reason why the prompt was blocked."""

  BLOCKED_REASON_UNSPECIFIED = 'BLOCKED_REASON_UNSPECIFIED'
  """The blocked reason is unspecified."""
  SAFETY = 'SAFETY'
  """The prompt was blocked for safety reasons."""
  OTHER = 'OTHER'
  """The prompt was blocked for other reasons. For example, it may be due to the prompt's language, or because it contains other harmful content."""
  BLOCKLIST = 'BLOCKLIST'
  """The prompt was blocked because it contains a term from the terminology blocklist."""
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
  """The prompt was blocked because it contains prohibited content."""
  IMAGE_SAFETY = 'IMAGE_SAFETY'
  """The prompt was blocked because it contains content that is unsafe for image generation."""
  MODEL_ARMOR = 'MODEL_ARMOR'
  """The prompt was blocked by Model Armor. This enum value is not supported in Gemini API."""
  JAILBREAK = 'JAILBREAK'
  """The prompt was blocked as a jailbreak attempt. This enum value is not supported in Gemini API."""


class TrafficType(_common.CaseInSensitiveEnum):
  """Output only.

  The traffic type for this request. This enum is not supported in Gemini API.
  """

  TRAFFIC_TYPE_UNSPECIFIED = 'TRAFFIC_TYPE_UNSPECIFIED'
  """Unspecified request traffic type."""
  ON_DEMAND = 'ON_DEMAND'
  """The request was processed using Pay-As-You-Go quota."""
  PROVISIONED_THROUGHPUT = 'PROVISIONED_THROUGHPUT'
  """Type for Provisioned Throughput traffic."""


class Modality(_common.CaseInSensitiveEnum):
  """Server content modalities."""

  MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED'
  """The modality is unspecified."""
  TEXT = 'TEXT'
  """Indicates the model should return text"""
  IMAGE = 'IMAGE'
  """Indicates the model should return images."""
  AUDIO = 'AUDIO'
  """Indicates the model should return audio."""


class MediaResolution(_common.CaseInSensitiveEnum):
  """The media resolution to use."""

  MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED'
  """Media resolution has not been set"""
  MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW'
  """Media resolution set to low (64 tokens)."""
  MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM'
  """Media resolution set to medium (256 tokens)."""
  MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH'
  """Media resolution set to high (zoomed reframing with 256 tokens)."""


class TuningMode(_common.CaseInSensitiveEnum):
  """Tuning mode. This enum is not supported in Gemini API."""

  TUNING_MODE_UNSPECIFIED = 'TUNING_MODE_UNSPECIFIED'
  """Tuning mode is unspecified."""
  TUNING_MODE_FULL = 'TUNING_MODE_FULL'
  """Full fine-tuning mode."""
  TUNING_MODE_PEFT_ADAPTER = 'TUNING_MODE_PEFT_ADAPTER'
  """PEFT adapter tuning mode."""


class AdapterSize(_common.CaseInSensitiveEnum):
  """Adapter size for tuning. This enum is not supported in Gemini API."""

  ADAPTER_SIZE_UNSPECIFIED = 'ADAPTER_SIZE_UNSPECIFIED'
  """Adapter size is unspecified."""
  ADAPTER_SIZE_ONE = 'ADAPTER_SIZE_ONE'
  """Adapter size 1."""
  ADAPTER_SIZE_TWO = 'ADAPTER_SIZE_TWO'
  """Adapter size 2."""
  ADAPTER_SIZE_FOUR = 'ADAPTER_SIZE_FOUR'
  """Adapter size 4."""
  ADAPTER_SIZE_EIGHT = 'ADAPTER_SIZE_EIGHT'
  """Adapter size 8."""
  ADAPTER_SIZE_SIXTEEN = 'ADAPTER_SIZE_SIXTEEN'
  """Adapter size 16."""
  ADAPTER_SIZE_THIRTY_TWO = 'ADAPTER_SIZE_THIRTY_TWO'
  """Adapter size 32."""


class JobState(_common.CaseInSensitiveEnum):
  """Job state."""

  JOB_STATE_UNSPECIFIED = 'JOB_STATE_UNSPECIFIED'
  """The job state is unspecified."""
  JOB_STATE_QUEUED = 'JOB_STATE_QUEUED'
  """The job has been just created or resumed and processing has not yet begun."""
  JOB_STATE_PENDING = 'JOB_STATE_PENDING'
  """The service is preparing to run the job."""
  JOB_STATE_RUNNING = 'JOB_STATE_RUNNING'
  """The job is in progress."""
  JOB_STATE_SUCCEEDED = 'JOB_STATE_SUCCEEDED'
  """The job completed successfully."""
  JOB_STATE_FAILED = 'JOB_STATE_FAILED'
  """The job failed."""
  JOB_STATE_CANCELLING = 'JOB_STATE_CANCELLING'
  """The job is being cancelled. From this state the job may only go to either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`."""
  JOB_STATE_CANCELLED = 'JOB_STATE_CANCELLED'
  """The job has been cancelled."""
  JOB_STATE_PAUSED = 'JOB_STATE_PAUSED'
  """The job has been stopped, and can be resumed."""
  JOB_STATE_EXPIRED = 'JOB_STATE_EXPIRED'
  """The job has expired."""
  JOB_STATE_UPDATING = 'JOB_STATE_UPDATING'
  """The job is being updated. Only jobs in the `JOB_STATE_RUNNING` state can be updated. After updating, the job goes back to the `JOB_STATE_RUNNING` state."""
  JOB_STATE_PARTIALLY_SUCCEEDED = 'JOB_STATE_PARTIALLY_SUCCEEDED'
  """The job is partially succeeded, some results may be missing due to errors."""


class TuningTask(_common.CaseInSensitiveEnum):
  """The tuning task.

  Either I2V or T2V. This enum is not supported in Gemini API.
  """

  TUNING_TASK_UNSPECIFIED = 'TUNING_TASK_UNSPECIFIED'
  """Default value. This value is unused."""
  TUNING_TASK_I2V = 'TUNING_TASK_I2V'
  """Tuning task for image to video."""
  TUNING_TASK_T2V = 'TUNING_TASK_T2V'
  """Tuning task for text to video."""
  TUNING_TASK_R2V = 'TUNING_TASK_R2V'
  """Tuning task for reference to video."""


class PartMediaResolutionLevel(_common.CaseInSensitiveEnum):
  """The tokenization quality used for given media."""

  MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED'
  """Media resolution has not been set."""
  MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW'
  """Media resolution set to low."""
  MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM'
  """Media resolution set to medium."""
  MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH'
  """Media resolution set to high."""


class ResourceScope(_common.CaseInSensitiveEnum):
  """Resource scope."""

  COLLECTION = 'COLLECTION'
  """When setting base_url, this value configures resource scope to be the collection.
      The resource name will not include api version, project, or location.
      For example, if base_url is set to "https://aiplatform.googleapis.com",
      then the resource name for a Model would be
      "https://aiplatform.googleapis.com/publishers/google/models/gemini-3-pro-preview"""


class JSONSchemaType(Enum):
  """The type of the data supported by JSON Schema.

  The values of the enums are lower case strings, while the values of the enums
  for the Type class are upper case strings.
  """

  # Member values mirror the JSON Schema "type" keyword vocabulary.
  NULL = 'null'
  BOOLEAN = 'boolean'
  OBJECT = 'object'
  ARRAY = 'array'
  NUMBER = 'number'
  INTEGER = 'integer'
  STRING = 'string'


class FeatureSelectionPreference(_common.CaseInSensitiveEnum):
  """Options for feature selection preference."""

  # Unspecified preference; the default sentinel value.
  FEATURE_SELECTION_PREFERENCE_UNSPECIFIED = (
      'FEATURE_SELECTION_PREFERENCE_UNSPECIFIED'
  )
  # Prefer higher quality over cost.
  PRIORITIZE_QUALITY = 'PRIORITIZE_QUALITY'
  # Balance quality and cost.
  BALANCED = 'BALANCED'
  # Prefer lower cost over quality.
  PRIORITIZE_COST = 'PRIORITIZE_COST'


class Behavior(_common.CaseInSensitiveEnum):
  """Defines the function behavior. Defaults to `BLOCKING`."""

  UNSPECIFIED = 'UNSPECIFIED'
  """This value is unused."""
  BLOCKING = 'BLOCKING'
  """If set, the system will wait to receive the function response before continuing the conversation."""
  NON_BLOCKING = 'NON_BLOCKING'
  """If set, the system will not wait to receive the function response. Instead, it will attempt to handle function responses as they become available while maintaining the conversation between the user and the model."""


class DynamicRetrievalConfigMode(_common.CaseInSensitiveEnum):
  """Config for the dynamic retrieval config mode."""

  MODE_UNSPECIFIED = 'MODE_UNSPECIFIED'
  """Always trigger retrieval."""
  MODE_DYNAMIC = 'MODE_DYNAMIC'
  """Run retrieval only when system decides it is necessary."""


class Environment(_common.CaseInSensitiveEnum):
  """The environment being operated."""

  ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED'
  """Defaults to browser."""
  ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER'
  """Operates in a web browser."""


class FunctionCallingConfigMode(_common.CaseInSensitiveEnum):
  """Config for the function calling config mode."""

  MODE_UNSPECIFIED = 'MODE_UNSPECIFIED'
  """The function calling config mode is unspecified. Should not be used."""
  AUTO = 'AUTO'
  """Default model behavior, model decides to predict either function calls or natural language response."""
  ANY = 'ANY'
  """Model is constrained to always predicting function calls only. If "allowed_function_names" are set, the predicted function calls will be limited to any one of "allowed_function_names", else the predicted function calls will be any one of the provided "function_declarations"."""
  NONE = 'NONE'
  """Model will not predict any function calls. Model behavior is same as when not passing any function declarations."""
  VALIDATED = 'VALIDATED'
  """Model decides to predict either a function call or a natural language response, but will validate function calls with constrained decoding. If "allowed_function_names" are set, the predicted function call will be limited to any one of "allowed_function_names", else the predicted function call will be any one of the provided "function_declarations"."""


class SafetyFilterLevel(_common.CaseInSensitiveEnum):
  """Enum that controls the safety filter level for objectionable content."""

  # Block low-confidence and above (i.e. filter the most).
  BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE'
  # Block medium-confidence and above.
  BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE'
  # Block only high-confidence content (i.e. filter the least).
  BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH'
  # Do not block any content.
  BLOCK_NONE = 'BLOCK_NONE'


class PersonGeneration(_common.CaseInSensitiveEnum):
  """Enum that controls the generation of people."""

  DONT_ALLOW = 'DONT_ALLOW'
  """Block generation of images of people."""
  ALLOW_ADULT = 'ALLOW_ADULT'
  """Generate images of adults, but not children."""
  ALLOW_ALL = 'ALLOW_ALL'
  """Generate images that include adults and children."""


class ImagePromptLanguage(_common.CaseInSensitiveEnum):
  """Enum that specifies the language of the text in the prompt."""

  # Member names are lowercase ISO 639-1 style codes, matching the wire format.
  auto = 'auto'
  """Auto-detect the language."""
  en = 'en'
  """English"""
  ja = 'ja'
  """Japanese"""
  ko = 'ko'
  """Korean"""
  hi = 'hi'
  """Hindi"""
  zh = 'zh'
  """Chinese"""
  pt = 'pt'
  """Portuguese"""
  es = 'es'
  """Spanish"""


class MaskReferenceMode(_common.CaseInSensitiveEnum):
  """Enum representing the mask mode of a mask reference image."""

  MASK_MODE_DEFAULT = 'MASK_MODE_DEFAULT'
  MASK_MODE_USER_PROVIDED = 'MASK_MODE_USER_PROVIDED'
  MASK_MODE_BACKGROUND = 'MASK_MODE_BACKGROUND'
  MASK_MODE_FOREGROUND = 'MASK_MODE_FOREGROUND'
  MASK_MODE_SEMANTIC = 'MASK_MODE_SEMANTIC'


class ControlReferenceType(_common.CaseInSensitiveEnum):
  """Enum representing the control type of a control reference image."""

  CONTROL_TYPE_DEFAULT = 'CONTROL_TYPE_DEFAULT'
  CONTROL_TYPE_CANNY = 'CONTROL_TYPE_CANNY'
  CONTROL_TYPE_SCRIBBLE = 'CONTROL_TYPE_SCRIBBLE'
  CONTROL_TYPE_FACE_MESH = 'CONTROL_TYPE_FACE_MESH'


class SubjectReferenceType(_common.CaseInSensitiveEnum):
  """Enum representing the subject type of a subject reference image."""

  SUBJECT_TYPE_DEFAULT = 'SUBJECT_TYPE_DEFAULT'
  SUBJECT_TYPE_PERSON = 'SUBJECT_TYPE_PERSON'
  SUBJECT_TYPE_ANIMAL = 'SUBJECT_TYPE_ANIMAL'
  SUBJECT_TYPE_PRODUCT = 'SUBJECT_TYPE_PRODUCT'


class EditMode(_common.CaseInSensitiveEnum):
  """Enum representing the editing mode."""

  EDIT_MODE_DEFAULT = 'EDIT_MODE_DEFAULT'
  EDIT_MODE_INPAINT_REMOVAL = 'EDIT_MODE_INPAINT_REMOVAL'
  EDIT_MODE_INPAINT_INSERTION = 'EDIT_MODE_INPAINT_INSERTION'
  EDIT_MODE_OUTPAINT = 'EDIT_MODE_OUTPAINT'
  EDIT_MODE_CONTROLLED_EDITING = 'EDIT_MODE_CONTROLLED_EDITING'
  EDIT_MODE_STYLE = 'EDIT_MODE_STYLE'
  EDIT_MODE_BGSWAP = 'EDIT_MODE_BGSWAP'
  EDIT_MODE_PRODUCT_IMAGE = 'EDIT_MODE_PRODUCT_IMAGE'


class SegmentMode(_common.CaseInSensitiveEnum):
  """Enum that represents the segmentation mode."""

  FOREGROUND = 'FOREGROUND'
  BACKGROUND = 'BACKGROUND'
  PROMPT = 'PROMPT'
  SEMANTIC = 'SEMANTIC'
  INTERACTIVE = 'INTERACTIVE'


class VideoGenerationReferenceType(_common.CaseInSensitiveEnum):
  """Enum for the reference type of a video generation reference image."""

  ASSET = 'ASSET'
  """A reference image that provides assets to the generated video,
      such as the scene, an object, a character, etc."""
  STYLE = 'STYLE'
  """A reference image that provides aesthetics including colors,
      lighting, texture, etc., to be used as the style of the generated video,
      such as 'anime', 'photography', 'origami', etc."""


class VideoGenerationMaskMode(_common.CaseInSensitiveEnum):
  """Enum for the mask mode of a video generation mask."""

  INSERT = 'INSERT'
  """The image mask contains a masked rectangular region which is
      applied on the first frame of the input video. The object described in
      the prompt is inserted into this region and will appear in subsequent
      frames."""
  REMOVE = 'REMOVE'
  """The image mask is used to determine an object in the
      first video frame to track. This object is removed from the video."""
  REMOVE_STATIC = 'REMOVE_STATIC'
  """The image mask is used to determine a region in the
      video. Objects in this region will be removed."""
  OUTPAINT = 'OUTPAINT'
  """The image mask contains a masked rectangular region where
      the input video will go. The remaining area will be generated. Video
      masks are not supported."""


class VideoCompressionQuality(_common.CaseInSensitiveEnum):
  """Enum that controls the compression quality of the generated videos."""

  OPTIMIZED = 'OPTIMIZED'
  """Optimized video compression quality. This will produce videos
      with a compressed, smaller file size."""
  LOSSLESS = 'LOSSLESS'
  """Lossless video compression quality. This will produce videos
      with a larger file size."""


class TuningMethod(_common.CaseInSensitiveEnum):
  """Enum representing the tuning method."""

  SUPERVISED_FINE_TUNING = 'SUPERVISED_FINE_TUNING'
  """Supervised fine tuning."""
  PREFERENCE_TUNING = 'PREFERENCE_TUNING'
  """Preference optimization tuning."""


class DocumentState(_common.CaseInSensitiveEnum):
  """State for the lifecycle of a Document."""

  STATE_UNSPECIFIED = 'STATE_UNSPECIFIED'
  STATE_PENDING = 'STATE_PENDING'
  STATE_ACTIVE = 'STATE_ACTIVE'
  STATE_FAILED = 'STATE_FAILED'


class FileState(_common.CaseInSensitiveEnum):
  """State for the lifecycle of a File."""

  STATE_UNSPECIFIED = 'STATE_UNSPECIFIED'
  PROCESSING = 'PROCESSING'
  ACTIVE = 'ACTIVE'
  FAILED = 'FAILED'


class FileSource(_common.CaseInSensitiveEnum):
  """Source of the File."""

  SOURCE_UNSPECIFIED = 'SOURCE_UNSPECIFIED'
  UPLOADED = 'UPLOADED'
  GENERATED = 'GENERATED'


class TurnCompleteReason(_common.CaseInSensitiveEnum):
  """The reason why the turn is complete."""

  TURN_COMPLETE_REASON_UNSPECIFIED = 'TURN_COMPLETE_REASON_UNSPECIFIED'
  """Default value. Reason is unspecified."""
  MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL'
  """The function call generated by the model is invalid."""
  RESPONSE_REJECTED = 'RESPONSE_REJECTED'
  """The response is rejected by the model."""
  NEED_MORE_INPUT = 'NEED_MORE_INPUT'
  """Needs more input from the user."""


class MediaModality(_common.CaseInSensitiveEnum):
  """Server content modalities."""

  MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED'
  """The modality is unspecified."""
  TEXT = 'TEXT'
  """Plain text."""
  IMAGE = 'IMAGE'
  """Images."""
  VIDEO = 'VIDEO'
  """Video."""
  AUDIO = 'AUDIO'
  """Audio."""
  DOCUMENT = 'DOCUMENT'
  """Document, e.g. PDF."""


class StartSensitivity(_common.CaseInSensitiveEnum):
  """Start of speech sensitivity."""

  START_SENSITIVITY_UNSPECIFIED = 'START_SENSITIVITY_UNSPECIFIED'
  """The default is START_SENSITIVITY_LOW."""
  START_SENSITIVITY_HIGH = 'START_SENSITIVITY_HIGH'
  """Automatic detection will detect the start of speech more often."""
  START_SENSITIVITY_LOW = 'START_SENSITIVITY_LOW'
  """Automatic detection will detect the start of speech less often."""


class EndSensitivity(_common.CaseInSensitiveEnum):
  """End of speech sensitivity."""

  END_SENSITIVITY_UNSPECIFIED = 'END_SENSITIVITY_UNSPECIFIED'
  """The default is END_SENSITIVITY_LOW."""
  END_SENSITIVITY_HIGH = 'END_SENSITIVITY_HIGH'
  """Automatic detection ends speech more often."""
  END_SENSITIVITY_LOW = 'END_SENSITIVITY_LOW'
  """Automatic detection ends speech less often."""


class ActivityHandling(_common.CaseInSensitiveEnum):
  """The different ways of handling user activity."""

  ACTIVITY_HANDLING_UNSPECIFIED = 'ACTIVITY_HANDLING_UNSPECIFIED'
  """If unspecified, the default behavior is `START_OF_ACTIVITY_INTERRUPTS`."""
  START_OF_ACTIVITY_INTERRUPTS = 'START_OF_ACTIVITY_INTERRUPTS'
  """If true, start of activity will interrupt the model's response (also called "barge in"). The model's current response will be cut-off in the moment of the interruption. This is the default behavior."""
  NO_INTERRUPTION = 'NO_INTERRUPTION'
  """The model's response will not be interrupted."""


class TurnCoverage(_common.CaseInSensitiveEnum):
  """Options about which input is included in the user's turn."""

  TURN_COVERAGE_UNSPECIFIED = 'TURN_COVERAGE_UNSPECIFIED'
  """If unspecified, the default behavior is `TURN_INCLUDES_ONLY_ACTIVITY`."""
  TURN_INCLUDES_ONLY_ACTIVITY = 'TURN_INCLUDES_ONLY_ACTIVITY'
  """The users turn only includes activity since the last turn, excluding inactivity (e.g. silence on the audio stream). This is the default behavior."""
  TURN_INCLUDES_ALL_INPUT = 'TURN_INCLUDES_ALL_INPUT'
  """The users turn includes all realtime input since the last turn, including inactivity (e.g. silence on the audio stream)."""


class Scale(_common.CaseInSensitiveEnum):
  """Scale of the generated music."""

  SCALE_UNSPECIFIED = 'SCALE_UNSPECIFIED'
  """Default value. This value is unused."""
  C_MAJOR_A_MINOR = 'C_MAJOR_A_MINOR'
  """C major or A minor."""
  D_FLAT_MAJOR_B_FLAT_MINOR = 'D_FLAT_MAJOR_B_FLAT_MINOR'
  """Db major or Bb minor."""
  D_MAJOR_B_MINOR = 'D_MAJOR_B_MINOR'
  """D major or B minor."""
  E_FLAT_MAJOR_C_MINOR = 'E_FLAT_MAJOR_C_MINOR'
  """Eb major or C minor"""
  E_MAJOR_D_FLAT_MINOR = 'E_MAJOR_D_FLAT_MINOR'
  """E major or Db minor."""
  F_MAJOR_D_MINOR = 'F_MAJOR_D_MINOR'
  """F major or D minor."""
  G_FLAT_MAJOR_E_FLAT_MINOR = 'G_FLAT_MAJOR_E_FLAT_MINOR'
  """Gb major or Eb minor."""
  G_MAJOR_E_MINOR = 'G_MAJOR_E_MINOR'
  """G major or E minor."""
  A_FLAT_MAJOR_F_MINOR = 'A_FLAT_MAJOR_F_MINOR'
  """Ab major or F minor."""
  A_MAJOR_G_FLAT_MINOR = 'A_MAJOR_G_FLAT_MINOR'
  """A major or Gb minor."""
  B_FLAT_MAJOR_G_MINOR = 'B_FLAT_MAJOR_G_MINOR'
  """Bb major or G minor."""
  B_MAJOR_A_FLAT_MINOR = 'B_MAJOR_A_FLAT_MINOR'
  """B major or Ab minor."""


class MusicGenerationMode(_common.CaseInSensitiveEnum):
  """The mode of music generation."""

  # Values are matched case-insensitively (behavior inherited from
  # _common.CaseInSensitiveEnum).
  MUSIC_GENERATION_MODE_UNSPECIFIED = 'MUSIC_GENERATION_MODE_UNSPECIFIED'
  """Rely on the server default generation mode."""
  QUALITY = 'QUALITY'
  """Steer text prompts to regions of latent space with higher quality
      music."""
  DIVERSITY = 'DIVERSITY'
  """Steer text prompts to regions of latent space with a larger
      diversity of music."""
  VOCALIZATION = 'VOCALIZATION'
  """Steer text prompts to regions of latent space more likely to
      generate music with vocals."""


class LiveMusicPlaybackControl(_common.CaseInSensitiveEnum):
  """The playback control signal to apply to the music generation."""

  # Values are matched case-insensitively (behavior inherited from
  # _common.CaseInSensitiveEnum).
  PLAYBACK_CONTROL_UNSPECIFIED = 'PLAYBACK_CONTROL_UNSPECIFIED'
  """This value is unused."""
  PLAY = 'PLAY'
  """Start generating the music."""
  PAUSE = 'PAUSE'
  """Hold the music generation. Use PLAY to resume from the current position."""
  STOP = 'STOP'
  """Stop the music generation and reset the context (prompts retained).
      Use PLAY to restart the music generation."""
  RESET_CONTEXT = 'RESET_CONTEXT'
  """Reset the context of the music generation without stopping it.
      Retains the current prompts and config."""


class PartMediaResolution(_common.BaseModel):
  """Media resolution for the input media."""

  # `level` uses PartMediaResolutionLevel, declared earlier in this module.
  # Field descriptions are pydantic metadata, not docstrings.
  level: Optional[PartMediaResolutionLevel] = Field(
      default=None,
      description="""The tokenization quality used for given media.
    """,
  )
  num_tokens: Optional[int] = Field(
      default=None,
      description="""Specifies the required sequence length for media tokenization.
    """,
  )


class PartMediaResolutionDict(TypedDict, total=False):
  """Media resolution for the input media."""

  # TypedDict mirror of PartMediaResolution; all keys optional (total=False).
  level: Optional[PartMediaResolutionLevel]
  """The tokenization quality used for given media.
    """

  num_tokens: Optional[int]
  """Specifies the required sequence length for media tokenization.
    """


PartMediaResolutionOrDict = Union[PartMediaResolution, PartMediaResolutionDict]


class CodeExecutionResult(_common.BaseModel):
  """Result of executing the [ExecutableCode].

  Only generated when using the [CodeExecution] tool, and always follows a
  `part` containing the [ExecutableCode].
  """

  # NOTE: fields marked "Required." in their descriptions document the API
  # contract; at the pydantic level every field is Optional with default None.
  outcome: Optional[Outcome] = Field(
      default=None, description="""Required. Outcome of the code execution."""
  )
  output: Optional[str] = Field(
      default=None,
      description="""Optional. Contains stdout when code execution is successful, stderr or other description otherwise.""",
  )


class CodeExecutionResultDict(TypedDict, total=False):
  """Result of executing the [ExecutableCode].

  Only generated when using the [CodeExecution] tool, and always follows a
  `part` containing the [ExecutableCode].
  """

  # TypedDict mirror of CodeExecutionResult; all keys optional (total=False).
  outcome: Optional[Outcome]
  """Required. Outcome of the code execution."""

  output: Optional[str]
  """Optional. Contains stdout when code execution is successful, stderr or other description otherwise."""


CodeExecutionResultOrDict = Union[CodeExecutionResult, CodeExecutionResultDict]


class ExecutableCode(_common.BaseModel):
  """Code generated by the model that is meant to be executed, and the result returned to the model.

  Generated when using the [CodeExecution] tool, in which the code will be
  automatically executed, and a corresponding [CodeExecutionResult] will also be
  generated.
  """

  # NOTE: fields marked "Required." in their descriptions document the API
  # contract; at the pydantic level every field is Optional with default None.
  code: Optional[str] = Field(
      default=None, description="""Required. The code to be executed."""
  )
  language: Optional[Language] = Field(
      default=None,
      description="""Required. Programming language of the `code`.""",
  )


class ExecutableCodeDict(TypedDict, total=False):
  """Code generated by the model that is meant to be executed, and the result returned to the model.

  Generated when using the [CodeExecution] tool, in which the code will be
  automatically executed, and a corresponding [CodeExecutionResult] will also be
  generated.
  """

  # TypedDict mirror of ExecutableCode; all keys optional (total=False).
  code: Optional[str]
  """Required. The code to be executed."""

  language: Optional[Language]
  """Required. Programming language of the `code`."""


ExecutableCodeOrDict = Union[ExecutableCode, ExecutableCodeDict]


class FileData(_common.BaseModel):
  """URI based data."""

  # NOTE: fields marked "Required." in their descriptions document the API
  # contract; at the pydantic level every field is Optional with default None.
  display_name: Optional[str] = Field(
      default=None,
      description="""Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API.""",
  )
  file_uri: Optional[str] = Field(
      default=None, description="""Required. URI."""
  )
  mime_type: Optional[str] = Field(
      default=None,
      description="""Required. The IANA standard MIME type of the source data.""",
  )


class FileDataDict(TypedDict, total=False):
  """URI based data."""

  # TypedDict mirror of FileData; all keys optional (total=False).
  display_name: Optional[str]
  """Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API."""

  file_uri: Optional[str]
  """Required. URI."""

  mime_type: Optional[str]
  """Required. The IANA standard MIME type of the source data."""


FileDataOrDict = Union[FileData, FileDataDict]


class PartialArg(_common.BaseModel):
  """Partial argument value of the function call.

  This data type is not supported in Gemini API.
  """

  # Exactly one of the *_value fields is expected to carry the streamed value;
  # `json_path` addresses where it belongs in the argument object.
  null_value: Optional[Literal['NULL_VALUE']] = Field(
      default=None, description="""Optional. Represents a null value."""
  )
  number_value: Optional[float] = Field(
      default=None, description="""Optional. Represents a double value."""
  )
  string_value: Optional[str] = Field(
      default=None, description="""Optional. Represents a string value."""
  )
  bool_value: Optional[bool] = Field(
      default=None, description="""Optional. Represents a boolean value."""
  )
  json_path: Optional[str] = Field(
      default=None,
      description="""Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""",
  )
  will_continue: Optional[bool] = Field(
      default=None,
      description="""Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.""",
  )


class PartialArgDict(TypedDict, total=False):
  """Partial argument value of the function call.

  This data type is not supported in Gemini API.
  """

  # TypedDict mirror of PartialArg; all keys optional (total=False).
  null_value: Optional[Literal['NULL_VALUE']]
  """Optional. Represents a null value."""

  number_value: Optional[float]
  """Optional. Represents a double value."""

  string_value: Optional[str]
  """Optional. Represents a string value."""

  bool_value: Optional[bool]
  """Optional. Represents a boolean value."""

  json_path: Optional[str]
  """Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data"."""

  will_continue: Optional[bool]
  """Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow."""


PartialArgOrDict = Union[PartialArg, PartialArgDict]


class FunctionCall(_common.BaseModel):
  """A function call."""

  id: Optional[str] = Field(
      default=None,
      description="""The unique id of the function call. If populated, the client to execute the
   `function_call` and return the response with the matching `id`.""",
  )
  args: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.""",
  )
  name: Optional[str] = Field(
      default=None,
      description="""Optional. The name of the function to call. Matches [FunctionDeclaration.name].""",
  )
  partial_args: Optional[list[PartialArg]] = Field(
      default=None,
      description="""Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally. This field is not supported in Gemini API.""",
  )
  # NOTE(review): the upstream description says "whether this is the last
  # part" yet states that more messages follow when true; the wording appears
  # inverted (compare PartialArg.will_continue). Description is generated API
  # metadata, so it is left unchanged here — confirm against the API docs.
  will_continue: Optional[bool] = Field(
      default=None,
      description="""Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow. This field is not supported in Gemini API.""",
  )


class FunctionCallDict(TypedDict, total=False):
  """A function call."""

  # TypedDict mirror of FunctionCall; all keys optional (total=False).
  id: Optional[str]
  """The unique id of the function call. If populated, the client to execute the
   `function_call` and return the response with the matching `id`."""

  args: Optional[dict[str, Any]]
  """Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details."""

  name: Optional[str]
  """Optional. The name of the function to call. Matches [FunctionDeclaration.name]."""

  partial_args: Optional[list[PartialArgDict]]
  """Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally. This field is not supported in Gemini API."""

  will_continue: Optional[bool]
  """Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow. This field is not supported in Gemini API."""


FunctionCallOrDict = Union[FunctionCall, FunctionCallDict]


class FunctionResponseBlob(_common.BaseModel):
  """Raw media bytes for function response.

  Text should not be sent as raw bytes, use the FunctionResponse.response
  field.
  """

  # NOTE: fields marked "Required." in their descriptions document the API
  # contract; at the pydantic level every field is Optional with default None.
  mime_type: Optional[str] = Field(
      default=None,
      description="""Required. The IANA standard MIME type of the source data.""",
  )
  data: Optional[bytes] = Field(
      default=None, description="""Required. Inline media bytes."""
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""Optional. Display name of the blob.
      Used to provide a label or filename to distinguish blobs.""",
  )


class FunctionResponseBlobDict(TypedDict, total=False):
  """Raw media bytes for function response.

  Text should not be sent as raw bytes, use the FunctionResponse.response
  field.
  """

  # TypedDict mirror of FunctionResponseBlob; all keys optional (total=False).
  mime_type: Optional[str]
  """Required. The IANA standard MIME type of the source data."""

  data: Optional[bytes]
  """Required. Inline media bytes."""

  display_name: Optional[str]
  """Optional. Display name of the blob.
      Used to provide a label or filename to distinguish blobs."""


# Accepts either the pydantic model or its TypedDict form.
FunctionResponseBlobOrDict = Union[
    FunctionResponseBlob, FunctionResponseBlobDict
]


class FunctionResponseFileData(_common.BaseModel):
  """URI based data for function response."""

  # NOTE: fields marked "Required." in their descriptions document the API
  # contract; at the pydantic level every field is Optional with default None.
  file_uri: Optional[str] = Field(
      default=None, description="""Required. URI."""
  )
  mime_type: Optional[str] = Field(
      default=None,
      description="""Required. The IANA standard MIME type of the source data.""",
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""Optional. Display name of the file.
      Used to provide a label or filename to distinguish files.""",
  )


class FunctionResponseFileDataDict(TypedDict, total=False):
  """URI based data for function response."""

  # TypedDict mirror of FunctionResponseFileData; all keys optional
  # (total=False).
  file_uri: Optional[str]
  """Required. URI."""

  mime_type: Optional[str]
  """Required. The IANA standard MIME type of the source data."""

  display_name: Optional[str]
  """Optional. Display name of the file.
      Used to provide a label or filename to distinguish files."""


# Accepts either the pydantic model or its TypedDict form.
FunctionResponseFileDataOrDict = Union[
    FunctionResponseFileData, FunctionResponseFileDataDict
]


class FunctionResponsePart(_common.BaseModel):
  """A datatype containing media that is part of a `FunctionResponse` message.

  A `FunctionResponsePart` consists of data which has an associated datatype. A
  `FunctionResponsePart` can only contain one of the accepted types in
  `FunctionResponsePart.data`.

  A `FunctionResponsePart` must have a fixed IANA MIME type identifying the
  type and subtype of the media if the `inline_data` field is filled with raw
  bytes.
  """

  inline_data: Optional[FunctionResponseBlob] = Field(
      default=None, description="""Optional. Inline media bytes."""
  )
  file_data: Optional[FunctionResponseFileData] = Field(
      default=None, description="""Optional. URI based data."""
  )

  @classmethod
  def from_bytes(cls, *, data: bytes, mime_type: str) -> 'FunctionResponsePart':
    """Builds a FunctionResponsePart carrying inline media bytes.

    Args:
      data (bytes): The raw bytes of the media.
      mime_type (str): The IANA MIME type of the data.
    """
    return cls(
        inline_data=FunctionResponseBlob(data=data, mime_type=mime_type)
    )

  @classmethod
  def from_uri(
      cls, *, file_uri: str, mime_type: Optional[str] = None
  ) -> 'FunctionResponsePart':
    """Builds a FunctionResponsePart referencing a file by URI.

    Args:
      file_uri (str): The URI of the file.
      mime_type (str): The MIME type of the file. When omitted, it is guessed
        from the URI.

    Raises:
      ValueError: If no MIME type was given and none could be guessed.
    """
    if mime_type is None:
      import mimetypes

      guessed, _ = mimetypes.guess_type(file_uri)
      if not guessed:
        raise ValueError(f'Failed to determine mime type for file: {file_uri}')
      mime_type = guessed
    return cls(
        file_data=FunctionResponseFileData(file_uri=file_uri, mime_type=mime_type)
    )


class FunctionResponsePartDict(TypedDict, total=False):
  """A datatype containing media that is part of a `FunctionResponse` message.

  A `FunctionResponsePart` consists of data which has an associated datatype. A
  `FunctionResponsePart` can only contain one of the accepted types in
  `FunctionResponsePart.data`.

  A `FunctionResponsePart` must have a fixed IANA MIME type identifying the
  type and subtype of the media if the `inline_data` field is filled with raw
  bytes.
  """

  # TypedDict mirror of FunctionResponsePart; all keys optional (total=False).
  inline_data: Optional[FunctionResponseBlobDict]
  """Optional. Inline media bytes."""

  file_data: Optional[FunctionResponseFileDataDict]
  """Optional. URI based data."""


# Accepts either the pydantic model or its TypedDict form.
FunctionResponsePartOrDict = Union[
    FunctionResponsePart, FunctionResponsePartDict
]


class FunctionResponse(_common.BaseModel):
  """A function response."""

  will_continue: Optional[bool] = Field(
      default=None,
      description="""Signals that function call continues, and more responses will be returned, turning the function call into a generator. Is only applicable to NON_BLOCKING function calls (see FunctionDeclaration.behavior for details), ignored otherwise. If false, the default, future responses will not be considered. Is only applicable to NON_BLOCKING function calls, is ignored otherwise. If set to false, future responses will not be considered. It is allowed to return empty `response` with `will_continue=False` to signal that the function call is finished.""",
  )
  scheduling: Optional[FunctionResponseScheduling] = Field(
      default=None,
      description="""Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE.""",
  )
  parts: Optional[list[FunctionResponsePart]] = Field(
      default=None,
      description="""List of parts that constitute a function response. Each part may
      have a different IANA MIME type.""",
  )
  id: Optional[str] = Field(
      default=None,
      description="""Optional. The id of the function call this response is for. Populated by the client to match the corresponding function call `id`.""",
  )
  name: Optional[str] = Field(
      default=None,
      description="""Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name].""",
  )
  response: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.""",
  )

  @classmethod
  def from_mcp_response(
      cls, *, name: str, response: McpCallToolResult
  ) -> 'FunctionResponse':
    """Builds a FunctionResponse from an MCP call-tool result.

    Error results map to ``{'error': ...}`` and successful results to
    ``{'result': <content>}`` in the `response` field.

    Args:
      name: The function name, matching the corresponding `FunctionCall.name`.
      response: The MCP `CallToolResult` to wrap.

    Raises:
      ValueError: If the MCP library is not available (`_is_mcp_imported` is
        falsy).
    """
    if not _is_mcp_imported:
      raise ValueError(
          'MCP response is not supported. Please ensure that the MCP library is'
          ' imported.'
      )

    if response.isError:
      return cls(name=name, response={'error': 'MCP response is error.'})
    else:
      return cls(name=name, response={'result': response.content})


class FunctionResponseDict(TypedDict, total=False):
  """A function response."""

  # TypedDict mirror of FunctionResponse; all keys optional (total=False).
  will_continue: Optional[bool]
  """Signals that function call continues, and more responses will be returned, turning the function call into a generator. Is only applicable to NON_BLOCKING function calls (see FunctionDeclaration.behavior for details), ignored otherwise. If false, the default, future responses will not be considered. Is only applicable to NON_BLOCKING function calls, is ignored otherwise. If set to false, future responses will not be considered. It is allowed to return empty `response` with `will_continue=False` to signal that the function call is finished."""

  scheduling: Optional[FunctionResponseScheduling]
  """Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE."""

  parts: Optional[list[FunctionResponsePartDict]]
  """List of parts that constitute a function response. Each part may
      have a different IANA MIME type."""

  id: Optional[str]
  """Optional. The id of the function call this response is for. Populated by the client to match the corresponding function call `id`."""

  name: Optional[str]
  """Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]."""

  response: Optional[dict[str, Any]]
  """Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output."""


FunctionResponseOrDict = Union[FunctionResponse, FunctionResponseDict]


class Blob(_common.BaseModel):
  """Content blob."""

  data: Optional[bytes] = Field(
      default=None, description="""Required. Raw bytes."""
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API.""",
  )
  mime_type: Optional[str] = Field(
      default=None,
      description="""Required. The IANA standard MIME type of the source data.""",
  )

  def as_image(self) -> Optional['Image']:
    """Returns the Blob as an Image, or None if the Blob is not an image.

    A Blob counts as an image only when both `data` and `mime_type` are set
    and the MIME type starts with 'image/'.
    """
    if (
        not self.data
        or not self.mime_type
        or not self.mime_type.startswith('image/')
    ):
      return None
    return Image(
        image_bytes=self.data,
        mime_type=self.mime_type,
    )


class BlobDict(TypedDict, total=False):
  """Content blob."""

  # TypedDict mirror of Blob; all keys optional (total=False).
  data: Optional[bytes]
  """Required. Raw bytes."""

  display_name: Optional[str]
  """Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API."""

  mime_type: Optional[str]
  """Required. The IANA standard MIME type of the source data."""


BlobOrDict = Union[Blob, BlobDict]


class VideoMetadata(_common.BaseModel):
  """Metadata describes the input video content."""

  # NOTE(review): offsets are plain strings — presumably duration-encoded
  # (e.g. '3.5s'); confirm the exact format against the API reference.
  end_offset: Optional[str] = Field(
      default=None, description="""Optional. The end offset of the video."""
  )
  fps: Optional[float] = Field(
      default=None,
      description="""Optional. The frame rate of the video sent to the model. If not specified, the default value will be 1.0. The fps range is (0.0, 24.0].""",
  )
  start_offset: Optional[str] = Field(
      default=None, description="""Optional. The start offset of the video."""
  )


class VideoMetadataDict(TypedDict, total=False):
  """Metadata describes the input video content."""

  # TypedDict mirror of VideoMetadata; all keys optional (total=False).
  end_offset: Optional[str]
  """Optional. The end offset of the video."""

  fps: Optional[float]
  """Optional. The frame rate of the video sent to the model. If not specified, the default value will be 1.0. The fps range is (0.0, 24.0]."""

  start_offset: Optional[str]
  """Optional. The start offset of the video."""


VideoMetadataOrDict = Union[VideoMetadata, VideoMetadataDict]


class Part(_common.BaseModel):
  """A datatype containing media content.

  Exactly one field within a Part should be set, representing the specific type
  of content being conveyed. Using multiple fields within the same `Part`
  instance is considered invalid.
  """

  media_resolution: Optional[PartMediaResolution] = Field(
      default=None,
      description="""Media resolution for the input media.
    """,
  )
  code_execution_result: Optional[CodeExecutionResult] = Field(
      default=None,
      description="""Optional. Result of executing the [ExecutableCode].""",
  )
  executable_code: Optional[ExecutableCode] = Field(
      default=None,
      description="""Optional. Code generated by the model that is meant to be executed.""",
  )
  file_data: Optional[FileData] = Field(
      default=None, description="""Optional. URI based data."""
  )
  function_call: Optional[FunctionCall] = Field(
      default=None,
      description="""Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""",
  )
  function_response: Optional[FunctionResponse] = Field(
      default=None,
      description="""Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""",
  )
  inline_data: Optional[Blob] = Field(
      default=None, description="""Optional. Inlined bytes data."""
  )
  text: Optional[str] = Field(
      default=None, description="""Optional. Text part (can be code)."""
  )
  thought: Optional[bool] = Field(
      default=None,
      description="""Optional. Indicates if the part is thought from the model.""",
  )
  thought_signature: Optional[bytes] = Field(
      default=None,
      description="""Optional. An opaque signature for the thought so it can be reused in subsequent requests.""",
  )
  video_metadata: Optional[VideoMetadata] = Field(
      default=None,
      description="""Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.""",
  )

  def __init__(
      self,
      value: Optional['PartUnionDict'] = None,
      /,
      *,
      video_metadata: Optional[VideoMetadata] = None,
      thought: Optional[bool] = None,
      inline_data: Optional[Blob] = None,
      file_data: Optional[FileData] = None,
      thought_signature: Optional[bytes] = None,
      function_call: Optional[FunctionCall] = None,
      code_execution_result: Optional[CodeExecutionResult] = None,
      executable_code: Optional[ExecutableCode] = None,
      function_response: Optional[FunctionResponse] = None,
      text: Optional[str] = None,
      # Pydantic allows CamelCase in addition to snake_case attribute
      # names. kwargs here catch these aliases.
      **kwargs: Any,
  ):
    """Initializes a Part from a positional value or explicit keyword fields.

    The positional `value` is interpreted by type: a `str` becomes `text`;
    a `File` becomes `file_data` (requires `uri` and `mime_type`); a `dict`
    is validated as a full Part and falls back to `FileData` on validation
    failure; another `Part` is copied; an object whose class name contains
    'image' (PIL image) is serialized into `inline_data`. Positional and
    keyword arguments are mutually exclusive.

    Raises:
      ValueError: If both a positional value and keyword fields are given,
        if a `File` lacks `uri` or `mime_type`, or if `value` has an
        unsupported type.
    """
    part_dict = dict(
        video_metadata=video_metadata,
        thought=thought,
        inline_data=inline_data,
        file_data=file_data,
        thought_signature=thought_signature,
        function_call=function_call,
        code_execution_result=code_execution_result,
        executable_code=executable_code,
        function_response=function_response,
        text=text,
    )
    # Drop unset keywords so only explicitly-passed fields remain.
    part_dict = {k: v for k, v in part_dict.items() if v is not None}

    if part_dict and value is not None:
      raise ValueError(
          'Positional and keyword arguments can not be combined when '
          'initializing a Part.'
      )

    if value is None:
      pass
    elif isinstance(value, str):
      part_dict['text'] = value
    elif isinstance(value, File):
      if not value.uri or not value.mime_type:
        raise ValueError('file uri and mime_type are required.')
      part_dict['file_data'] = FileData(
          file_uri=value.uri,
          mime_type=value.mime_type,
          display_name=value.display_name,
      )
    elif isinstance(value, dict):
      # Prefer reading the dict as a full Part; on validation failure treat
      # it as a FileData payload instead.
      try:
        Part.model_validate(value)
        part_dict.update(value)  # type: ignore[arg-type]
      except pydantic.ValidationError:
        part_dict['file_data'] = FileData.model_validate(value)
    elif isinstance(value, Part):
      # NOTE(review): pydantic v2 deprecates .dict() in favor of
      # .model_dump(); left as-is in this generated code.
      part_dict.update(value.dict())
    elif 'image' in value.__class__.__name__.lower():
      # PIL.Image case.
      # Duck-typed by class name so Pillow stays an optional dependency.
      suffix = value.format.lower() if value.format else 'jpeg'
      mimetype = f'image/{suffix}'
      bytes_io = io.BytesIO()
      value.save(bytes_io, suffix.upper())

      part_dict['inline_data'] = Blob(
          data=bytes_io.getvalue(), mime_type=mimetype
      )
    else:
      raise ValueError(f'Unsupported content part type: {type(value)}')

    super().__init__(**part_dict, **kwargs)

  def as_image(self) -> Optional['Image']:
    """Returns the part as an Image, or None if the part is not an image."""
    if not self.inline_data:
      return None
    return self.inline_data.as_image()

  @classmethod
  def _t_part_media_resolution(
      cls,
      part_media_resolution: Union[
          'PartMediaResolutionOrDict', 'PartMediaResolutionLevel', str
      ],
  ) -> PartMediaResolution:
    """Normalizes a level enum/string/dict into a PartMediaResolution."""
    if isinstance(part_media_resolution, str):
      part_media_resolution = PartMediaResolution(level=part_media_resolution)
    elif isinstance(part_media_resolution, PartMediaResolutionLevel):
      part_media_resolution = PartMediaResolution(level=part_media_resolution)
    elif isinstance(part_media_resolution, dict):
      part_media_resolution = PartMediaResolution(**part_media_resolution)

    return part_media_resolution

  @classmethod
  def from_uri(
      cls,
      *,
      file_uri: str,
      mime_type: Optional[str] = None,
      media_resolution: Optional[
          Union['PartMediaResolutionOrDict', 'PartMediaResolutionLevel', str]
      ] = None,
  ) -> 'Part':
    """Creates a Part from a file uri.

    Args:
      file_uri (str): The uri of the file
      mime_type (str): The MIME type of the file. If not provided,
        the MIME type will be automatically determined.
      media_resolution: Optional media resolution for the input; accepts a
        PartMediaResolution, its dict form, a PartMediaResolutionLevel, or a
        level string.

    Raises:
      ValueError: If no MIME type was given and none could be guessed from
        `file_uri`.
    """
    if mime_type is None:
      import mimetypes

      mime_type, _ = mimetypes.guess_type(file_uri)
      if not mime_type:
        raise ValueError(f'Failed to determine mime type for file: {file_uri}')
    if media_resolution is not None:
      media_resolution = cls._t_part_media_resolution(media_resolution)
    file_data = FileData(file_uri=file_uri, mime_type=mime_type)
    return cls(file_data=file_data, media_resolution=media_resolution)

  @classmethod
  def from_text(cls, *, text: str) -> 'Part':
    """Creates a text Part."""
    return cls(text=text)

  @classmethod
  def from_bytes(
      cls,
      *,
      data: bytes,
      mime_type: str,
      media_resolution: Optional[
          Union['PartMediaResolutionOrDict', 'PartMediaResolutionLevel', str]
      ] = None,
  ) -> 'Part':
    """Creates a Part with inline bytes data and an optional media resolution."""
    inline_data = Blob(
        data=data,
        mime_type=mime_type,
    )
    if media_resolution is not None:
      media_resolution = cls._t_part_media_resolution(media_resolution)

    return cls(inline_data=inline_data, media_resolution=media_resolution)

  @classmethod
  def from_function_call(cls, *, name: str, args: dict[str, Any]) -> 'Part':
    """Creates a Part carrying a FunctionCall with the given name and args."""
    function_call = FunctionCall(name=name, args=args)
    return cls(function_call=function_call)

  @classmethod
  def from_function_response(
      cls,
      *,
      name: str,
      response: dict[str, Any],
      parts: Optional[list[FunctionResponsePart]] = None,
  ) -> 'Part':
    """Creates a Part carrying a FunctionResponse, optionally with media parts."""
    function_response = FunctionResponse(
        name=name, response=response, parts=parts
    )
    return cls(function_response=function_response)

  @classmethod
  def from_executable_code(cls, *, code: str, language: Language) -> 'Part':
    """Creates a Part carrying ExecutableCode in the given language."""
    executable_code = ExecutableCode(code=code, language=language)
    return cls(executable_code=executable_code)

  @classmethod
  def from_code_execution_result(
      cls, *, outcome: Outcome, output: str
  ) -> 'Part':
    """Creates a Part carrying a CodeExecutionResult."""
    code_execution_result = CodeExecutionResult(outcome=outcome, output=output)
    return cls(code_execution_result=code_execution_result)


class PartDict(TypedDict, total=False):
  """A datatype containing media content.

  Exactly one field within a Part should be set, representing the specific type
  of content being conveyed. Using multiple fields within the same `Part`
  instance is considered invalid.
  """

  # NOTE: all keys are optional (total=False); callers populate exactly one
  # content-bearing field per part, per the class docstring.
  media_resolution: Optional[PartMediaResolutionDict]
  """Media resolution for the input media.
    """

  code_execution_result: Optional[CodeExecutionResultDict]
  """Optional. Result of executing the [ExecutableCode]."""

  executable_code: Optional[ExecutableCodeDict]
  """Optional. Code generated by the model that is meant to be executed."""

  file_data: Optional[FileDataDict]
  """Optional. URI based data."""

  function_call: Optional[FunctionCallDict]
  """Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values."""

  function_response: Optional[FunctionResponseDict]
  """Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model."""

  inline_data: Optional[BlobDict]
  """Optional. Inlined bytes data."""

  text: Optional[str]
  """Optional. Text part (can be code)."""

  thought: Optional[bool]
  """Optional. Indicates if the part is thought from the model."""

  thought_signature: Optional[bytes]
  """Optional. An opaque signature for the thought so it can be reused in subsequent requests."""

  video_metadata: Optional[VideoMetadataDict]
  """Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data."""


# Accepted anywhere a part is expected: the pydantic model or its dict form.
PartOrDict = Union[Part, PartDict]


class Content(_common.BaseModel):
  """Contains the multi-part content of a message."""

  # Ordered list of parts; each part carries exactly one kind of content.
  parts: Optional[list[Part]] = Field(
      default=None,
      description="""List of parts that constitute a single message. Each part may have
      a different IANA MIME type.""",
  )
  role: Optional[str] = Field(
      default=None,
      description="""Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset.""",
  )


class ContentDict(TypedDict, total=False):
  """Contains the multi-part content of a message."""

  parts: Optional[list[PartDict]]
  """List of parts that constitute a single message. Each part may have
      a different IANA MIME type."""

  role: Optional[str]
  """Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset."""


# Accepted anywhere content is expected: the pydantic model or its dict form.
ContentOrDict = Union[Content, ContentDict]


class HttpRetryOptions(_common.BaseModel):
  """HTTP retry options to be used in each of the requests."""

  # All fields default to None, meaning "use the SDK's built-in default"
  # documented in each field's description.
  attempts: Optional[int] = Field(
      default=None,
      description="""Maximum number of attempts, including the original request.
      If 0 or 1, it means no retries. If not specified, default to 5.""",
  )
  initial_delay: Optional[float] = Field(
      default=None,
      description="""Initial delay before the first retry, in fractions of a second. If not specified, default to 1.0 second.""",
  )
  max_delay: Optional[float] = Field(
      default=None,
      description="""Maximum delay between retries, in fractions of a second. If not specified, default to 60.0 seconds.""",
  )
  exp_base: Optional[float] = Field(
      default=None,
      description="""Multiplier by which the delay increases after each attempt. If not specified, default to 2.0.""",
  )
  jitter: Optional[float] = Field(
      default=None,
      description="""Randomness factor for the delay. If not specified, default to 1.0.""",
  )
  http_status_codes: Optional[list[int]] = Field(
      default=None,
      description="""List of HTTP status codes that should trigger a retry.
      If not specified, a default set of retryable codes (408, 429, and 5xx) may be used.""",
  )


class HttpRetryOptionsDict(TypedDict, total=False):
  """HTTP retry options to be used in each of the requests."""

  attempts: Optional[int]
  """Maximum number of attempts, including the original request.
      If 0 or 1, it means no retries. If not specified, default to 5."""

  initial_delay: Optional[float]
  """Initial delay before the first retry, in fractions of a second. If not specified, default to 1.0 second."""

  max_delay: Optional[float]
  """Maximum delay between retries, in fractions of a second. If not specified, default to 60.0 seconds."""

  exp_base: Optional[float]
  """Multiplier by which the delay increases after each attempt. If not specified, default to 2.0."""

  jitter: Optional[float]
  """Randomness factor for the delay. If not specified, default to 1.0."""

  http_status_codes: Optional[list[int]]
  """List of HTTP status codes that should trigger a retry.
      If not specified, a default set of retryable codes (408, 429, and 5xx) may be used."""


# Accepted anywhere retry options are expected: model or dict form.
HttpRetryOptionsOrDict = Union[HttpRetryOptions, HttpRetryOptionsDict]


class HttpOptions(_common.BaseModel):
  """HTTP options to be used in each of the requests."""

  base_url: Optional[str] = Field(
      default=None,
      description="""The base URL for the AI platform service endpoint.""",
  )
  base_url_resource_scope: Optional[ResourceScope] = Field(
      default=None,
      description="""The resource scope used to constructing the resource name when base_url is set""",
  )
  api_version: Optional[str] = Field(
      default=None, description="""Specifies the version of the API to use."""
  )
  headers: Optional[dict[str, str]] = Field(
      default=None,
      description="""Additional HTTP headers to be sent with the request.""",
  )
  # NOTE: timeout is in milliseconds, unlike the retry delays above which are
  # in (fractions of) seconds.
  timeout: Optional[int] = Field(
      default=None, description="""Timeout for the request in milliseconds."""
  )
  client_args: Optional[dict[str, Any]] = Field(
      default=None, description="""Args passed to the HTTP client."""
  )
  async_client_args: Optional[dict[str, Any]] = Field(
      default=None, description="""Args passed to the async HTTP client."""
  )
  extra_body: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Extra parameters to add to the request body.
      The structure must match the backend API's request structure.
      - VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
      - GeminiAPI backend API docs: https://ai.google.dev/api/rest""",
  )
  retry_options: Optional[HttpRetryOptions] = Field(
      default=None, description="""HTTP retry options for the request."""
  )

  # Custom httpx clients; absent from HttpOptionsDict below since live client
  # objects are not representable in the TypedDict form.
  httpx_client: Optional['HttpxClient'] = Field(
      default=None,
      description="""A custom httpx client to be used for the request.""",
  )
  httpx_async_client: Optional['HttpxAsyncClient'] = Field(
      default=None,
      description="""A custom httpx async client to be used for the request.""",
  )


class HttpOptionsDict(TypedDict, total=False):
  """HTTP options to be used in each of the requests."""

  base_url: Optional[str]
  """The base URL for the AI platform service endpoint."""

  base_url_resource_scope: Optional[ResourceScope]
  """The resource scope used to constructing the resource name when base_url is set"""

  api_version: Optional[str]
  """Specifies the version of the API to use."""

  headers: Optional[dict[str, str]]
  """Additional HTTP headers to be sent with the request."""

  timeout: Optional[int]
  """Timeout for the request in milliseconds."""

  client_args: Optional[dict[str, Any]]
  """Args passed to the HTTP client."""

  async_client_args: Optional[dict[str, Any]]
  """Args passed to the async HTTP client."""

  extra_body: Optional[dict[str, Any]]
  """Extra parameters to add to the request body.
      The structure must match the backend API's request structure.
      - VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
      - GeminiAPI backend API docs: https://ai.google.dev/api/rest"""

  retry_options: Optional[HttpRetryOptionsDict]
  """HTTP retry options for the request."""


# Accepted anywhere HTTP options are expected: model or dict form.
HttpOptionsOrDict = Union[HttpOptions, HttpOptionsDict]


class JSONSchema(_common.BaseModel):
  """A subset of JSON Schema according to 2020-12 JSON Schema draft.

  Represents a subset of a JSON Schema object that is used by the Gemini model.
  The difference between this class and the Schema class is that this class is
  compatible with OpenAPI 3.1 schema objects. And the Schema class is used to
  make API call to Gemini model.
  """

  type: Optional[Union[JSONSchemaType, list[JSONSchemaType]]] = Field(
      default=None,
      description="""Validation succeeds if the type of the instance matches the type represented by the given type, or matches at least one of the given types.""",
  )
  format: Optional[str] = Field(
      default=None,
      description='Define semantic information about a string instance.',
  )
  title: Optional[str] = Field(
      default=None,
      description=(
          'A preferably short description about the purpose of the instance'
          ' described by the schema.'
      ),
  )
  description: Optional[str] = Field(
      default=None,
      description=(
          'An explanation about the purpose of the instance described by the'
          ' schema.'
      ),
  )
  default: Optional[Any] = Field(
      default=None,
      description=(
          'This keyword can be used to supply a default JSON value associated'
          ' with a particular schema.'
      ),
  )
  items: Optional['JSONSchema'] = Field(
      default=None,
      description=(
          'Validation succeeds if each element of the instance not covered by'
          ' prefixItems validates against this schema.'
      ),
  )
  min_items: Optional[int] = Field(
      default=None,
      description=(
          'An array instance is valid if its size is greater than, or equal to,'
          ' the value of this keyword.'
      ),
  )
  max_items: Optional[int] = Field(
      default=None,
      description=(
          'An array instance is valid if its size is less than, or equal to,'
          ' the value of this keyword.'
      ),
  )
  enum: Optional[list[Any]] = Field(
      default=None,
      description=(
          'Validation succeeds if the instance is equal to one of the elements'
          ' in this keyword’s array value.'
      ),
  )
  properties: Optional[dict[str, 'JSONSchema']] = Field(
      default=None,
      description=(
          'Validation succeeds if, for each name that appears in both the'
          ' instance and as a name within this keyword’s value, the child'
          ' instance for that name successfully validates against the'
          ' corresponding schema.'
      ),
  )
  required: Optional[list[str]] = Field(
      default=None,
      description=(
          'An object instance is valid against this keyword if every item in'
          ' the array is the name of a property in the instance.'
      ),
  )
  min_properties: Optional[int] = Field(
      default=None,
      description=(
          'An object instance is valid if its number of properties is greater'
          ' than, or equal to, the value of this keyword.'
      ),
  )
  max_properties: Optional[int] = Field(
      default=None,
      description=(
          'An object instance is valid if its number of properties is less'
          ' than, or equal to, the value of this keyword.'
      ),
  )
  minimum: Optional[float] = Field(
      default=None,
      description=(
          'Validation succeeds if the numeric instance is greater than or equal'
          ' to the given number.'
      ),
  )
  maximum: Optional[float] = Field(
      default=None,
      description=(
          'Validation succeeds if the numeric instance is less than or equal to'
          ' the given number.'
      ),
  )
  min_length: Optional[int] = Field(
      default=None,
      description=(
          'A string instance is valid against this keyword if its length is'
          ' greater than, or equal to, the value of this keyword.'
      ),
  )
  max_length: Optional[int] = Field(
      default=None,
      description=(
          'A string instance is valid against this keyword if its length is'
          ' less than, or equal to, the value of this keyword.'
      ),
  )
  pattern: Optional[str] = Field(
      default=None,
      description=(
          'A string instance is considered valid if the regular expression'
          ' matches the instance successfully.'
      ),
  )
  additional_properties: Optional[Any] = Field(
      default=None,
      description="""Can either be a boolean or an object; controls the presence of additional properties.""",
  )
  any_of: Optional[list['JSONSchema']] = Field(
      default=None,
      description=(
          'An instance validates successfully against this keyword if it'
          ' validates successfully against at least one schema defined by this'
          ' keyword’s value.'
      ),
  )
  unique_items: Optional[bool] = Field(
      default=None,
      description="""Boolean value that indicates whether the items in an array are unique.""",
  )
  # Pydantic aliases let callers supply the literal JSON Schema keys
  # '$ref'/'$defs' while the Python attributes stay valid identifiers.
  ref: Optional[str] = Field(
      default=None,
      alias='$ref',
      description="""Allows indirect references between schema nodes.""",
  )
  defs: Optional[dict[str, 'JSONSchema']] = Field(
      default=None,
      alias='$defs',
      description="""Schema definitions to be used with $ref.""",
  )


class Schema(_common.BaseModel):
  """Schema is used to define the format of input/output data.

  Represents a select subset of an [OpenAPI 3.0 schema
  object](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may
  be added in the future as needed.
  """

  additional_properties: Optional[Any] = Field(
      default=None,
      description="""Optional. Can either be a boolean or an object; controls the presence of additional properties.""",
  )
  defs: Optional[dict[str, 'Schema']] = Field(
      default=None,
      description="""Optional. A map of definitions for use by `ref` Only allowed at the root of the schema.""",
  )
  ref: Optional[str] = Field(
      default=None,
      description="""Optional. Allows indirect references between schema nodes. The value should be a valid reference to a child of the root `defs`. For example, the following schema defines a reference to a schema node named "Pet": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the "pet" property is a reference to the schema node named "Pet". See details in https://json-schema.org/understanding-json-schema/structuring""",
  )
  any_of: Optional[list['Schema']] = Field(
      default=None,
      description="""Optional. The value should be validated against any (one or more) of the subschemas in the list.""",
  )
  default: Optional[Any] = Field(
      default=None, description="""Optional. Default value of the data."""
  )
  description: Optional[str] = Field(
      default=None, description="""Optional. The description of the data."""
  )
  enum: Optional[list[str]] = Field(
      default=None,
      description="""Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]}""",
  )
  example: Optional[Any] = Field(
      default=None,
      description="""Optional. Example of the object. Will only populated when the object is the root.""",
  )
  format: Optional[str] = Field(
      default=None,
      description="""Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc""",
  )
  items: Optional['Schema'] = Field(
      default=None,
      description="""Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY.""",
  )
  max_items: Optional[int] = Field(
      default=None,
      description="""Optional. Maximum number of the elements for Type.ARRAY.""",
  )
  max_length: Optional[int] = Field(
      default=None,
      description="""Optional. Maximum length of the Type.STRING""",
  )
  max_properties: Optional[int] = Field(
      default=None,
      description="""Optional. Maximum number of the properties for Type.OBJECT.""",
  )
  maximum: Optional[float] = Field(
      default=None,
      description="""Optional. Maximum value of the Type.INTEGER and Type.NUMBER""",
  )
  min_items: Optional[int] = Field(
      default=None,
      description="""Optional. Minimum number of the elements for Type.ARRAY.""",
  )
  min_length: Optional[int] = Field(
      default=None,
      description="""Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING""",
  )
  min_properties: Optional[int] = Field(
      default=None,
      description="""Optional. Minimum number of the properties for Type.OBJECT.""",
  )
  minimum: Optional[float] = Field(
      default=None,
      description="""Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER""",
  )
  # Unlike JSONSchema (where nullability is expressed via a 'null' entry in
  # `type`), nullability here is a separate boolean flag.
  nullable: Optional[bool] = Field(
      default=None,
      description="""Optional. Indicates if the value may be null.""",
  )
  pattern: Optional[str] = Field(
      default=None,
      description="""Optional. Pattern of the Type.STRING to restrict a string to a regular expression.""",
  )
  properties: Optional[dict[str, 'Schema']] = Field(
      default=None,
      description="""Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT.""",
  )
  property_ordering: Optional[list[str]] = Field(
      default=None,
      description="""Optional. The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties.""",
  )
  required: Optional[list[str]] = Field(
      default=None,
      description="""Optional. Required properties of Type.OBJECT.""",
  )
  title: Optional[str] = Field(
      default=None, description="""Optional. The title of the Schema."""
  )
  type: Optional[Type] = Field(
      default=None, description="""Optional. The type of the data."""
  )

  @property
  def json_schema(self) -> JSONSchema:
    """Converts the Schema object to a JSONSchema object, that is compatible with 2020-12 JSON Schema draft.

     Note: Conversion of fields that are not included in the JSONSchema class
     are ignored.
     Json Schema is now supported natively by both Vertex AI and Gemini API.
     Users
     are recommended to pass/receive Json Schema directly to/from the API. For
     example:
     1. the counter part of GenerateContentConfig.response_schema is
        GenerateContentConfig.response_json_schema, which accepts [JSON
       Schema](https://json-schema.org/)
     2. the counter part of FunctionDeclaration.parameters is
        FunctionDeclaration.parameters_json_schema, which accepts [JSON
        Schema](https://json-schema.org/)
     3. the counter part of FunctionDeclaration.response is
        FunctionDeclaration.response_json_schema, which accepts [JSON
    Schema](https://json-schema.org/)
    """

    global _json_schema_warning_logged
    if not _json_schema_warning_logged:
      info_message = """
Note: Conversion of fields that are not included in the JSONSchema class are
ignored.
Json Schema is now supported natively by both Vertex AI and Gemini API. Users
are recommended to pass/receive Json Schema directly to/from the API. For example:
1. the counter part of GenerateContentConfig.response_schema is
   GenerateContentConfig.response_json_schema, which accepts [JSON
  Schema](https://json-schema.org/)
2. the counter part of FunctionDeclaration.parameters is
   FunctionDeclaration.parameters_json_schema, which accepts [JSON
   Schema](https://json-schema.org/)
3. the counter part of FunctionDeclaration.response is
   FunctionDeclaration.response_json_schema, which accepts [JSON
   Schema](https://json-schema.org/)
"""
      logger.info(info_message)
      _json_schema_warning_logged = True

    json_schema_field_names: set[str] = set(JSONSchema.model_fields.keys())
    schema_field_names: tuple[str] = (
        'items',
    )  # 'additional_properties' to come
    list_schema_field_names: tuple[str] = (
        'any_of',  # 'one_of', 'all_of', 'not' to come
    )
    dict_schema_field_names: tuple[str] = ('properties',)  # 'defs' to come

    def convert_schema(schema: Union['Schema', dict[str, Any]]) -> JSONSchema:
      if isinstance(schema, pydantic.BaseModel):
        schema_dict = schema.model_dump(exclude_none=True)
      else:
        schema_dict = schema
      json_schema = JSONSchema()
      for field_name, field_value in schema_dict.items():
        if field_value is None:
          continue
        elif field_name == 'nullable':
          if json_schema.type is None:
            json_schema.type = JSONSchemaType.NULL
          elif isinstance(json_schema.type, JSONSchemaType):
            current_type: JSONSchemaType = json_schema.type
            json_schema.type = [current_type, JSONSchemaType.NULL]
          elif isinstance(json_schema.type, list):
            json_schema.type.append(JSONSchemaType.NULL)
        elif field_name not in json_schema_field_names:
          continue
        elif field_name == 'type':
          if field_value == Type.TYPE_UNSPECIFIED:
            continue
          json_schema_type = JSONSchemaType(field_value.lower())
          if json_schema.type is None:
            json_schema.type = json_schema_type
          elif isinstance(json_schema.type, JSONSchemaType):
            existing_type: JSONSchemaType = json_schema.type
            json_schema.type = [existing_type, json_schema_type]
          elif isinstance(json_schema.type, list):
            json_schema.type.append(json_schema_type)
        elif field_name in schema_field_names:
          schema_field_value: 'JSONSchema' = convert_schema(field_value)
          setattr(json_schema, field_name, schema_field_value)
        elif field_name in list_schema_field_names:
          list_schema_field_value: list['JSONSchema'] = [
              convert_schema(this_field_value)
              for this_field_value in field_value
          ]
          setattr(json_schema, field_name, list_schema_field_value)
        elif field_name in dict_schema_field_names:
          dict_schema_field_value: dict[str, 'JSONSchema'] = {
              key: convert_schema(value) for key, value in field_value.items()
          }
          setattr(json_schema, field_name, dict_schema_field_value)
        else:
          setattr(json_schema, field_name, field_value)

      return json_schema

    return convert_schema(self)

  @classmethod
  def from_json_schema(
      cls,
      *,
      json_schema: JSONSchema,
      api_option: Literal['VERTEX_AI', 'GEMINI_API'] = 'GEMINI_API',
      raise_error_on_unsupported_field: bool = False,
  ) -> 'Schema':
    """Converts a JSONSchema object to a Schema object.

     Note: Conversion of fields that are not included in the JSONSchema class
     are ignored.
     Json Schema is now supported natively by both Vertex AI and Gemini API.
     Users
     are recommended to pass/receive Json Schema directly to/from the API. For
     example:
     1. the counter part of GenerateContentConfig.response_schema is
        GenerateContentConfig.response_json_schema, which accepts [JSON
       Schema](https://json-schema.org/)
     2. the counter part of FunctionDeclaration.parameters is
        FunctionDeclaration.parameters_json_schema, which accepts [JSON
        Schema](https://json-schema.org/)
     3. the counter part of FunctionDeclaration.response is
        FunctionDeclaration.response_json_schema, which accepts [JSON
    Schema](https://json-schema.org/)
     The JSONSchema is compatible with 2020-12 JSON Schema draft, specified by
     OpenAPI 3.1.

     Args:
         json_schema: JSONSchema object to be converted.
         api_option: API option to be used. If set to 'VERTEX_AI', the
           JSONSchema will be converted to a Schema object that is compatible
           with Vertex AI API. If set to 'GEMINI_API', the JSONSchema will be
           converted to a Schema object that is compatible with Gemini API.
           Default is 'GEMINI_API'.
         raise_error_on_unsupported_field: If set to True, an error will be
           raised if the JSONSchema contains any unsupported fields. Default is
           False.

     Returns:
         Schema object that is compatible with the specified API option.
     Raises:
         ValueError: If the JSONSchema contains any unsupported fields and
           raise_error_on_unsupported_field is set to True. Or if the JSONSchema
           is not compatible with the specified API option.
    """
    global _from_json_schema_warning_logged
    if not _from_json_schema_warning_logged:
      info_message = """
Note: Conversion of fields that are not included in the JSONSchema class are ignored.
Json Schema is now supported natively by both Vertex AI and Gemini API. Users
are recommended to pass/receive Json Schema directly to/from the API. For example:
1. the counter part of GenerateContentConfig.response_schema is
   GenerateContentConfig.response_json_schema, which accepts [JSON
  Schema](https://json-schema.org/)
2. the counter part of FunctionDeclaration.parameters is
   FunctionDeclaration.parameters_json_schema, which accepts [JSON
   Schema](https://json-schema.org/)
3. the counter part of FunctionDeclaration.response is
   FunctionDeclaration.response_json_schema, which accepts [JSON
   Schema](https://json-schema.org/)
"""
      logger.info(info_message)
      _from_json_schema_warning_logged = True

    google_schema_field_names: set[str] = set(cls.model_fields.keys())
    schema_field_names: tuple[str, ...] = (
        'items',
    )  # 'additional_properties' to come
    list_schema_field_names: tuple[str, ...] = (
        'any_of',  # 'one_of', 'all_of', 'not' to come
    )
    dict_schema_field_names: tuple[str, ...] = ('properties',)

    related_field_names_by_type: dict[str, tuple[str, ...]] = {
        JSONSchemaType.NUMBER.value: (
            'description',
            'enum',
            'format',
            'maximum',
            'minimum',
            'title',
        ),
        JSONSchemaType.STRING.value: (
            'description',
            'enum',
            'format',
            'max_length',
            'min_length',
            'pattern',
            'title',
        ),
        JSONSchemaType.OBJECT.value: (
            'any_of',
            'description',
            'max_properties',
            'min_properties',
            'properties',
            'required',
            'title',
        ),
        JSONSchemaType.ARRAY.value: (
            'description',
            'items',
            'max_items',
            'min_items',
            'title',
        ),
        JSONSchemaType.BOOLEAN.value: (
            'description',
            'title',
        ),
    }
    # Treat `INTEGER` like `NUMBER`.
    related_field_names_by_type[JSONSchemaType.INTEGER.value] = (
        related_field_names_by_type[JSONSchemaType.NUMBER.value]
    )

    # placeholder for potential gemini api unsupported fields
    gemini_api_unsupported_field_names: tuple[str, ...] = ()

    def _resolve_ref(
        ref_path: str, root_schema_dict: dict[str, Any]
    ) -> dict[str, Any]:
      """Helper to resolve a $ref path."""
      current = root_schema_dict
      for part in ref_path.lstrip('#/').split('/'):
        if part == '$defs':
          part = 'defs'
        current = current[part]
      current.pop('title', None)
      if 'properties' in current and current['properties'] is not None:
        for prop_schema in current['properties'].values():
          if isinstance(prop_schema, dict):
            prop_schema.pop('title', None)

      return current

    def normalize_json_schema_type(
        json_schema_type: Optional[
            Union[JSONSchemaType, Sequence[JSONSchemaType], str, Sequence[str]]
        ],
    ) -> tuple[list[str], bool]:
      """Returns (non_null_types, nullable)"""
      if json_schema_type is None:
        return [], False
      type_sequence: Sequence[Union[JSONSchemaType, str]]
      if isinstance(json_schema_type, str) or not isinstance(
          json_schema_type, Sequence
      ):
        type_sequence = [json_schema_type]
      else:
        type_sequence = json_schema_type
      non_null_types = []
      nullable = False
      for type_value in type_sequence:
        if isinstance(type_value, JSONSchemaType):
          type_value = type_value.value
        if type_value == JSONSchemaType.NULL.value:
          nullable = True
        else:
          non_null_types.append(type_value)
      return non_null_types, nullable

    def raise_error_if_cannot_convert(
        json_schema_dict: dict[str, Any],
        api_option: Literal['VERTEX_AI', 'GEMINI_API'],
        raise_error_on_unsupported_field: bool,
    ) -> None:
      """Raises an error if the JSONSchema cannot be converted to the specified Schema object."""
      if not raise_error_on_unsupported_field:
        return
      for field_name, field_value in json_schema_dict.items():
        if field_value is None:
          continue
        if field_name not in google_schema_field_names and field_name not in [
            'ref',
            'defs',
        ]:
          raise ValueError(
              f'JSONSchema field "{field_name}" is not supported by the Schema'
              ' object. And the "raise_error_on_unsupported_field" argument is'
              ' set to True. If you still want to convert it into the Schema'
              f' object, please either remove the field "{field_name}" from the'
              ' JSONSchema object, leave the'
              ' "raise_error_on_unsupported_field" unset, or try using'
              ' response_json_schema instead.'
          )
        if (
            field_name in gemini_api_unsupported_field_names
            and api_option == 'GEMINI_API'
        ):
          raise ValueError(
              f'The "{field_name}" field is not supported by the Schema '
              'object for GEMINI_API.'
          )

    def copy_schema_fields(
        json_schema_dict: dict[str, Any],
        related_fields_to_copy: tuple[str, ...],
        sub_schema_in_any_of: dict[str, Any],
    ) -> None:
      """Copies the fields from json_schema_dict to sub_schema_in_any_of."""
      for field_name in related_fields_to_copy:
        sub_schema_in_any_of[field_name] = json_schema_dict.get(
            field_name, None
        )

    def convert_json_schema(
        current_json_schema: JSONSchema,
        root_json_schema_dict: dict[str, Any],
        api_option: Literal['VERTEX_AI', 'GEMINI_API'],
        raise_error_on_unsupported_field: bool,
    ) -> 'Schema':
      """Recursively converts a JSONSchema node into a Schema.

      Resolves `ref` pointers against the root schema, validates that all
      fields are representable for the chosen API surface, splits union-like
      types into `any_of` sub-schemas, and recurses into nested schema-,
      list-of-schema-, and dict-of-schema-valued fields.
      """
      schema = Schema()
      json_schema_dict = current_json_schema.model_dump()

      # A `ref` node carries no inline fields of its own; replace it with the
      # dict it points to inside the root schema's `defs`.
      if json_schema_dict.get('ref'):
        json_schema_dict = _resolve_ref(
            json_schema_dict['ref'], root_json_schema_dict
        )

      raise_error_if_cannot_convert(
          json_schema_dict=json_schema_dict,
          api_option=api_option,
          raise_error_on_unsupported_field=raise_error_on_unsupported_field,
      )

      # At the highest level of the logic, there are two passes:
      # Pass 1: the JSONSchema.type is union-like,
      #         e.g. ['null', 'string', 'array'].
      #         for this case, we need to split the JSONSchema into multiple
      #         sub-schemas, and copy them into the any_of field of the Schema.
      #         And when we copy the non-type fields into any_of field,
      #         we only copy the fields related to the specific type.
      #         Detailed logic is commented below with `Pass 1` keyword tag.
      # Pass 2: the JSONSchema.type is not union-like,
      #         e.g. 'string', ['string'], ['null', 'string'].
      #         for this case, no splitting is needed. Detailed
      #         logic is commented below with `Pass 2` keyword tag.
      #
      #
      # Pass 1: the JSONSchema.type is union-like
      #         e.g. ['null', 'string', 'array'].
      non_null_types, nullable = normalize_json_schema_type(
          json_schema_dict.get('type', None)
      )
      is_union_like_type = len(non_null_types) > 1
      if len(non_null_types) > 1:
        logger.warning(
            'JSONSchema type is union-like, e.g. ["null", "string", "array"]. '
            'Converting it into multiple sub-schemas, and copying them into '
            'the any_of field of the Schema. The value of `default` field is '
            'ignored because it is ambiguous to tell which sub-schema it '
            'belongs to.'
        )
        reformed_json_schema = JSONSchema()
        # start splitting the JSONSchema into multiple sub-schemas
        any_of = []
        if nullable:
          schema.nullable = True
        for normalized_type in non_null_types:
          sub_schema_in_any_of = {'type': normalized_type}
          # Only copy the fields that are meaningful for this specific type
          # (e.g. `max_items` goes with 'array', `pattern` with 'string').
          related_field_names = related_field_names_by_type.get(normalized_type)
          if related_field_names is not None:
            copy_schema_fields(
                json_schema_dict=json_schema_dict,
                related_fields_to_copy=related_field_names,
                sub_schema_in_any_of=sub_schema_in_any_of,
            )
          any_of.append(JSONSchema(**sub_schema_in_any_of))
        reformed_json_schema.any_of = any_of
        # Replace the working dict so Pass 2 below only sees the any_of form.
        json_schema_dict = reformed_json_schema.model_dump()

      # Pass 2: the JSONSchema.type is not union-like,
      # e.g. 'string', ['string'], ['null', 'string'].
      for field_name, field_value in json_schema_dict.items():
        # `defs` only exists for ref resolution; it is never copied to Schema.
        if field_value is None or field_name == 'defs':
          continue
        if field_name in schema_field_names:
          # Field holds a single nested schema; recurse.
          if field_name == 'items' and not field_value:
            continue
          schema_field_value: 'Schema' = convert_json_schema(
              current_json_schema=JSONSchema(**field_value),
              root_json_schema_dict=root_json_schema_dict,
              api_option=api_option,
              raise_error_on_unsupported_field=raise_error_on_unsupported_field,
          )
          setattr(schema, field_name, schema_field_value)
        elif field_name in list_schema_field_names:
          # Field holds a list of nested schemas; recurse per element.
          list_schema_field_value: list['Schema'] = [
              convert_json_schema(
                  current_json_schema=JSONSchema(**this_field_value),
                  root_json_schema_dict=root_json_schema_dict,
                  api_option=api_option,
                  raise_error_on_unsupported_field=raise_error_on_unsupported_field,
              )
              for this_field_value in field_value
          ]
          setattr(schema, field_name, list_schema_field_value)
          if not schema.type and not is_union_like_type and not schema.any_of:
            schema.type = Type('OBJECT')
        elif field_name in dict_schema_field_names:
          # Field holds a mapping of name -> nested schema; recurse per value.
          dict_schema_field_value: dict[str, 'Schema'] = {
              key: convert_json_schema(
                  current_json_schema=JSONSchema(**value),
                  root_json_schema_dict=root_json_schema_dict,
                  api_option=api_option,
                  raise_error_on_unsupported_field=raise_error_on_unsupported_field,
              )
              for key, value in field_value.items()
          }
          setattr(schema, field_name, dict_schema_field_value)
        elif field_name == 'type':
          # At this point the type is not union-like (at most one non-null
          # type), so the first entry, if any, is the schema's type.
          non_null_types, nullable = normalize_json_schema_type(field_value)
          if nullable:
            schema.nullable = True
          if non_null_types:
            schema.type = Type(non_null_types[0])
        else:
          if (
              hasattr(schema, field_name)
              and field_name != 'additional_properties'
          ):
            setattr(schema, field_name, field_value)

      # Drop an `items` sub-schema with no set fields (produced for untyped
      # arrays) so it does not serialize as an empty object.
      if (
          schema.type == 'ARRAY'
          and schema.items
          and not schema.items.model_dump(exclude_unset=True)
      ):
        schema.items = None

      # Collapse Optional[T]-style unions: an any_of of exactly [T, null]
      # becomes a single nullable schema for T.
      if schema.any_of and len(schema.any_of) == 2:
        nullable_part = None
        type_part = None
        for part in schema.any_of:
          # A schema representing `None` will either be of type NULL or just be nullable.
          part_dict = part.model_dump(exclude_unset=True)
          if part_dict == {'nullable': True} or part_dict == {'type': 'NULL'}:
            nullable_part = part
          else:
            type_part = part

        # If we found both parts, unwrap them into a single schema.
        if nullable_part and type_part:
          default_value = schema.default
          schema = type_part
          schema.nullable = True
          # Carry the default value over to the unwrapped schema
          if default_value is not None:
            schema.default = default_value

      return schema

    # This is the initial call to the recursive function.
    root_schema_dict = json_schema.model_dump()
    return convert_json_schema(
        current_json_schema=json_schema,
        root_json_schema_dict=root_schema_dict,
        api_option=api_option,
        raise_error_on_unsupported_field=raise_error_on_unsupported_field,
    )


class SchemaDict(TypedDict, total=False):
  """Schema is used to define the format of input/output data.

  Represents a select subset of an [OpenAPI 3.0 schema
  object](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may
  be added in the future as needed.
  """

  # TypedDict mirror of the Schema Pydantic model; all keys are optional
  # (total=False) and use the same snake_case names as the model fields.
  additional_properties: Optional[Any]
  """Optional. Can either be a boolean or an object; controls the presence of additional properties."""

  defs: Optional[dict[str, 'SchemaDict']]
  """Optional. A map of definitions for use by `ref` Only allowed at the root of the schema."""

  ref: Optional[str]
  """Optional. Allows indirect references between schema nodes. The value should be a valid reference to a child of the root `defs`. For example, the following schema defines a reference to a schema node named "Pet": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the "pet" property is a reference to the schema node named "Pet". See details in https://json-schema.org/understanding-json-schema/structuring"""

  any_of: Optional[list['SchemaDict']]
  """Optional. The value should be validated against any (one or more) of the subschemas in the list."""

  default: Optional[Any]
  """Optional. Default value of the data."""

  description: Optional[str]
  """Optional. The description of the data."""

  enum: Optional[list[str]]
  """Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]}"""

  example: Optional[Any]
  """Optional. Example of the object. Will only populated when the object is the root."""

  format: Optional[str]
  """Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc"""

  items: Optional['SchemaDict']
  """Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY."""

  max_items: Optional[int]
  """Optional. Maximum number of the elements for Type.ARRAY."""

  max_length: Optional[int]
  """Optional. Maximum length of the Type.STRING"""

  max_properties: Optional[int]
  """Optional. Maximum number of the properties for Type.OBJECT."""

  maximum: Optional[float]
  """Optional. Maximum value of the Type.INTEGER and Type.NUMBER"""

  min_items: Optional[int]
  """Optional. Minimum number of the elements for Type.ARRAY."""

  min_length: Optional[int]
  """Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING"""

  min_properties: Optional[int]
  """Optional. Minimum number of the properties for Type.OBJECT."""

  minimum: Optional[float]
  """Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER"""

  nullable: Optional[bool]
  """Optional. Indicates if the value may be null."""

  pattern: Optional[str]
  """Optional. Pattern of the Type.STRING to restrict a string to a regular expression."""

  properties: Optional[dict[str, 'SchemaDict']]
  """Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT."""

  property_ordering: Optional[list[str]]
  """Optional. The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties."""

  required: Optional[list[str]]
  """Optional. Required properties of Type.OBJECT."""

  title: Optional[str]
  """Optional. The title of the Schema."""

  type: Optional[Type]
  """Optional. The type of the data."""


SchemaOrDict = Union[Schema, SchemaDict]


class ModelSelectionConfig(_common.BaseModel):
  """Config for model selection."""

  # Field descriptions are runtime metadata surfaced in the generated schema.
  feature_selection_preference: Optional[FeatureSelectionPreference] = Field(
      default=None, description="""Options for feature selection preference."""
  )


class ModelSelectionConfigDict(TypedDict, total=False):
  """Config for model selection."""

  # TypedDict mirror of ModelSelectionConfig.
  feature_selection_preference: Optional[FeatureSelectionPreference]
  """Options for feature selection preference."""


# Accepts either the Pydantic model or its TypedDict equivalent.
ModelSelectionConfigOrDict = Union[
    ModelSelectionConfig, ModelSelectionConfigDict
]


class FunctionDeclaration(_common.BaseModel):
  """Defines a function that the model can generate JSON inputs for.

  The inputs are based on `OpenAPI 3.0 specifications
  <https://spec.openapis.org/oas/v3.0.3>`_.
  """

  behavior: Optional[Behavior] = Field(
      default=None, description="""Defines the function behavior."""
  )
  description: Optional[str] = Field(
      default=None,
      description="""Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function.""",
  )
  name: Optional[str] = Field(
      default=None,
      description="""Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64.""",
  )
  parameters: Optional[Schema] = Field(
      default=None,
      description="""Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1""",
  )
  parameters_json_schema: Optional[Any] = Field(
      default=None,
      description="""Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { "type": "object", "properties": { "name": { "type": "string" }, "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", "age"], "propertyOrdering": ["name", "age"] } ``` This field is mutually exclusive with `parameters`.""",
  )
  response: Optional[Schema] = Field(
      default=None,
      description="""Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function.""",
  )
  response_json_schema: Optional[Any] = Field(
      default=None,
      description="""Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`.""",
  )

  @classmethod
  def from_callable_with_api_option(
      cls,
      *,
      callable: Callable[..., Any],
      api_option: Literal['VERTEX_AI', 'GEMINI_API'] = 'GEMINI_API',
      behavior: Optional[Behavior] = None,
  ) -> 'FunctionDeclaration':
    """Converts a Callable to a FunctionDeclaration based on the API option.

    Supported API option is 'VERTEX_AI' or 'GEMINI_API'. If api_option is unset,
    it will default to 'GEMINI_API'. If unsupported api_option is provided, it
    will raise ValueError.
    """
    supported_api_options = ['VERTEX_AI', 'GEMINI_API']
    if api_option not in supported_api_options:
      raise ValueError(
          f'Unsupported api_option value: {api_option}. Supported api_option'
          f' value is one of: {supported_api_options}.'
      )
    from . import _automatic_function_calling_util

    parameters_properties = {}
    parameters_json_schema = {}
    # Resolves string (PEP 563 / deferred) annotations to real types.
    annotation_under_future = typing.get_type_hints(callable)
    # First attempt: parse every parameter with the SDK's own schema parser.
    # If any parameter is unsupported (ValueError), fall back below to
    # pydantic-derived JSON schemas on a per-parameter basis.
    try:
      for name, param in inspect.signature(callable).parameters.items():
        # *args / **kwargs parameters are intentionally skipped.
        if param.kind in (
            inspect.Parameter.POSITIONAL_OR_KEYWORD,
            inspect.Parameter.KEYWORD_ONLY,
            inspect.Parameter.POSITIONAL_ONLY,
        ):
          param = _automatic_function_calling_util._handle_params_as_deferred_annotations(
              param, annotation_under_future, name
          )
          schema = (
              _automatic_function_calling_util._parse_schema_from_parameter(
                  api_option, param, callable.__name__
              )
          )
          parameters_properties[name] = schema
    except ValueError:
      # Fallback: discard partial results and rebuild all parameters, using
      # pydantic's TypeAdapter to generate raw JSON schema where needed.
      parameters_properties = {}
      for name, param in inspect.signature(callable).parameters.items():
        if param.kind in (
            inspect.Parameter.POSITIONAL_OR_KEYWORD,
            inspect.Parameter.KEYWORD_ONLY,
            inspect.Parameter.POSITIONAL_ONLY,
        ):
          try:
            param = _automatic_function_calling_util._handle_params_as_deferred_annotations(
                param, annotation_under_future, name
            )
            param_schema_adapter = pydantic.TypeAdapter(
                param.annotation,
                config=pydantic.ConfigDict(arbitrary_types_allowed=True),
            )
            json_schema_dict = param_schema_adapter.json_schema()
            json_schema_dict = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
                json_schema_dict
            )
            # Fixed-length tuples (prefixItems) cannot be expressed as a
            # Schema object; keep them as raw JSON schema.
            if 'prefixItems' in json_schema_dict:
              parameters_json_schema[name] = json_schema_dict
              continue

            union_args = typing.get_args(param.annotation)
            has_primitive = any(
                _automatic_function_calling_util._is_builtin_primitive_or_compound(
                    arg
                )
                for arg in union_args
            )
            if (
                '$ref' in json_schema_dict or '$defs' in json_schema_dict
            ) and has_primitive:
              # This is a complex schema with a primitive (e.g., str | MyModel)
              # that is better represented by raw JSON schema.
              parameters_json_schema[name] = json_schema_dict
              continue

            schema = Schema.from_json_schema(
                json_schema=JSONSchema(**json_schema_dict),
                api_option=api_option,
            )
            if param.default is not inspect.Parameter.empty:
              schema.default = param.default
            parameters_properties[name] = schema
          except Exception as e:
            _automatic_function_calling_util._raise_for_unsupported_param(
                param, callable.__name__, e
            )

    declaration = FunctionDeclaration(
        name=callable.__name__,
        description=inspect.cleandoc(callable.__doc__)
        if callable.__doc__
        else callable.__doc__,
        behavior=behavior,
    )
    # `parameters` and `parameters_json_schema` are mutually exclusive; prefer
    # Schema-typed parameters when every parameter could be parsed.
    if parameters_properties:
      declaration.parameters = Schema(
          type='OBJECT',
          properties=parameters_properties,
      )
      declaration.parameters.required = (
          _automatic_function_calling_util._get_required_fields(
              declaration.parameters
          )
      )
    elif parameters_json_schema:
      declaration.parameters_json_schema = parameters_json_schema
    # TODO: b/421991354 - Remove this check once the bug is fixed.
    if api_option == 'GEMINI_API':
      return declaration

    # Vertex AI only: also convert the return annotation, if present, into a
    # response schema by treating it as a synthetic parameter.
    return_annotation = inspect.signature(callable).return_annotation
    if return_annotation is inspect._empty:
      return declaration

    return_value = inspect.Parameter(
        'return_value',
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        annotation=return_annotation,
    )

    # This snippet catches the case when type hints are stored as strings
    if isinstance(return_value.annotation, str):
      return_value = return_value.replace(
          annotation=annotation_under_future['return']
      )
    response_schema: Optional[Schema] = None
    response_json_schema: Optional[Union[dict[str, Any], Schema]] = {}
    try:
      response_schema = (
          _automatic_function_calling_util._parse_schema_from_parameter(
              api_option,
              return_value,
              callable.__name__,
          )
      )
      if response_schema.any_of is not None:
        # To handle any_of, we need to use responseJsonSchema
        response_json_schema = response_schema
        response_schema = None
    except ValueError:
      # Same fallback strategy as for parameters: derive raw JSON schema
      # from the annotation via pydantic.
      try:
        return_value_schema_adapter = pydantic.TypeAdapter(
            return_value.annotation,
            config=pydantic.ConfigDict(arbitrary_types_allowed=True),
        )
        response_json_schema = return_value_schema_adapter.json_schema()
        response_json_schema = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
            response_json_schema
        )
      except Exception as e:
        _automatic_function_calling_util._raise_for_unsupported_param(
            return_value, callable.__name__, e
        )

    if response_schema:
      declaration.response = response_schema
    elif response_json_schema:
      declaration.response_json_schema = response_json_schema
    return declaration

  @classmethod
  def from_callable(
      cls,
      *,
      client: 'BaseApiClient',
      callable: Callable[..., Any],
      behavior: Optional[Behavior] = None,
  ) -> 'FunctionDeclaration':
    """Converts a Callable to a FunctionDeclaration based on the client.

    Note: For best results prefer
    [Google-style
    docstring](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods)
    when describing arguments. This function does **not** parse argument
    descriptions into the property description slots of the resulting structure.
    Instead it sends the whole docstring in the top-level function description.
    Google-style docstring are closest to what the model is trained on.
    """
    # The client's backend decides which API surface's conversion rules apply.
    if client.vertexai:
      return cls.from_callable_with_api_option(
          callable=callable, api_option='VERTEX_AI', behavior=behavior
      )
    else:
      return cls.from_callable_with_api_option(
          callable=callable, api_option='GEMINI_API', behavior=behavior
      )


class FunctionDeclarationDict(TypedDict, total=False):
  """Defines a function that the model can generate JSON inputs for.

  The inputs are based on `OpenAPI 3.0 specifications
  <https://spec.openapis.org/oas/v3.0.3>`_.
  """

  # TypedDict mirror of FunctionDeclaration; all keys optional (total=False).
  behavior: Optional[Behavior]
  """Defines the function behavior."""

  description: Optional[str]
  """Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function."""

  name: Optional[str]
  """Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64."""

  parameters: Optional[SchemaDict]
  """Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1"""

  parameters_json_schema: Optional[Any]
  """Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { "type": "object", "properties": { "name": { "type": "string" }, "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", "age"], "propertyOrdering": ["name", "age"] } ``` This field is mutually exclusive with `parameters`."""

  response: Optional[SchemaDict]
  """Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function."""

  response_json_schema: Optional[Any]
  """Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`."""


FunctionDeclarationOrDict = Union[FunctionDeclaration, FunctionDeclarationDict]


class DynamicRetrievalConfig(_common.BaseModel):
  """Describes the options to customize dynamic retrieval."""

  # Field descriptions are runtime metadata surfaced in the generated schema.
  mode: Optional[DynamicRetrievalConfigMode] = Field(
      default=None,
      description="""The mode of the predictor to be used in dynamic retrieval.""",
  )
  dynamic_threshold: Optional[float] = Field(
      default=None,
      description="""Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used.""",
  )


class DynamicRetrievalConfigDict(TypedDict, total=False):
  """Describes the options to customize dynamic retrieval."""

  # TypedDict mirror of DynamicRetrievalConfig.
  mode: Optional[DynamicRetrievalConfigMode]
  """The mode of the predictor to be used in dynamic retrieval."""

  dynamic_threshold: Optional[float]
  """Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used."""


# Accepts either the Pydantic model or its TypedDict equivalent.
DynamicRetrievalConfigOrDict = Union[
    DynamicRetrievalConfig, DynamicRetrievalConfigDict
]


class GoogleSearchRetrieval(_common.BaseModel):
  """Tool to retrieve public web data for grounding, powered by Google."""

  # Field descriptions are runtime metadata surfaced in the generated schema.
  dynamic_retrieval_config: Optional[DynamicRetrievalConfig] = Field(
      default=None,
      description="""Specifies the dynamic retrieval configuration for the given source.""",
  )


class GoogleSearchRetrievalDict(TypedDict, total=False):
  """Tool to retrieve public web data for grounding, powered by Google."""

  # TypedDict mirror of GoogleSearchRetrieval.
  dynamic_retrieval_config: Optional[DynamicRetrievalConfigDict]
  """Specifies the dynamic retrieval configuration for the given source."""


# Accepts either the Pydantic model or its TypedDict equivalent.
GoogleSearchRetrievalOrDict = Union[
    GoogleSearchRetrieval, GoogleSearchRetrievalDict
]


class ComputerUse(_common.BaseModel):
  """Tool to support computer use."""

  # Field descriptions are runtime metadata surfaced in the generated schema.
  environment: Optional[Environment] = Field(
      default=None, description="""Required. The environment being operated."""
  )
  excluded_predefined_functions: Optional[list[str]] = Field(
      default=None,
      description="""By default, predefined functions are included in the final model call.
    Some of them can be explicitly excluded from being automatically included.
    This can serve two purposes:
      1. Using a more restricted / different action space.
      2. Improving the definitions / instructions of predefined functions.""",
  )


class ComputerUseDict(TypedDict, total=False):
  """Tool to support computer use."""

  # TypedDict mirror of ComputerUse.
  environment: Optional[Environment]
  """Required. The environment being operated."""

  excluded_predefined_functions: Optional[list[str]]
  """By default, predefined functions are included in the final model call.
    Some of them can be explicitly excluded from being automatically included.
    This can serve two purposes:
      1. Using a more restricted / different action space.
      2. Improving the definitions / instructions of predefined functions."""


ComputerUseOrDict = Union[ComputerUse, ComputerUseDict]


class FileSearch(_common.BaseModel):
  """Tool to retrieve knowledge from the File Search Stores."""

  # Field descriptions are runtime metadata surfaced in the generated schema.
  file_search_store_names: Optional[list[str]] = Field(
      default=None,
      description="""The names of the file_search_stores to retrieve from.
      Example: `fileSearchStores/my-file-search-store-123`""",
  )
  top_k: Optional[int] = Field(
      default=None,
      description="""The number of file search retrieval chunks to retrieve.""",
  )
  metadata_filter: Optional[str] = Field(
      default=None,
      description="""Metadata filter to apply to the file search retrieval documents. See https://google.aip.dev/160 for the syntax of the filter expression.""",
  )


class FileSearchDict(TypedDict, total=False):
  """Tool to retrieve knowledge from the File Search Stores."""

  # TypedDict mirror of FileSearch.
  file_search_store_names: Optional[list[str]]
  """The names of the file_search_stores to retrieve from.
      Example: `fileSearchStores/my-file-search-store-123`"""

  top_k: Optional[int]
  """The number of file search retrieval chunks to retrieve."""

  metadata_filter: Optional[str]
  """Metadata filter to apply to the file search retrieval documents. See https://google.aip.dev/160 for the syntax of the filter expression."""


FileSearchOrDict = Union[FileSearch, FileSearchDict]


class ApiAuthApiKeyConfig(_common.BaseModel):
  """The API secret. This data type is not supported in Gemini API."""

  # Field descriptions are runtime metadata surfaced in the generated schema.
  api_key_secret_version: Optional[str] = Field(
      default=None,
      description="""Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}""",
  )
  api_key_string: Optional[str] = Field(
      default=None,
      description="""The API key string. Either this or `api_key_secret_version` must be set.""",
  )


class ApiAuthApiKeyConfigDict(TypedDict, total=False):
  """The API secret. This data type is not supported in Gemini API."""

  # TypedDict mirror of ApiAuthApiKeyConfig.
  api_key_secret_version: Optional[str]
  """Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}"""

  api_key_string: Optional[str]
  """The API key string. Either this or `api_key_secret_version` must be set."""


ApiAuthApiKeyConfigOrDict = Union[ApiAuthApiKeyConfig, ApiAuthApiKeyConfigDict]


class ApiAuth(_common.BaseModel):
  """The generic reusable api auth config.

  Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto)
  instead. This data type is not supported in Gemini API.
  """

  # Field descriptions are runtime metadata surfaced in the generated schema.
  api_key_config: Optional[ApiAuthApiKeyConfig] = Field(
      default=None, description="""The API secret."""
  )


class ApiAuthDict(TypedDict, total=False):
  """The generic reusable api auth config.

  Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto)
  instead. This data type is not supported in Gemini API.
  """

  # TypedDict mirror of ApiAuth.
  api_key_config: Optional[ApiAuthApiKeyConfigDict]
  """The API secret."""


ApiAuthOrDict = Union[ApiAuth, ApiAuthDict]


class ApiKeyConfig(_common.BaseModel):
  """Config for authentication with API key.

  This data type is not supported in Gemini API.
  """

  # Field descriptions are runtime metadata surfaced in the generated schema.
  api_key_secret: Optional[str] = Field(
      default=None,
      description="""Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.""",
  )
  api_key_string: Optional[str] = Field(
      default=None,
      description="""Optional. The API key to be used in the request directly.""",
  )
  http_element_location: Optional[HttpElementLocation] = Field(
      default=None, description="""Optional. The location of the API key."""
  )
  name: Optional[str] = Field(
      default=None,
      description="""Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name.""",
  )


class ApiKeyConfigDict(TypedDict, total=False):
  """Config for authentication with API key.

  This data type is not supported in Gemini API.
  """

  # TypedDict mirror of ApiKeyConfig.
  api_key_secret: Optional[str]
  """Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource."""

  api_key_string: Optional[str]
  """Optional. The API key to be used in the request directly."""

  http_element_location: Optional[HttpElementLocation]
  """Optional. The location of the API key."""

  name: Optional[str]
  """Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name."""


ApiKeyConfigOrDict = Union[ApiKeyConfig, ApiKeyConfigDict]


class AuthConfigGoogleServiceAccountConfig(_common.BaseModel):
  """Config for Google Service Account Authentication.

  This data type is not supported in Gemini API.
  """

  # Field descriptions are runtime metadata surfaced in the generated schema.
  service_account: Optional[str] = Field(
      default=None,
      description="""Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.""",
  )


class AuthConfigGoogleServiceAccountConfigDict(TypedDict, total=False):
  """Config for Google Service Account Authentication.

  This data type is not supported in Gemini API.
  """

  # TypedDict mirror of AuthConfigGoogleServiceAccountConfig.
  service_account: Optional[str]
  """Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension."""


# Accepts either the Pydantic model or its TypedDict equivalent.
AuthConfigGoogleServiceAccountConfigOrDict = Union[
    AuthConfigGoogleServiceAccountConfig,
    AuthConfigGoogleServiceAccountConfigDict,
]


class AuthConfigHttpBasicAuthConfig(_common.BaseModel):
  """Config for HTTP Basic Authentication.

  This data type is not supported in Gemini API.
  """

  # Field descriptions are runtime metadata surfaced in the generated schema.
  credential_secret: Optional[str] = Field(
      default=None,
      description="""Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.""",
  )


class AuthConfigHttpBasicAuthConfigDict(TypedDict, total=False):
  """Config for HTTP Basic Authentication.

  This data type is not supported in Gemini API.
  """

  credential_secret: Optional[str]
  """Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource."""


AuthConfigHttpBasicAuthConfigOrDict = Union[
    AuthConfigHttpBasicAuthConfig, AuthConfigHttpBasicAuthConfigDict
]


class AuthConfigOauthConfig(_common.BaseModel):
  """Config for user oauth. This data type is not supported in Gemini API."""

  # Exactly one of access_token / service_account is typically populated;
  # the server enforces the contract — TODO confirm against the API docs.
  access_token: Optional[str] = Field(
      default=None,
      description="""Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.""",
  )
  service_account: Optional[str] = Field(
      default=None,
      description="""The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.""",
  )


class AuthConfigOauthConfigDict(TypedDict, total=False):
  """Config for user oauth. This data type is not supported in Gemini API."""

  access_token: Optional[str]
  """Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time."""

  service_account: Optional[str]
  """The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
AuthConfigOauthConfigOrDict = Union[
    AuthConfigOauthConfig, AuthConfigOauthConfigDict
]


class AuthConfigOidcConfig(_common.BaseModel):
  """Config for user OIDC auth.

  This data type is not supported in Gemini API.
  """

  id_token: Optional[str] = Field(
      default=None,
      description="""OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.""",
  )
  service_account: Optional[str] = Field(
      default=None,
      description="""The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).""",
  )


class AuthConfigOidcConfigDict(TypedDict, total=False):
  """Config for user OIDC auth.

  This data type is not supported in Gemini API.
  """

  id_token: Optional[str]
  """OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time."""

  service_account: Optional[str]
  """The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents)."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
AuthConfigOidcConfigOrDict = Union[
    AuthConfigOidcConfig, AuthConfigOidcConfigDict
]


class AuthConfig(_common.BaseModel):
  """Auth configuration to run the extension.

  This data type is not supported in Gemini API.
  """

  # One sub-config per supported auth scheme; `auth_type` selects which of
  # the sub-config fields applies.
  api_key_config: Optional[ApiKeyConfig] = Field(
      default=None, description="""Config for API key auth."""
  )
  auth_type: Optional[AuthType] = Field(
      default=None, description="""Type of auth scheme."""
  )
  google_service_account_config: Optional[
      AuthConfigGoogleServiceAccountConfig
  ] = Field(
      default=None, description="""Config for Google Service Account auth."""
  )
  http_basic_auth_config: Optional[AuthConfigHttpBasicAuthConfig] = Field(
      default=None, description="""Config for HTTP Basic auth."""
  )
  oauth_config: Optional[AuthConfigOauthConfig] = Field(
      default=None, description="""Config for user oauth."""
  )
  oidc_config: Optional[AuthConfigOidcConfig] = Field(
      default=None, description="""Config for user OIDC auth."""
  )


class AuthConfigDict(TypedDict, total=False):
  """Auth configuration to run the extension.

  This data type is not supported in Gemini API.
  """

  api_key_config: Optional[ApiKeyConfigDict]
  """Config for API key auth."""

  auth_type: Optional[AuthType]
  """Type of auth scheme."""

  google_service_account_config: Optional[
      AuthConfigGoogleServiceAccountConfigDict
  ]
  """Config for Google Service Account auth."""

  http_basic_auth_config: Optional[AuthConfigHttpBasicAuthConfigDict]
  """Config for HTTP Basic auth."""

  oauth_config: Optional[AuthConfigOauthConfigDict]
  """Config for user oauth."""

  oidc_config: Optional[AuthConfigOidcConfigDict]
  """Config for user OIDC auth."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
AuthConfigOrDict = Union[AuthConfig, AuthConfigDict]


class ExternalApiElasticSearchParams(_common.BaseModel):
  """The search parameters to use for the ELASTIC_SEARCH spec.

  This data type is not supported in Gemini API.
  """

  index: Optional[str] = Field(
      default=None, description="""The ElasticSearch index to use."""
  )
  num_hits: Optional[int] = Field(
      default=None,
      description="""Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.""",
  )
  search_template: Optional[str] = Field(
      default=None, description="""The ElasticSearch search template to use."""
  )


class ExternalApiElasticSearchParamsDict(TypedDict, total=False):
  """The search parameters to use for the ELASTIC_SEARCH spec.

  This data type is not supported in Gemini API.
  """

  index: Optional[str]
  """The ElasticSearch index to use."""

  num_hits: Optional[int]
  """Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param."""

  search_template: Optional[str]
  """The ElasticSearch search template to use."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
ExternalApiElasticSearchParamsOrDict = Union[
    ExternalApiElasticSearchParams, ExternalApiElasticSearchParamsDict
]


class ExternalApiSimpleSearchParams(_common.BaseModel):
  """The search parameters to use for SIMPLE_SEARCH spec.

  This data type is not supported in Gemini API.
  """

  # Marker type: the SIMPLE_SEARCH spec takes no parameters.
  pass


class ExternalApiSimpleSearchParamsDict(TypedDict, total=False):
  """The search parameters to use for SIMPLE_SEARCH spec.

  This data type is not supported in Gemini API.
  """

  # Marker type: the SIMPLE_SEARCH spec takes no parameters.
  pass


# Accepted input type: either the Pydantic model or its TypedDict mirror.
ExternalApiSimpleSearchParamsOrDict = Union[
    ExternalApiSimpleSearchParams, ExternalApiSimpleSearchParamsDict
]


class ExternalApi(_common.BaseModel):
  """Retrieve from data source powered by external API for grounding.

  The external API is not owned by Google, but needs to follow the pre-defined
  API spec. This data type is not supported in Gemini API.
  """

  api_auth: Optional[ApiAuth] = Field(
      default=None,
      description="""The authentication config to access the API. Deprecated. Please use auth_config instead.""",
  )
  api_spec: Optional[ApiSpec] = Field(
      default=None,
      description="""The API spec that the external API implements.""",
  )
  auth_config: Optional[AuthConfig] = Field(
      default=None,
      description="""The authentication config to access the API.""",
  )
  elastic_search_params: Optional[ExternalApiElasticSearchParams] = Field(
      default=None, description="""Parameters for the elastic search API."""
  )
  endpoint: Optional[str] = Field(
      default=None,
      description="""The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search""",
  )
  simple_search_params: Optional[ExternalApiSimpleSearchParams] = Field(
      default=None, description="""Parameters for the simple search API."""
  )


class ExternalApiDict(TypedDict, total=False):
  """Retrieve from data source powered by external API for grounding.

  The external API is not owned by Google, but needs to follow the pre-defined
  API spec. This data type is not supported in Gemini API.
  """

  api_auth: Optional[ApiAuthDict]
  """The authentication config to access the API. Deprecated. Please use auth_config instead."""

  api_spec: Optional[ApiSpec]
  """The API spec that the external API implements."""

  auth_config: Optional[AuthConfigDict]
  """The authentication config to access the API."""

  elastic_search_params: Optional[ExternalApiElasticSearchParamsDict]
  """Parameters for the elastic search API."""

  endpoint: Optional[str]
  """The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search"""

  simple_search_params: Optional[ExternalApiSimpleSearchParamsDict]
  """Parameters for the simple search API."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
ExternalApiOrDict = Union[ExternalApi, ExternalApiDict]


class VertexAISearchDataStoreSpec(_common.BaseModel):
  """Define data stores within engine to filter on in a search call and configurations for those data stores.

  For more information, see
  https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec.
  This data type is not supported in Gemini API.
  """

  data_store: Optional[str] = Field(
      default=None,
      description="""Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`""",
  )
  filter: Optional[str] = Field(
      default=None,
      description="""Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)""",
  )


class VertexAISearchDataStoreSpecDict(TypedDict, total=False):
  """Define data stores within engine to filter on in a search call and configurations for those data stores.

  For more information, see
  https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec.
  This data type is not supported in Gemini API.
  """

  data_store: Optional[str]
  """Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`"""

  filter: Optional[str]
  """Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)"""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
VertexAISearchDataStoreSpecOrDict = Union[
    VertexAISearchDataStoreSpec, VertexAISearchDataStoreSpecDict
]


class VertexAISearch(_common.BaseModel):
  """Retrieve from Vertex AI Search datastore or engine for grounding.

  datastore and engine are mutually exclusive. See
  https://cloud.google.com/products/agent-builder. This data type is not
  supported in Gemini API.
  """

  data_store_specs: Optional[list[VertexAISearchDataStoreSpec]] = Field(
      default=None,
      description="""Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used.""",
  )
  datastore: Optional[str] = Field(
      default=None,
      description="""Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`""",
  )
  engine: Optional[str] = Field(
      default=None,
      description="""Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}`""",
  )
  filter: Optional[str] = Field(
      default=None,
      description="""Optional. Filter strings to be passed to the search API.""",
  )
  # Fix: "maximumm" -> "maximum" in the user-visible field description.
  max_results: Optional[int] = Field(
      default=None,
      description="""Optional. Number of search results to return per query. The default value is 10. The maximum allowed value is 10.""",
  )


class VertexAISearchDict(TypedDict, total=False):
  """Retrieve from Vertex AI Search datastore or engine for grounding.

  datastore and engine are mutually exclusive. See
  https://cloud.google.com/products/agent-builder. This data type is not
  supported in Gemini API.
  """

  data_store_specs: Optional[list[VertexAISearchDataStoreSpecDict]]
  """Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used."""

  datastore: Optional[str]
  """Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`"""

  engine: Optional[str]
  """Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}`"""

  filter: Optional[str]
  """Optional. Filter strings to be passed to the search API."""

  max_results: Optional[int]
  """Optional. Number of search results to return per query. The default value is 10. The maximumm allowed value is 10."""


VertexAISearchOrDict = Union[VertexAISearch, VertexAISearchDict]


class VertexRagStoreRagResource(_common.BaseModel):
  """The definition of the Rag resource.

  This data type is not supported in Gemini API.
  """

  rag_corpus: Optional[str] = Field(
      default=None,
      description="""Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`""",
  )
  rag_file_ids: Optional[list[str]] = Field(
      default=None,
      description="""Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.""",
  )


class VertexRagStoreRagResourceDict(TypedDict, total=False):
  """The definition of the Rag resource.

  This data type is not supported in Gemini API.
  """

  rag_corpus: Optional[str]
  """Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`"""

  rag_file_ids: Optional[list[str]]
  """Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
VertexRagStoreRagResourceOrDict = Union[
    VertexRagStoreRagResource, VertexRagStoreRagResourceDict
]


class RagRetrievalConfigFilter(_common.BaseModel):
  """Config for filters. This data type is not supported in Gemini API."""

  metadata_filter: Optional[str] = Field(
      default=None, description="""Optional. String for metadata filtering."""
  )
  vector_distance_threshold: Optional[float] = Field(
      default=None,
      description="""Optional. Only returns contexts with vector distance smaller than the threshold.""",
  )
  vector_similarity_threshold: Optional[float] = Field(
      default=None,
      description="""Optional. Only returns contexts with vector similarity larger than the threshold.""",
  )


class RagRetrievalConfigFilterDict(TypedDict, total=False):
  """Config for filters. This data type is not supported in Gemini API."""

  metadata_filter: Optional[str]
  """Optional. String for metadata filtering."""

  vector_distance_threshold: Optional[float]
  """Optional. Only returns contexts with vector distance smaller than the threshold."""

  vector_similarity_threshold: Optional[float]
  """Optional. Only returns contexts with vector similarity larger than the threshold."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RagRetrievalConfigFilterOrDict = Union[
    RagRetrievalConfigFilter, RagRetrievalConfigFilterDict
]


class RagRetrievalConfigHybridSearch(_common.BaseModel):
  """Config for Hybrid Search. This data type is not supported in Gemini API."""

  alpha: Optional[float] = Field(
      default=None,
      description="""Optional. Alpha value controls the weight between dense and sparse vector search results. The range is [0, 1], while 0 means sparse vector search only and 1 means dense vector search only. The default value is 0.5 which balances sparse and dense vector search equally.""",
  )


class RagRetrievalConfigHybridSearchDict(TypedDict, total=False):
  """Config for Hybrid Search. This data type is not supported in Gemini API."""

  alpha: Optional[float]
  """Optional. Alpha value controls the weight between dense and sparse vector search results. The range is [0, 1], while 0 means sparse vector search only and 1 means dense vector search only. The default value is 0.5 which balances sparse and dense vector search equally."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RagRetrievalConfigHybridSearchOrDict = Union[
    RagRetrievalConfigHybridSearch, RagRetrievalConfigHybridSearchDict
]


class RagRetrievalConfigRankingLlmRanker(_common.BaseModel):
  """Config for LlmRanker. This data type is not supported in Gemini API."""

  model_name: Optional[str] = Field(
      default=None,
      description="""Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).""",
  )


class RagRetrievalConfigRankingLlmRankerDict(TypedDict, total=False):
  """Config for LlmRanker. This data type is not supported in Gemini API."""

  model_name: Optional[str]
  """Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models)."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RagRetrievalConfigRankingLlmRankerOrDict = Union[
    RagRetrievalConfigRankingLlmRanker, RagRetrievalConfigRankingLlmRankerDict
]


class RagRetrievalConfigRankingRankService(_common.BaseModel):
  """Config for Rank Service. This data type is not supported in Gemini API."""

  model_name: Optional[str] = Field(
      default=None,
      description="""Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`""",
  )


class RagRetrievalConfigRankingRankServiceDict(TypedDict, total=False):
  """Config for Rank Service. This data type is not supported in Gemini API."""

  model_name: Optional[str]
  """Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`"""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RagRetrievalConfigRankingRankServiceOrDict = Union[
    RagRetrievalConfigRankingRankService,
    RagRetrievalConfigRankingRankServiceDict,
]


class RagRetrievalConfigRanking(_common.BaseModel):
  """Config for ranking and reranking.

  This data type is not supported in Gemini API.
  """

  llm_ranker: Optional[RagRetrievalConfigRankingLlmRanker] = Field(
      default=None, description="""Optional. Config for LlmRanker."""
  )
  rank_service: Optional[RagRetrievalConfigRankingRankService] = Field(
      default=None, description="""Optional. Config for Rank Service."""
  )


class RagRetrievalConfigRankingDict(TypedDict, total=False):
  """Config for ranking and reranking.

  This data type is not supported in Gemini API.
  """

  llm_ranker: Optional[RagRetrievalConfigRankingLlmRankerDict]
  """Optional. Config for LlmRanker."""

  rank_service: Optional[RagRetrievalConfigRankingRankServiceDict]
  """Optional. Config for Rank Service."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RagRetrievalConfigRankingOrDict = Union[
    RagRetrievalConfigRanking, RagRetrievalConfigRankingDict
]


class RagRetrievalConfig(_common.BaseModel):
  """Specifies the context retrieval config.

  This data type is not supported in Gemini API.
  """

  filter: Optional[RagRetrievalConfigFilter] = Field(
      default=None, description="""Optional. Config for filters."""
  )
  hybrid_search: Optional[RagRetrievalConfigHybridSearch] = Field(
      default=None, description="""Optional. Config for Hybrid Search."""
  )
  ranking: Optional[RagRetrievalConfigRanking] = Field(
      default=None,
      description="""Optional. Config for ranking and reranking.""",
  )
  top_k: Optional[int] = Field(
      default=None,
      description="""Optional. The number of contexts to retrieve.""",
  )


class RagRetrievalConfigDict(TypedDict, total=False):
  """Specifies the context retrieval config.

  This data type is not supported in Gemini API.
  """

  filter: Optional[RagRetrievalConfigFilterDict]
  """Optional. Config for filters."""

  hybrid_search: Optional[RagRetrievalConfigHybridSearchDict]
  """Optional. Config for Hybrid Search."""

  ranking: Optional[RagRetrievalConfigRankingDict]
  """Optional. Config for ranking and reranking."""

  top_k: Optional[int]
  """Optional. The number of contexts to retrieve."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RagRetrievalConfigOrDict = Union[RagRetrievalConfig, RagRetrievalConfigDict]


class VertexRagStore(_common.BaseModel):
  """Retrieve from Vertex RAG Store for grounding.

  This data type is not supported in Gemini API.
  """

  rag_corpora: Optional[list[str]] = Field(
      default=None,
      description="""Optional. Deprecated. Please use rag_resources instead.""",
  )
  rag_resources: Optional[list[VertexRagStoreRagResource]] = Field(
      default=None,
      description="""Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.""",
  )
  rag_retrieval_config: Optional[RagRetrievalConfig] = Field(
      default=None,
      description="""Optional. The retrieval config for the Rag query.""",
  )
  similarity_top_k: Optional[int] = Field(
      default=None,
      description="""Optional. Number of top k results to return from the selected corpora.""",
  )
  store_context: Optional[bool] = Field(
      default=None,
      description="""Optional. Currently only supported for Gemini Multimodal Live API. In Gemini Multimodal Live API, if `store_context` bool is specified, Gemini will leverage it to automatically memorize the interactions between the client and Gemini, and retrieve context when needed to augment the response generation for users' ongoing and future interactions.""",
  )
  vector_distance_threshold: Optional[float] = Field(
      default=None,
      description="""Optional. Only return results with vector distance smaller than the threshold.""",
  )


class VertexRagStoreDict(TypedDict, total=False):
  """Retrieve from Vertex RAG Store for grounding.

  This data type is not supported in Gemini API.
  """

  rag_corpora: Optional[list[str]]
  """Optional. Deprecated. Please use rag_resources instead."""

  rag_resources: Optional[list[VertexRagStoreRagResourceDict]]
  """Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support."""

  rag_retrieval_config: Optional[RagRetrievalConfigDict]
  """Optional. The retrieval config for the Rag query."""

  similarity_top_k: Optional[int]
  """Optional. Number of top k results to return from the selected corpora."""

  store_context: Optional[bool]
  """Optional. Currently only supported for Gemini Multimodal Live API. In Gemini Multimodal Live API, if `store_context` bool is specified, Gemini will leverage it to automatically memorize the interactions between the client and Gemini, and retrieve context when needed to augment the response generation for users' ongoing and future interactions."""

  vector_distance_threshold: Optional[float]
  """Optional. Only return results with vector distance smaller than the threshold."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
VertexRagStoreOrDict = Union[VertexRagStore, VertexRagStoreDict]


class Retrieval(_common.BaseModel):
  """Defines a retrieval tool that model can call to access external knowledge.

  This data type is not supported in Gemini API.
  """

  disable_attribution: Optional[bool] = Field(
      default=None,
      description="""Optional. Deprecated. This option is no longer supported.""",
  )
  external_api: Optional[ExternalApi] = Field(
      default=None,
      description="""Use data source powered by external API for grounding.""",
  )
  vertex_ai_search: Optional[VertexAISearch] = Field(
      default=None,
      description="""Set to use data source powered by Vertex AI Search.""",
  )
  vertex_rag_store: Optional[VertexRagStore] = Field(
      default=None,
      description="""Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService.""",
  )


class RetrievalDict(TypedDict, total=False):
  """Defines a retrieval tool that model can call to access external knowledge.

  This data type is not supported in Gemini API.
  """

  disable_attribution: Optional[bool]
  """Optional. Deprecated. This option is no longer supported."""

  external_api: Optional[ExternalApiDict]
  """Use data source powered by external API for grounding."""

  vertex_ai_search: Optional[VertexAISearchDict]
  """Set to use data source powered by Vertex AI Search."""

  vertex_rag_store: Optional[VertexRagStoreDict]
  """Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
RetrievalOrDict = Union[Retrieval, RetrievalDict]


class ToolCodeExecution(_common.BaseModel):
  """Tool that executes code generated by the model, and automatically returns the result to the model.

  See also [ExecutableCode] and [CodeExecutionResult] which are input and
  output to this tool. This data type is not supported in Gemini API.
  """

  # Marker type: enabling the tool requires no configuration fields.
  pass


class ToolCodeExecutionDict(TypedDict, total=False):
  """Tool that executes code generated by the model, and automatically returns the result to the model.

  See also [ExecutableCode] and [CodeExecutionResult] which are input and
  output to this tool. This data type is not supported in Gemini API.
  """

  # Marker type: enabling the tool requires no configuration fields.
  pass


# Accepted input type: either the Pydantic model or its TypedDict mirror.
ToolCodeExecutionOrDict = Union[ToolCodeExecution, ToolCodeExecutionDict]


class EnterpriseWebSearch(_common.BaseModel):
  """Tool to search public web data, powered by Vertex AI Search and Sec4 compliance.

  This data type is not supported in Gemini API.
  """

  exclude_domains: Optional[list[str]] = Field(
      default=None,
      description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""",
  )
  blocking_confidence: Optional[PhishBlockThreshold] = Field(
      default=None,
      description="""Optional. Sites with confidence level chosen & above this value will be blocked from the search results.""",
  )


class EnterpriseWebSearchDict(TypedDict, total=False):
  """Tool to search public web data, powered by Vertex AI Search and Sec4 compliance.

  This data type is not supported in Gemini API.
  """

  exclude_domains: Optional[list[str]]
  """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains."""

  blocking_confidence: Optional[PhishBlockThreshold]
  """Optional. Sites with confidence level chosen & above this value will be blocked from the search results."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
EnterpriseWebSearchOrDict = Union[EnterpriseWebSearch, EnterpriseWebSearchDict]


class GoogleMaps(_common.BaseModel):
  """Tool to retrieve public maps data for grounding, powered by Google."""

  auth_config: Optional[AuthConfig] = Field(
      default=None,
      description="""The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""",
  )
  enable_widget: Optional[bool] = Field(
      default=None,
      description="""Optional. If true, include the widget context token in the response.""",
  )


class GoogleMapsDict(TypedDict, total=False):
  """Tool to retrieve public maps data for grounding, powered by Google."""

  auth_config: Optional[AuthConfigDict]
  """The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API."""

  enable_widget: Optional[bool]
  """Optional. If true, include the widget context token in the response."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
GoogleMapsOrDict = Union[GoogleMaps, GoogleMapsDict]


class Interval(_common.BaseModel):
  """Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive).

  The start must be less than or equal to the end. When the start equals the
  end, the interval is empty (matches no time). When both start and end are
  unspecified, the interval matches any time.
  """

  end_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end.""",
  )
  start_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start.""",
  )


class IntervalDict(TypedDict, total=False):
  """Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive).

  The start must be less than or equal to the end. When the start equals the
  end, the interval is empty (matches no time). When both start and end are
  unspecified, the interval matches any time.
  """

  end_time: Optional[datetime.datetime]
  """Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end."""

  start_time: Optional[datetime.datetime]
  """Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start."""


# Accepted input type: either the Pydantic model or its TypedDict mirror.
IntervalOrDict = Union[Interval, IntervalDict]


class GoogleSearch(_common.BaseModel):
  """GoogleSearch tool type.

  Tool to support Google Search in Model. Powered by Google.
  """

  exclude_domains: Optional[list[str]] = Field(
      default=None,
      description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. This field is not supported in Gemini API.""",
  )
  blocking_confidence: Optional[PhishBlockThreshold] = Field(
      default=None,
      description="""Optional. Sites with confidence level chosen & above this value will be blocked from the search results. This field is not supported in Gemini API.""",
  )
  time_range_filter: Optional[Interval] = Field(
      default=None,
      description="""Optional. Filter search results to a specific time range. If customers set a start time, they must set an end time (and vice versa). This field is not supported in Vertex AI.""",
  )


# TypedDict mirror of `GoogleSearch`; all keys optional (total=False).
class GoogleSearchDict(TypedDict, total=False):
  """GoogleSearch tool type.

  Tool to support Google Search in Model. Powered by Google.
  """

  exclude_domains: Optional[list[str]]
  """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. This field is not supported in Gemini API."""

  blocking_confidence: Optional[PhishBlockThreshold]
  """Optional. Sites with confidence level chosen & above this value will be blocked from the search results. This field is not supported in Gemini API."""

  time_range_filter: Optional[IntervalDict]
  """Optional. Filter search results to a specific time range. If customers set a start time, they must set an end time (and vice versa). This field is not supported in Vertex AI."""


GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]


class UrlContext(_common.BaseModel):
  """Tool to support URL context."""

  # Marker model with no configuration fields; its presence in a Tool is the
  # entire configuration.
  pass


class UrlContextDict(TypedDict, total=False):
  """Tool to support URL context."""

  # TypedDict mirror of `UrlContext`; intentionally empty.
  pass


UrlContextOrDict = Union[UrlContext, UrlContextDict]


# Each optional field, when set, enables the corresponding tool; unset fields
# leave that tool disabled. Some tools are API-specific (see field descriptions).
class Tool(_common.BaseModel):
  """Tool details of a tool that the model may use to generate a response."""

  function_declarations: Optional[list[FunctionDeclaration]] = Field(
      default=None,
      description="""List of function declarations that the tool supports.""",
  )
  retrieval: Optional[Retrieval] = Field(
      default=None,
      description="""Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. This field is not supported in Gemini API.""",
  )
  google_search_retrieval: Optional[GoogleSearchRetrieval] = Field(
      default=None,
      description="""Optional. Specialized retrieval tool that is powered by Google Search.""",
  )
  computer_use: Optional[ComputerUse] = Field(
      default=None,
      description="""Optional. Tool to support the model interacting directly with the
      computer. If enabled, it automatically populates computer-use specific
      Function Declarations.""",
  )
  file_search: Optional[FileSearch] = Field(
      default=None,
      description="""Optional. Tool to retrieve knowledge from the File Search Stores.""",
  )
  code_execution: Optional[ToolCodeExecution] = Field(
      default=None,
      description="""Optional. CodeExecution tool type. Enables the model to execute code as part of generation.""",
  )
  enterprise_web_search: Optional[EnterpriseWebSearch] = Field(
      default=None,
      description="""Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance. This field is not supported in Gemini API.""",
  )
  google_maps: Optional[GoogleMaps] = Field(
      default=None,
      description="""Optional. GoogleMaps tool type. Tool to support Google Maps in Model.""",
  )
  google_search: Optional[GoogleSearch] = Field(
      default=None,
      description="""Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google.""",
  )
  url_context: Optional[UrlContext] = Field(
      default=None,
      description="""Optional. Tool to support URL context retrieval.""",
  )


# TypedDict mirror of `Tool`; nested configs use their Dict variants.
class ToolDict(TypedDict, total=False):
  """Tool details of a tool that the model may use to generate a response."""

  function_declarations: Optional[list[FunctionDeclarationDict]]
  """List of function declarations that the tool supports."""

  retrieval: Optional[RetrievalDict]
  """Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. This field is not supported in Gemini API."""

  google_search_retrieval: Optional[GoogleSearchRetrievalDict]
  """Optional. Specialized retrieval tool that is powered by Google Search."""

  computer_use: Optional[ComputerUseDict]
  """Optional. Tool to support the model interacting directly with the
      computer. If enabled, it automatically populates computer-use specific
      Function Declarations."""

  file_search: Optional[FileSearchDict]
  """Optional. Tool to retrieve knowledge from the File Search Stores."""

  code_execution: Optional[ToolCodeExecutionDict]
  """Optional. CodeExecution tool type. Enables the model to execute code as part of generation."""

  enterprise_web_search: Optional[EnterpriseWebSearchDict]
  """Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance. This field is not supported in Gemini API."""

  google_maps: Optional[GoogleMapsDict]
  """Optional. GoogleMaps tool type. Tool to support Google Maps in Model."""

  google_search: Optional[GoogleSearchDict]
  """Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google."""

  url_context: Optional[UrlContextDict]
  """Optional. Tool to support URL context retrieval."""


ToolOrDict = Union[Tool, ToolDict]
# When MCP support was importable at module load (`_is_mcp_imported` is set
# elsewhere in this module, analogous to the Pillow guard at the top of the
# file), tools may additionally be given as MCP tool objects or MCP client
# sessions; plain callables are always accepted.
if _is_mcp_imported:
  ToolUnion = Union[Tool, Callable[..., Any], mcp_types.Tool, McpClientSession]
  ToolUnionDict = Union[
      ToolDict, Callable[..., Any], mcp_types.Tool, McpClientSession
  ]
else:
  ToolUnion = Union[Tool, Callable[..., Any]]  # type: ignore[misc]
  ToolUnionDict = Union[ToolDict, Callable[..., Any]]  # type: ignore[misc]

ToolListUnion = list[ToolUnion]
ToolListUnionDict = list[ToolUnionDict]

# Anything convertible to a Schema: a plain dict, a Python type (including
# generic aliases and, on 3.10+, `X | Y` unions via VersionedUnionType), or a
# Schema model itself.
SchemaUnion = Union[
    dict[Any, Any], type, Schema, builtin_types.GenericAlias, VersionedUnionType  # type: ignore[valid-type]
]
SchemaUnionDict = Union[SchemaUnion, SchemaDict]


class FunctionCallingConfig(_common.BaseModel):
  """Function calling config."""

  mode: Optional[FunctionCallingConfigMode] = Field(
      default=None, description="""Optional. Function calling mode."""
  )
  # Per the description, only meaningful when `mode` is ANY.
  allowed_function_names: Optional[list[str]] = Field(
      default=None,
      description="""Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided.""",
  )
  stream_function_call_arguments: Optional[bool] = Field(
      default=None,
      description="""Optional. When set to true, arguments of a single function call will be streamed out in multiple parts/contents/responses. Partial parameter results will be returned in the [FunctionCall.partial_args] field. This field is not supported in Gemini API.""",
  )


# TypedDict mirror of `FunctionCallingConfig`; all keys optional (total=False).
class FunctionCallingConfigDict(TypedDict, total=False):
  """Function calling config."""

  mode: Optional[FunctionCallingConfigMode]
  """Optional. Function calling mode."""

  allowed_function_names: Optional[list[str]]
  """Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided."""

  stream_function_call_arguments: Optional[bool]
  """Optional. When set to true, arguments of a single function call will be streamed out in multiple parts/contents/responses. Partial parameter results will be returned in the [FunctionCall.partial_args] field. This field is not supported in Gemini API."""


# Accepted anywhere a function calling config is expected: model or dict form.
FunctionCallingConfigOrDict = Union[
    FunctionCallingConfig, FunctionCallingConfigDict
]


class LatLng(_common.BaseModel):
  """An object that represents a latitude/longitude pair.

  This is expressed as a pair of doubles to represent degrees latitude and
  degrees longitude. Unless specified otherwise, this object must conform to the
  <a href="https://en.wikipedia.org/wiki/World_Geodetic_System#1984_version">
  WGS84 standard</a>. Values must be within normalized ranges.
  """

  # NOTE: the stated ranges are documented contract only — no validator here
  # enforces them.
  latitude: Optional[float] = Field(
      default=None,
      description="""The latitude in degrees. It must be in the range [-90.0, +90.0].""",
  )
  longitude: Optional[float] = Field(
      default=None,
      description="""The longitude in degrees. It must be in the range [-180.0, +180.0]""",
  )


# TypedDict mirror of `LatLng`; all keys optional (total=False).
class LatLngDict(TypedDict, total=False):
  """An object that represents a latitude/longitude pair.

  This is expressed as a pair of doubles to represent degrees latitude and
  degrees longitude. Unless specified otherwise, this object must conform to the
  <a href="https://en.wikipedia.org/wiki/World_Geodetic_System#1984_version">
  WGS84 standard</a>. Values must be within normalized ranges.
  """

  latitude: Optional[float]
  """The latitude in degrees. It must be in the range [-90.0, +90.0]."""

  longitude: Optional[float]
  """The longitude in degrees. It must be in the range [-180.0, +180.0]"""


LatLngOrDict = Union[LatLng, LatLngDict]


# Per-request retrieval context (user location and language).
class RetrievalConfig(_common.BaseModel):
  """Retrieval config."""

  lat_lng: Optional[LatLng] = Field(
      default=None, description="""Optional. The location of the user."""
  )
  language_code: Optional[str] = Field(
      default=None, description="""The language code of the user."""
  )


# TypedDict mirror of `RetrievalConfig`; all keys optional (total=False).
class RetrievalConfigDict(TypedDict, total=False):
  """Retrieval config."""

  lat_lng: Optional[LatLngDict]
  """Optional. The location of the user."""

  language_code: Optional[str]
  """The language code of the user."""


RetrievalConfigOrDict = Union[RetrievalConfig, RetrievalConfigDict]


class ToolConfig(_common.BaseModel):
  """Tool config.

  This config is shared for all tools provided in the request.
  """

  function_calling_config: Optional[FunctionCallingConfig] = Field(
      default=None, description="""Optional. Function calling config."""
  )
  retrieval_config: Optional[RetrievalConfig] = Field(
      default=None, description="""Optional. Retrieval config."""
  )


# TypedDict mirror of `ToolConfig`; all keys optional (total=False).
class ToolConfigDict(TypedDict, total=False):
  """Tool config.

  This config is shared for all tools provided in the request.
  """

  function_calling_config: Optional[FunctionCallingConfigDict]
  """Optional. Function calling config."""

  retrieval_config: Optional[RetrievalConfigDict]
  """Optional. Retrieval config."""


ToolConfigOrDict = Union[ToolConfig, ToolConfigDict]


class ReplicatedVoiceConfig(_common.BaseModel):
  """ReplicatedVoiceConfig is used to configure replicated voice."""

  mime_type: Optional[str] = Field(
      default=None,
      description="""The mime type of the replicated voice.
      """,
  )
  # Raw audio bytes of the voice sample; format is described by `mime_type`
  # above — TODO confirm against the API docs.
  voice_sample_audio: Optional[bytes] = Field(
      default=None,
      description="""The sample audio of the replicated voice.
      """,
  )


# TypedDict mirror of `ReplicatedVoiceConfig`; all keys optional (total=False).
class ReplicatedVoiceConfigDict(TypedDict, total=False):
  """ReplicatedVoiceConfig is used to configure replicated voice."""

  mime_type: Optional[str]
  """The mime type of the replicated voice.
      """

  voice_sample_audio: Optional[bytes]
  """The sample audio of the replicated voice.
      """


# Accepted anywhere a replicated-voice config is expected: model or dict form.
ReplicatedVoiceConfigOrDict = Union[
    ReplicatedVoiceConfig, ReplicatedVoiceConfigDict
]


# Selects one of the service's preset voices by name.
class PrebuiltVoiceConfig(_common.BaseModel):
  """The configuration for the prebuilt speaker to use."""

  voice_name: Optional[str] = Field(
      default=None, description="""The name of the preset voice to use."""
  )


# TypedDict mirror of `PrebuiltVoiceConfig`.
class PrebuiltVoiceConfigDict(TypedDict, total=False):
  """The configuration for the prebuilt speaker to use."""

  voice_name: Optional[str]
  """The name of the preset voice to use."""


PrebuiltVoiceConfigOrDict = Union[PrebuiltVoiceConfig, PrebuiltVoiceConfigDict]


# Voice selection for synthesized speech: holds either a replicated-voice or a
# prebuilt-voice option. (Generated model without a class docstring.)
class VoiceConfig(_common.BaseModel):

  # NOTE(review): the generated description reads "If true, ..." although the
  # field is a config object, not a bool — looks like a generator artifact;
  # confirm against the API reference.
  replicated_voice_config: Optional[ReplicatedVoiceConfig] = Field(
      default=None,
      description="""If true, the model will use a replicated voice for the response.""",
  )
  prebuilt_voice_config: Optional[PrebuiltVoiceConfig] = Field(
      default=None,
      description="""The configuration for the prebuilt voice to use.""",
  )


# TypedDict mirror of `VoiceConfig`; all keys optional (total=False).
class VoiceConfigDict(TypedDict, total=False):

  replicated_voice_config: Optional[ReplicatedVoiceConfigDict]
  """If true, the model will use a replicated voice for the response."""

  prebuilt_voice_config: Optional[PrebuiltVoiceConfigDict]
  """The configuration for the prebuilt voice to use."""


VoiceConfigOrDict = Union[VoiceConfig, VoiceConfigDict]


class SpeakerVoiceConfig(_common.BaseModel):
  """Configuration for a single speaker in a multi speaker setup."""

  # Both fields are documented "Required." by the API, yet default to None here
  # — required-ness is enforced server-side, not by this model.
  speaker: Optional[str] = Field(
      default=None,
      description="""Required. The name of the speaker. This should be the same as the speaker name used in the prompt.""",
  )
  voice_config: Optional[VoiceConfig] = Field(
      default=None,
      description="""Required. The configuration for the voice of this speaker.""",
  )


# TypedDict mirror of `SpeakerVoiceConfig`; all keys optional (total=False).
class SpeakerVoiceConfigDict(TypedDict, total=False):
  """Configuration for a single speaker in a multi speaker setup."""

  speaker: Optional[str]
  """Required. The name of the speaker. This should be the same as the speaker name used in the prompt."""

  voice_config: Optional[VoiceConfigDict]
  """Required. The configuration for the voice of this speaker."""


SpeakerVoiceConfigOrDict = Union[SpeakerVoiceConfig, SpeakerVoiceConfigDict]


class MultiSpeakerVoiceConfig(_common.BaseModel):
  """The configuration for the multi-speaker setup.

  This data type is not supported in Vertex AI.
  """

  # One entry per distinct speaker named in the prompt (see SpeakerVoiceConfig).
  speaker_voice_configs: Optional[list[SpeakerVoiceConfig]] = Field(
      default=None, description="""Required. All the enabled speaker voices."""
  )


# TypedDict mirror of `MultiSpeakerVoiceConfig`.
class MultiSpeakerVoiceConfigDict(TypedDict, total=False):
  """The configuration for the multi-speaker setup.

  This data type is not supported in Vertex AI.
  """

  speaker_voice_configs: Optional[list[SpeakerVoiceConfigDict]]
  """Required. All the enabled speaker voices."""


# Accepted anywhere a multi-speaker config is expected: model or dict form.
MultiSpeakerVoiceConfigOrDict = Union[
    MultiSpeakerVoiceConfig, MultiSpeakerVoiceConfigDict
]


# Speech synthesis configuration: voice, language, and (mutually exclusive with
# `voice_config`, per the field description) a multi-speaker setup.
# (Generated model without a class docstring.)
class SpeechConfig(_common.BaseModel):

  voice_config: Optional[VoiceConfig] = Field(
      default=None,
      description="""Configuration for the voice of the response.""",
  )
  language_code: Optional[str] = Field(
      default=None,
      description="""Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.""",
  )
  multi_speaker_voice_config: Optional[MultiSpeakerVoiceConfig] = Field(
      default=None,
      description="""Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI.""",
  )


# TypedDict mirror of `SpeechConfig`; all keys optional (total=False).
class SpeechConfigDict(TypedDict, total=False):

  voice_config: Optional[VoiceConfigDict]
  """Configuration for the voice of the response."""

  language_code: Optional[str]
  """Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization."""

  multi_speaker_voice_config: Optional[MultiSpeakerVoiceConfigDict]
  """Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI."""


SpeechConfigOrDict = Union[SpeechConfig, SpeechConfigDict]


class AutomaticFunctionCallingConfig(_common.BaseModel):
  """The configuration for automatic function calling."""

  disable: Optional[bool] = Field(
      default=None,
      description="""Whether to disable automatic function calling.
      If not set or set to False, will enable automatic function calling.
      If set to True, will disable automatic function calling.
      """,
  )
  # Unlike the other fields in this model, this one carries a concrete default
  # (10) rather than None, matching the SDK behavior described below.
  maximum_remote_calls: Optional[int] = Field(
      default=10,
      description="""If automatic function calling is enabled,
      maximum number of remote calls for automatic function calling.
      This number should be a positive integer.
      If not set, SDK will set maximum number of remote calls to 10.
      """,
  )
  ignore_call_history: Optional[bool] = Field(
      default=None,
      description="""If automatic function calling is enabled,
      whether to ignore call history to the response.
      If not set, SDK will set ignore_call_history to false,
      and will append the call history to
      GenerateContentResponse.automatic_function_calling_history.
      """,
  )


# TypedDict mirror of `AutomaticFunctionCallingConfig`; note the dict form has
# no implicit default of 10 for maximum_remote_calls — defaults apply only when
# the pydantic model is constructed.
class AutomaticFunctionCallingConfigDict(TypedDict, total=False):
  """The configuration for automatic function calling."""

  disable: Optional[bool]
  """Whether to disable automatic function calling.
      If not set or set to False, will enable automatic function calling.
      If set to True, will disable automatic function calling.
      """

  maximum_remote_calls: Optional[int]
  """If automatic function calling is enabled,
      maximum number of remote calls for automatic function calling.
      This number should be a positive integer.
      If not set, SDK will set maximum number of remote calls to 10.
      """

  ignore_call_history: Optional[bool]
  """If automatic function calling is enabled,
      whether to ignore call history to the response.
      If not set, SDK will set ignore_call_history to false,
      and will append the call history to
      GenerateContentResponse.automatic_function_calling_history.
      """


# Accepted anywhere an automatic-function-calling config is expected.
AutomaticFunctionCallingConfigOrDict = Union[
    AutomaticFunctionCallingConfig, AutomaticFunctionCallingConfigDict
]


class ThinkingConfig(_common.BaseModel):
  """The thinking features configuration."""

  include_thoughts: Optional[bool] = Field(
      default=None,
      description="""Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available.
      """,
  )
  # Sentinel values per the description: 0 disables thinking, -1 lets the
  # service pick automatically.
  thinking_budget: Optional[int] = Field(
      default=None,
      description="""Indicates the thinking budget in tokens. 0 is DISABLED. -1 is AUTOMATIC. The default values and allowed ranges are model dependent.
      """,
  )
  thinking_level: Optional[ThinkingLevel] = Field(
      default=None,
      description="""Optional. The level of thoughts tokens that the model should generate.""",
  )


# TypedDict mirror of `ThinkingConfig`; all keys optional (total=False).
class ThinkingConfigDict(TypedDict, total=False):
  """The thinking features configuration."""

  include_thoughts: Optional[bool]
  """Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available.
      """

  thinking_budget: Optional[int]
  """Indicates the thinking budget in tokens. 0 is DISABLED. -1 is AUTOMATIC. The default values and allowed ranges are model dependent.
      """

  thinking_level: Optional[ThinkingLevel]
  """Optional. The level of thoughts tokens that the model should generate."""


ThinkingConfigOrDict = Union[ThinkingConfig, ThinkingConfigDict]


class ImageConfig(_common.BaseModel):
  """The image generation configuration to be used in GenerateContentConfig."""

  aspect_ratio: Optional[str] = Field(
      default=None,
      description="""Aspect ratio of the generated images. Supported values are
      "1:1", "2:3", "3:2", "3:4", "4:3", "9:16", "16:9", and "21:9".""",
  )
  image_size: Optional[str] = Field(
      default=None,
      description="""Optional. Specifies the size of generated images. Supported
      values are `1K`, `2K`, `4K`. If not specified, the model will use default
      value `1K`.""",
  )
  output_mime_type: Optional[str] = Field(
      default=None,
      description="""MIME type of the generated image. This field is not
      supported in Gemini API.""",
  )
  # Only meaningful for image/jpeg output, per the description.
  output_compression_quality: Optional[int] = Field(
      default=None,
      description="""Compression quality of the generated image (for
      ``image/jpeg`` only). This field is not supported in Gemini API.""",
  )


# TypedDict mirror of `ImageConfig`; all keys optional (total=False).
class ImageConfigDict(TypedDict, total=False):
  """The image generation configuration to be used in GenerateContentConfig."""

  aspect_ratio: Optional[str]
  """Aspect ratio of the generated images. Supported values are
      "1:1", "2:3", "3:2", "3:4", "4:3", "9:16", "16:9", and "21:9"."""

  image_size: Optional[str]
  """Optional. Specifies the size of generated images. Supported
      values are `1K`, `2K`, `4K`. If not specified, the model will use default
      value `1K`."""

  output_mime_type: Optional[str]
  """MIME type of the generated image. This field is not
      supported in Gemini API."""

  output_compression_quality: Optional[int]
  """Compression quality of the generated image (for
      ``image/jpeg`` only). This field is not supported in Gemini API."""


ImageConfigOrDict = Union[ImageConfig, ImageConfigDict]


# Error payload modeled on the common google.rpc.Status shape (code / message /
# details) — TODO confirm against the API reference.
class FileStatus(_common.BaseModel):
  """Status of a File that uses a common error model."""

  details: Optional[list[dict[str, Any]]] = Field(
      default=None,
      description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""",
  )
  # NOTE(review): this description duplicates the one on `details`; the field
  # is presumably the human-readable error message — generator artifact.
  message: Optional[str] = Field(
      default=None,
      description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""",
  )
  code: Optional[int] = Field(
      default=None, description="""The status code. 0 for OK, 1 for CANCELLED"""
  )


# TypedDict mirror of `FileStatus`; all keys optional (total=False).
class FileStatusDict(TypedDict, total=False):
  """Status of a File that uses a common error model."""

  details: Optional[list[dict[str, Any]]]
  """A list of messages that carry the error details. There is a common set of message types for APIs to use."""

  message: Optional[str]
  """A list of messages that carry the error details. There is a common set of message types for APIs to use."""

  code: Optional[int]
  """The status code. 0 for OK, 1 for CANCELLED"""


FileStatusOrDict = Union[FileStatus, FileStatusDict]


# Resource model for an uploaded file. Most fields are documented
# "Output only." — populated by the service, not set by callers.
class File(_common.BaseModel):
  """A file uploaded to the API."""

  name: Optional[str] = Field(
      default=None,
      description="""The `File` resource name. The ID (name excluding the "files/" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456`""",
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image'""",
  )
  mime_type: Optional[str] = Field(
      default=None, description="""Output only. MIME type of the file."""
  )
  size_bytes: Optional[int] = Field(
      default=None, description="""Output only. Size of the file in bytes."""
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. The timestamp of when the `File` was created.""",
  )
  expiration_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire.""",
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. The timestamp of when the `File` was last updated.""",
  )
  sha256_hash: Optional[str] = Field(
      default=None,
      description="""Output only. SHA-256 hash of the uploaded bytes. The hash value is encoded in base64 format.""",
  )
  uri: Optional[str] = Field(
      default=None, description="""Output only. The URI of the `File`."""
  )
  download_uri: Optional[str] = Field(
      default=None,
      description="""Output only. The URI of the `File`, only set for downloadable (generated) files.""",
  )
  state: Optional[FileState] = Field(
      default=None, description="""Output only. Processing state of the File."""
  )
  source: Optional[FileSource] = Field(
      default=None, description="""Output only. The source of the `File`."""
  )
  video_metadata: Optional[dict[str, Any]] = Field(
      default=None, description="""Output only. Metadata for a video."""
  )
  error: Optional[FileStatus] = Field(
      default=None,
      description="""Output only. Error status if File processing failed.""",
  )


# TypedDict mirror of `File`; all keys optional (total=False).
class FileDict(TypedDict, total=False):
  """A file uploaded to the API."""

  name: Optional[str]
  """The `File` resource name. The ID (name excluding the "files/" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456`"""

  display_name: Optional[str]
  """Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image'"""

  mime_type: Optional[str]
  """Output only. MIME type of the file."""

  size_bytes: Optional[int]
  """Output only. Size of the file in bytes."""

  create_time: Optional[datetime.datetime]
  """Output only. The timestamp of when the `File` was created."""

  expiration_time: Optional[datetime.datetime]
  """Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire."""

  update_time: Optional[datetime.datetime]
  """Output only. The timestamp of when the `File` was last updated."""

  sha256_hash: Optional[str]
  """Output only. SHA-256 hash of the uploaded bytes. The hash value is encoded in base64 format."""

  uri: Optional[str]
  """Output only. The URI of the `File`."""

  download_uri: Optional[str]
  """Output only. The URI of the `File`, only set for downloadable (generated) files."""

  state: Optional[FileState]
  """Output only. Processing state of the File."""

  source: Optional[FileSource]
  """Output only. The source of the `File`."""

  video_metadata: Optional[dict[str, Any]]
  """Output only. Metadata for a video."""

  error: Optional[FileStatusDict]
  """Output only. Error status if File processing failed."""


FileOrDict = Union[File, FileDict]


# PIL images are accepted as parts only when Pillow was importable at module
# load (see the `_is_pillow_image_imported` guard at the top of the file).
if _is_pillow_image_imported:
  PartUnion = Union[str, PIL_Image, File, Part]
else:
  PartUnion = Union[str, File, Part]  # type: ignore[misc]


if _is_pillow_image_imported:
  PartUnionDict = Union[str, PIL_Image, File, FileDict, Part, PartDict]
else:
  PartUnionDict = Union[str, File, FileDict, Part, PartDict]  # type: ignore[misc]


# A content argument may be a full Content, a single part, or a list of parts.
ContentUnion = Union[Content, PartUnion, list[PartUnion]]


ContentUnionDict = Union[
    Content, ContentDict, PartUnionDict, list[PartUnionDict]
]


class GenerationConfigRoutingConfigAutoRoutingMode(_common.BaseModel):
  """When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.

  This data type is not supported in Gemini API.
  """

  # Closed set of preferences expressed as a Literal rather than an Enum.
  model_routing_preference: Optional[
      Literal['UNKNOWN', 'PRIORITIZE_QUALITY', 'BALANCED', 'PRIORITIZE_COST']
  ] = Field(default=None, description="""The model routing preference.""")


# TypedDict mirror of `GenerationConfigRoutingConfigAutoRoutingMode`.
class GenerationConfigRoutingConfigAutoRoutingModeDict(TypedDict, total=False):
  """When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.

  This data type is not supported in Gemini API.
  """

  model_routing_preference: Optional[
      Literal['UNKNOWN', 'PRIORITIZE_QUALITY', 'BALANCED', 'PRIORITIZE_COST']
  ]
  """The model routing preference."""


# Accepted anywhere an auto-routing mode is expected: model or dict form.
GenerationConfigRoutingConfigAutoRoutingModeOrDict = Union[
    GenerationConfigRoutingConfigAutoRoutingMode,
    GenerationConfigRoutingConfigAutoRoutingModeDict,
]


class GenerationConfigRoutingConfigManualRoutingMode(_common.BaseModel):
  """When manual routing is set, the specified model will be used directly.

  This data type is not supported in Gemini API.
  """

  model_name: Optional[str] = Field(
      default=None,
      description="""The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).""",
  )


# TypedDict mirror of `GenerationConfigRoutingConfigManualRoutingMode`.
class GenerationConfigRoutingConfigManualRoutingModeDict(
    TypedDict, total=False
):
  """When manual routing is set, the specified model will be used directly.

  This data type is not supported in Gemini API.
  """

  model_name: Optional[str]
  """The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models)."""


# Accepted anywhere a manual-routing mode is expected: model or dict form.
GenerationConfigRoutingConfigManualRoutingModeOrDict = Union[
    GenerationConfigRoutingConfigManualRoutingMode,
    GenerationConfigRoutingConfigManualRoutingModeDict,
]


# Wrapper choosing between automated and manual routing modes.
class GenerationConfigRoutingConfig(_common.BaseModel):
  """The configuration for routing the request to a specific model.

  This data type is not supported in Gemini API.
  """

  auto_mode: Optional[GenerationConfigRoutingConfigAutoRoutingMode] = Field(
      default=None, description="""Automated routing."""
  )
  manual_mode: Optional[GenerationConfigRoutingConfigManualRoutingMode] = Field(
      default=None, description="""Manual routing."""
  )


# TypedDict mirror of `GenerationConfigRoutingConfig`.
class GenerationConfigRoutingConfigDict(TypedDict, total=False):
  """The configuration for routing the request to a specific model.

  This data type is not supported in Gemini API.
  """

  auto_mode: Optional[GenerationConfigRoutingConfigAutoRoutingModeDict]
  """Automated routing."""

  manual_mode: Optional[GenerationConfigRoutingConfigManualRoutingModeDict]
  """Manual routing."""


# Accepted anywhere a routing config is expected: model or dict form.
GenerationConfigRoutingConfigOrDict = Union[
    GenerationConfigRoutingConfig, GenerationConfigRoutingConfigDict
]


class SafetySetting(_common.BaseModel):
  """Safety settings."""

  # `category` and `threshold` are documented "Required." by the API, yet
  # default to None here — required-ness is enforced server-side.
  category: Optional[HarmCategory] = Field(
      default=None, description="""Required. Harm category."""
  )
  method: Optional[HarmBlockMethod] = Field(
      default=None,
      description="""Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. This field is not supported in Gemini API.""",
  )
  threshold: Optional[HarmBlockThreshold] = Field(
      default=None, description="""Required. The harm block threshold."""
  )


# TypedDict mirror of `SafetySetting`; all keys optional (total=False).
class SafetySettingDict(TypedDict, total=False):
  """Safety settings."""

  category: Optional[HarmCategory]
  """Required. Harm category."""

  method: Optional[HarmBlockMethod]
  """Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. This field is not supported in Gemini API."""

  threshold: Optional[HarmBlockThreshold]
  """Required. The harm block threshold."""


SafetySettingOrDict = Union[SafetySetting, SafetySettingDict]


# A speech config may also be given as a bare string — presumably shorthand for
# a voice name; confirm against the SDK's conversion logic.
SpeechConfigUnion = Union[str, SpeechConfig]


SpeechConfigUnionDict = Union[str, SpeechConfig, SpeechConfigDict]


class GenerateContentConfig(_common.BaseModel):
  """Optional model configuration parameters.

  For more information, see `Content generation parameters
  <https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters>`_.
  """

  # All fields are optional and default to None.
  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  should_return_http_response: Optional[bool] = Field(
      default=None,
      description=""" If true, the raw HTTP response will be returned in the 'sdk_http_response' field.""",
  )
  system_instruction: Optional[ContentUnion] = Field(
      default=None,
      description="""Instructions for the model to steer it toward better performance.
      For example, "Answer as concisely as possible" or "Don't use technical
      terms in your response".
      """,
  )
  temperature: Optional[float] = Field(
      default=None,
      description="""Value that controls the degree of randomness in token selection.
      Lower temperatures are good for prompts that require a less open-ended or
      creative response, while higher temperatures can lead to more diverse or
      creative results.
      """,
  )
  top_p: Optional[float] = Field(
      default=None,
      description="""Tokens are selected from the most to least probable until the sum
      of their probabilities equals this value. Use a lower value for less
      random responses and a higher value for more random responses.
      """,
  )
  top_k: Optional[float] = Field(
      default=None,
      description="""For each token selection step, the ``top_k`` tokens with the
      highest probabilities are sampled. Then tokens are further filtered based
      on ``top_p`` with the final token selected using temperature sampling. Use
      a lower number for less random responses and a higher number for more
      random responses.
      """,
  )
  candidate_count: Optional[int] = Field(
      default=None,
      description="""Number of response variations to return.
      """,
  )
  max_output_tokens: Optional[int] = Field(
      default=None,
      description="""Maximum number of tokens that can be generated in the response.
      """,
  )
  stop_sequences: Optional[list[str]] = Field(
      default=None,
      description="""List of strings that tells the model to stop generating text if one
      of the strings is encountered in the response.
      """,
  )
  response_logprobs: Optional[bool] = Field(
      default=None,
      description="""Whether to return the log probabilities of the tokens that were
      chosen by the model at each step.
      """,
  )
  logprobs: Optional[int] = Field(
      default=None,
      description="""Number of top candidate tokens to return the log probabilities for
      at each generation step.
      """,
  )
  presence_penalty: Optional[float] = Field(
      default=None,
      description="""Positive values penalize tokens that already appear in the
      generated text, increasing the probability of generating more diverse
      content.
      """,
  )
  frequency_penalty: Optional[float] = Field(
      default=None,
      description="""Positive values penalize tokens that repeatedly appear in the
      generated text, increasing the probability of generating more diverse
      content.
      """,
  )
  seed: Optional[int] = Field(
      default=None,
      description="""When ``seed`` is fixed to a specific number, the model makes a best
      effort to provide the same response for repeated requests. By default, a
      random number is used.
      """,
  )
  response_mime_type: Optional[str] = Field(
      default=None,
      description="""Output response mimetype of the generated candidate text.
      Supported mimetype:
        - `text/plain`: (default) Text output.
        - `application/json`: JSON response in the candidates.
      The model needs to be prompted to output the appropriate response type,
      otherwise the behavior is undefined.
      This is a preview feature.
      """,
  )
  response_schema: Optional[SchemaUnion] = Field(
      default=None,
      description="""The `Schema` object allows the definition of input and output data types.
      These types can be objects, but also primitives and arrays.
      Represents a select subset of an [OpenAPI 3.0 schema
      object](https://spec.openapis.org/oas/v3.0.3#schema).
      If set, a compatible response_mime_type must also be set.
      Compatible mimetypes: `application/json`: Schema for JSON response.

      If `response_schema` doesn't process your schema correctly, try using
      `response_json_schema` instead.
      """,
  )
  response_json_schema: Optional[Any] = Field(
      default=None,
      description="""Optional. Output schema of the generated response.
      This is an alternative to `response_schema` that accepts [JSON
      Schema](https://json-schema.org/). If set, `response_schema` must be
      omitted, but `response_mime_type` is required. While the full JSON Schema
      may be sent, not all features are supported. Specifically, only the
      following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor`
      - `type` - `format` - `title` - `description` - `enum` (for strings and
      numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` -
      `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) -
      `properties` - `additionalProperties` - `required` The non-standard
      `propertyOrdering` property may also be set. Cyclic references are
      unrolled to a limited degree and, as such, may only be used within
      non-required properties. (Nullable properties are not sufficient.) If
      `$ref` is set on a sub-schema, no other properties, except for than those
      starting as a `$`, may be set.""",
  )
  routing_config: Optional[GenerationConfigRoutingConfig] = Field(
      default=None,
      description="""Configuration for model router requests.
      """,
  )
  model_selection_config: Optional[ModelSelectionConfig] = Field(
      default=None,
      description="""Configuration for model selection.
      """,
  )
  safety_settings: Optional[list[SafetySetting]] = Field(
      default=None,
      description="""Safety settings in the request to block unsafe content in the
      response.
      """,
  )
  tools: Optional[ToolListUnion] = Field(
      default=None,
      description="""Code that enables the system to interact with external systems to
      perform an action outside of the knowledge and scope of the model.
      """,
  )
  tool_config: Optional[ToolConfig] = Field(
      default=None,
      description="""Associates model output to a specific function call.
      """,
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""Labels with user-defined metadata to break down billed charges.""",
  )
  cached_content: Optional[str] = Field(
      default=None,
      description="""Resource name of a context cache that can be used in subsequent
      requests.
      """,
  )
  response_modalities: Optional[list[str]] = Field(
      default=None,
      description="""The requested modalities of the response. Represents the set of
      modalities that the model can return.
      """,
  )
  media_resolution: Optional[MediaResolution] = Field(
      default=None,
      description="""If specified, the media resolution specified will be used.
    """,
  )
  speech_config: Optional[SpeechConfigUnion] = Field(
      default=None,
      description="""The speech generation configuration.
      """,
  )
  audio_timestamp: Optional[bool] = Field(
      default=None,
      description="""If enabled, audio timestamp will be included in the request to the
       model.
      """,
  )
  automatic_function_calling: Optional[AutomaticFunctionCallingConfig] = Field(
      default=None,
      description="""The configuration for automatic function calling.
      """,
  )
  thinking_config: Optional[ThinkingConfig] = Field(
      default=None,
      description="""The thinking features configuration.
      """,
  )
  image_config: Optional[ImageConfig] = Field(
      default=None,
      description="""The image generation configuration.
      """,
  )

  @pydantic.field_validator('response_schema', mode='before')
  @classmethod
  def _convert_literal_to_enum(cls, value: Any) -> Union[Any, EnumMeta]:
    """Converts a string-only ``Literal[...]`` schema into a placeholder Enum.

    Any value that is not a ``Literal`` type passes through unchanged; a
    ``Literal`` containing a non-string argument triggers a pydantic
    ValidationError.
    """
    if typing.get_origin(value) is typing.Literal:
      enum_vals = typing.get_args(value)
      if not all(isinstance(arg, str) for arg in enum_vals):
        # This doesn't stop execution, it tells pydantic to raise a ValidationError
        # when the class is instantiated with an unsupported Literal
        raise ValueError(f'Literal type {value} must be a list of strings.')
      # The title 'PlaceholderLiteralEnum' is removed from the generated Schema
      # before sending the request
      return Enum('PlaceholderLiteralEnum', {s: s for s in enum_vals})
    return value

  @pydantic.field_validator('image_config', mode='before')
  @classmethod
  def _check_image_config_type(cls, value: Any) -> Any:
    """Rejects a GenerateImagesConfig passed where an ImageConfig belongs."""
    # GenerateImagesConfig is a distinct, easily-confused config type; fail
    # fast with a clear message instead of letting pydantic coerce it.
    if isinstance(value, GenerateImagesConfig):
      raise ValueError(
          'image_config must be an instance of ImageConfig or compatible dict.'
      )
    return value


class GenerateContentConfigDict(TypedDict, total=False):
  """Optional model configuration parameters.

  For more information, see `Content generation parameters
  <https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters>`_.
  """

  # total=False: every key may be omitted. Mirrors GenerateContentConfig
  # for plain-dict call sites; note the Literal-to-Enum and image_config
  # validators on the model class do not run for plain dicts.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  should_return_http_response: Optional[bool]
  """ If true, the raw HTTP response will be returned in the 'sdk_http_response' field."""

  system_instruction: Optional[ContentUnionDict]
  """Instructions for the model to steer it toward better performance.
      For example, "Answer as concisely as possible" or "Don't use technical
      terms in your response".
      """

  temperature: Optional[float]
  """Value that controls the degree of randomness in token selection.
      Lower temperatures are good for prompts that require a less open-ended or
      creative response, while higher temperatures can lead to more diverse or
      creative results.
      """

  top_p: Optional[float]
  """Tokens are selected from the most to least probable until the sum
      of their probabilities equals this value. Use a lower value for less
      random responses and a higher value for more random responses.
      """

  top_k: Optional[float]
  """For each token selection step, the ``top_k`` tokens with the
      highest probabilities are sampled. Then tokens are further filtered based
      on ``top_p`` with the final token selected using temperature sampling. Use
      a lower number for less random responses and a higher number for more
      random responses.
      """

  candidate_count: Optional[int]
  """Number of response variations to return.
      """

  max_output_tokens: Optional[int]
  """Maximum number of tokens that can be generated in the response.
      """

  stop_sequences: Optional[list[str]]
  """List of strings that tells the model to stop generating text if one
      of the strings is encountered in the response.
      """

  response_logprobs: Optional[bool]
  """Whether to return the log probabilities of the tokens that were
      chosen by the model at each step.
      """

  logprobs: Optional[int]
  """Number of top candidate tokens to return the log probabilities for
      at each generation step.
      """

  presence_penalty: Optional[float]
  """Positive values penalize tokens that already appear in the
      generated text, increasing the probability of generating more diverse
      content.
      """

  frequency_penalty: Optional[float]
  """Positive values penalize tokens that repeatedly appear in the
      generated text, increasing the probability of generating more diverse
      content.
      """

  seed: Optional[int]
  """When ``seed`` is fixed to a specific number, the model makes a best
      effort to provide the same response for repeated requests. By default, a
      random number is used.
      """

  response_mime_type: Optional[str]
  """Output response mimetype of the generated candidate text.
      Supported mimetype:
        - `text/plain`: (default) Text output.
        - `application/json`: JSON response in the candidates.
      The model needs to be prompted to output the appropriate response type,
      otherwise the behavior is undefined.
      This is a preview feature.
      """

  response_schema: Optional[SchemaUnionDict]
  """The `Schema` object allows the definition of input and output data types.
      These types can be objects, but also primitives and arrays.
      Represents a select subset of an [OpenAPI 3.0 schema
      object](https://spec.openapis.org/oas/v3.0.3#schema).
      If set, a compatible response_mime_type must also be set.
      Compatible mimetypes: `application/json`: Schema for JSON response.

      If `response_schema` doesn't process your schema correctly, try using
      `response_json_schema` instead.
      """

  response_json_schema: Optional[Any]
  """Optional. Output schema of the generated response.
      This is an alternative to `response_schema` that accepts [JSON
      Schema](https://json-schema.org/). If set, `response_schema` must be
      omitted, but `response_mime_type` is required. While the full JSON Schema
      may be sent, not all features are supported. Specifically, only the
      following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor`
      - `type` - `format` - `title` - `description` - `enum` (for strings and
      numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` -
      `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) -
      `properties` - `additionalProperties` - `required` The non-standard
      `propertyOrdering` property may also be set. Cyclic references are
      unrolled to a limited degree and, as such, may only be used within
      non-required properties. (Nullable properties are not sufficient.) If
      `$ref` is set on a sub-schema, no other properties, except for than those
      starting as a `$`, may be set."""

  routing_config: Optional[GenerationConfigRoutingConfigDict]
  """Configuration for model router requests.
      """

  model_selection_config: Optional[ModelSelectionConfigDict]
  """Configuration for model selection.
      """

  safety_settings: Optional[list[SafetySettingDict]]
  """Safety settings in the request to block unsafe content in the
      response.
      """

  tools: Optional[ToolListUnionDict]
  """Code that enables the system to interact with external systems to
      perform an action outside of the knowledge and scope of the model.
      """

  tool_config: Optional[ToolConfigDict]
  """Associates model output to a specific function call.
      """

  labels: Optional[dict[str, str]]
  """Labels with user-defined metadata to break down billed charges."""

  cached_content: Optional[str]
  """Resource name of a context cache that can be used in subsequent
      requests.
      """

  response_modalities: Optional[list[str]]
  """The requested modalities of the response. Represents the set of
      modalities that the model can return.
      """

  media_resolution: Optional[MediaResolution]
  """If specified, the media resolution specified will be used.
    """

  speech_config: Optional[SpeechConfigUnionDict]
  """The speech generation configuration.
      """

  audio_timestamp: Optional[bool]
  """If enabled, audio timestamp will be included in the request to the
       model.
      """

  automatic_function_calling: Optional[AutomaticFunctionCallingConfigDict]
  """The configuration for automatic function calling.
      """

  thinking_config: Optional[ThinkingConfigDict]
  """The thinking features configuration.
      """

  image_config: Optional[ImageConfigDict]
  """The image generation configuration.
      """


# Accepts either the Pydantic model or its TypedDict equivalent.
GenerateContentConfigOrDict = Union[
    GenerateContentConfig, GenerateContentConfigDict
]


# A single content item or a list of content items.
ContentListUnion = Union[ContentUnion, list[ContentUnion]]


ContentListUnionDict = Union[ContentUnionDict, list[ContentUnionDict]]


class _GenerateContentParameters(_common.BaseModel):
  """Config for models.generate_content parameters."""

  # Private request envelope bundling model id, contents, and config.
  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  contents: Optional[ContentListUnion] = Field(
      default=None,
      description="""Content of the request.
      """,
  )
  config: Optional[GenerateContentConfig] = Field(
      default=None,
      description="""Configuration that contains optional model parameters.
      """,
  )


class _GenerateContentParametersDict(TypedDict, total=False):
  """Config for models.generate_content parameters."""

  # total=False: every key may be omitted. Mirrors
  # _GenerateContentParameters for plain-dict call sites.
  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  contents: Optional[ContentListUnionDict]
  """Content of the request.
      """

  config: Optional[GenerateContentConfigDict]
  """Configuration that contains optional model parameters.
      """


# Accepts either the Pydantic model or its TypedDict equivalent.
_GenerateContentParametersOrDict = Union[
    _GenerateContentParameters, _GenerateContentParametersDict
]


class HttpResponse(_common.BaseModel):
  """A wrapper class for the http response."""

  # Both fields are optional; headers and body are captured as plain
  # strings rather than a transport-specific response object.
  headers: Optional[dict[str, str]] = Field(
      default=None,
      description="""Used to retain the processed HTTP headers in the response.""",
  )
  body: Optional[str] = Field(
      default=None,
      description="""The raw HTTP response body, in JSON format.""",
  )


class HttpResponseDict(TypedDict, total=False):
  """A wrapper class for the http response."""

  # total=False: every key may be omitted. Mirrors HttpResponse.
  headers: Optional[dict[str, str]]
  """Used to retain the processed HTTP headers in the response."""

  body: Optional[str]
  """The raw HTTP response body, in JSON format."""


# Accepts either the Pydantic model or its TypedDict equivalent.
HttpResponseOrDict = Union[HttpResponse, HttpResponseDict]


class GoogleTypeDate(_common.BaseModel):
  """Represents a whole or partial calendar date, such as a birthday.

  The time of day and time zone are either specified elsewhere or are
  insignificant. The date is relative to the Gregorian Calendar. This can
  represent one of the following: * A full date, with non-zero year, month, and
  day values. * A month and day, with a zero year (for example, an anniversary).
  * A year on its own, with a zero month and a zero day. * A year and month,
  with a zero day (for example, a credit card expiration date). Related types: *
  google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp. This
  data type is not supported in Gemini API.
  """

  # Per the google.type.Date convention, 0 means "unspecified" for each
  # component; no cross-field validation is performed here.
  day: Optional[int] = Field(
      default=None,
      description="""Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.""",
  )
  month: Optional[int] = Field(
      default=None,
      description="""Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.""",
  )
  year: Optional[int] = Field(
      default=None,
      description="""Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.""",
  )


class GoogleTypeDateDict(TypedDict, total=False):
  """Represents a whole or partial calendar date, such as a birthday.

  The time of day and time zone are either specified elsewhere or are
  insignificant. The date is relative to the Gregorian Calendar. This can
  represent one of the following: * A full date, with non-zero year, month, and
  day values. * A month and day, with a zero year (for example, an anniversary).
  * A year on its own, with a zero month and a zero day. * A year and month,
  with a zero day (for example, a credit card expiration date). Related types: *
  google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp. This
  data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors GoogleTypeDate.
  day: Optional[int]
  """Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant."""

  month: Optional[int]
  """Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day."""

  year: Optional[int]
  """Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year."""


# Accepts either the Pydantic model or its TypedDict equivalent.
GoogleTypeDateOrDict = Union[GoogleTypeDate, GoogleTypeDateDict]


class Citation(_common.BaseModel):
  """Source attributions for content.

  This data type is not supported in Gemini API.
  """

  # All fields are output-only per their descriptions; start/end indices
  # locate the cited span within the generated content.
  end_index: Optional[int] = Field(
      default=None, description="""Output only. End index into the content."""
  )
  license: Optional[str] = Field(
      default=None, description="""Output only. License of the attribution."""
  )
  publication_date: Optional[GoogleTypeDate] = Field(
      default=None,
      description="""Output only. Publication date of the attribution.""",
  )
  start_index: Optional[int] = Field(
      default=None, description="""Output only. Start index into the content."""
  )
  title: Optional[str] = Field(
      default=None, description="""Output only. Title of the attribution."""
  )
  uri: Optional[str] = Field(
      default=None,
      description="""Output only. Url reference of the attribution.""",
  )


class CitationDict(TypedDict, total=False):
  """Source attributions for content.

  This data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors Citation.
  end_index: Optional[int]
  """Output only. End index into the content."""

  license: Optional[str]
  """Output only. License of the attribution."""

  publication_date: Optional[GoogleTypeDateDict]
  """Output only. Publication date of the attribution."""

  start_index: Optional[int]
  """Output only. Start index into the content."""

  title: Optional[str]
  """Output only. Title of the attribution."""

  uri: Optional[str]
  """Output only. Url reference of the attribution."""


# Accepts either the Pydantic model or its TypedDict equivalent.
CitationOrDict = Union[Citation, CitationDict]


class CitationMetadata(_common.BaseModel):
  """Citation information when the model quotes another source."""

  citations: Optional[list[Citation]] = Field(
      default=None,
      description="""Contains citation information when the model directly quotes, at
      length, from another source. Can include traditional websites and code
      repositories.
      """,
  )

  @model_validator(mode='before')
  @classmethod
  def _rename_citation_sources(cls, data: Any) -> Any:
    """Renames the wire-format key 'citationSources' to 'citations'.

    Runs before field validation so raw payloads that use the
    'citationSources' key are accepted transparently.
    """
    if isinstance(data, dict) and 'citationSources' in data:
      data['citations'] = data.pop('citationSources')
    return data


class CitationMetadataDict(TypedDict, total=False):
  """Citation information when the model quotes another source."""

  # total=False: the key may be omitted. Mirrors CitationMetadata; note
  # the 'citationSources' renaming validator does not run for plain dicts.
  citations: Optional[list[CitationDict]]
  """Contains citation information when the model directly quotes, at
      length, from another source. Can include traditional websites and code
      repositories.
      """


# Accepts either the Pydantic model or its TypedDict equivalent.
CitationMetadataOrDict = Union[CitationMetadata, CitationMetadataDict]


class GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution(_common.BaseModel):
  """Author attribution for a photo or review.

  This data type is not supported in Gemini API.
  """

  # All fields are optional and default to None.
  display_name: Optional[str] = Field(
      default=None, description="""Name of the author of the Photo or Review."""
  )
  photo_uri: Optional[str] = Field(
      default=None,
      description="""Profile photo URI of the author of the Photo or Review.""",
  )
  uri: Optional[str] = Field(
      default=None, description="""URI of the author of the Photo or Review."""
  )


class GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict(
    TypedDict, total=False
):
  """Author attribution for a photo or review.

  This data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors
  # GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution.
  display_name: Optional[str]
  """Name of the author of the Photo or Review."""

  photo_uri: Optional[str]
  """Profile photo URI of the author of the Photo or Review."""

  uri: Optional[str]
  """URI of the author of the Photo or Review."""


# Accepts either the Pydantic model or its TypedDict equivalent.
GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionOrDict = Union[
    GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution,
    GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict,
]


class GroundingChunkMapsPlaceAnswerSourcesReviewSnippet(_common.BaseModel):
  """Encapsulates a review snippet.

  This data type is not supported in Gemini API.
  """

  # All fields are optional and default to None.
  author_attribution: Optional[
      GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution
  ] = Field(default=None, description="""This review's author.""")
  flag_content_uri: Optional[str] = Field(
      default=None,
      description="""A link where users can flag a problem with the review.""",
  )
  google_maps_uri: Optional[str] = Field(
      default=None, description="""A link to show the review on Google Maps."""
  )
  relative_publish_time_description: Optional[str] = Field(
      default=None,
      description="""A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.""",
  )
  review: Optional[str] = Field(
      default=None,
      description="""A reference representing this place review which may be used to look up this place review again.""",
  )
  review_id: Optional[str] = Field(
      default=None, description="""Id of the review referencing the place."""
  )
  title: Optional[str] = Field(
      default=None, description="""Title of the review."""
  )


class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict(
    TypedDict, total=False
):
  """Encapsulates a review snippet.

  This data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors
  # GroundingChunkMapsPlaceAnswerSourcesReviewSnippet.
  author_attribution: Optional[
      GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict
  ]
  """This review's author."""

  flag_content_uri: Optional[str]
  """A link where users can flag a problem with the review."""

  google_maps_uri: Optional[str]
  """A link to show the review on Google Maps."""

  relative_publish_time_description: Optional[str]
  """A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country."""

  review: Optional[str]
  """A reference representing this place review which may be used to look up this place review again."""

  review_id: Optional[str]
  """Id of the review referencing the place."""

  title: Optional[str]
  """Title of the review."""


# Accepts either the Pydantic model or its TypedDict equivalent.
GroundingChunkMapsPlaceAnswerSourcesReviewSnippetOrDict = Union[
    GroundingChunkMapsPlaceAnswerSourcesReviewSnippet,
    GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict,
]


class GroundingChunkMapsPlaceAnswerSources(_common.BaseModel):
  """Sources used to generate the place answer.

  This data type is not supported in Gemini API.
  """

  # All fields are optional and default to None.
  flag_content_uri: Optional[str] = Field(
      default=None,
      description="""A link where users can flag a problem with the generated answer.""",
  )
  review_snippets: Optional[
      list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippet]
  ] = Field(
      default=None,
      description="""Snippets of reviews that are used to generate the answer.""",
  )


class GroundingChunkMapsPlaceAnswerSourcesDict(TypedDict, total=False):
  """Sources used to generate the place answer.

  This data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors
  # GroundingChunkMapsPlaceAnswerSources.
  flag_content_uri: Optional[str]
  """A link where users can flag a problem with the generated answer."""

  review_snippets: Optional[
      list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict]
  ]
  """Snippets of reviews that are used to generate the answer."""


# Accepts either the Pydantic model or its TypedDict equivalent.
GroundingChunkMapsPlaceAnswerSourcesOrDict = Union[
    GroundingChunkMapsPlaceAnswerSources,
    GroundingChunkMapsPlaceAnswerSourcesDict,
]


class GroundingChunkMaps(_common.BaseModel):
  """Chunk from Google Maps. This data type is not supported in Gemini API."""

  # All fields are optional and default to None.
  place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSources] = Field(
      default=None,
      description="""Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content.""",
  )
  place_id: Optional[str] = Field(
      default=None,
      description="""This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place.""",
  )
  text: Optional[str] = Field(
      default=None, description="""Text of the place answer."""
  )
  title: Optional[str] = Field(
      default=None, description="""Title of the place."""
  )
  uri: Optional[str] = Field(
      default=None, description="""URI reference of the place."""
  )


class GroundingChunkMapsDict(TypedDict, total=False):
  """Chunk from Google Maps. This data type is not supported in Gemini API."""

  # total=False: every key may be omitted. Mirrors GroundingChunkMaps.
  place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSourcesDict]
  """Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content."""

  place_id: Optional[str]
  """This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place."""

  text: Optional[str]
  """Text of the place answer."""

  title: Optional[str]
  """Title of the place."""

  uri: Optional[str]
  """URI reference of the place."""


# Accepts either the Pydantic model or its TypedDict equivalent.
GroundingChunkMapsOrDict = Union[GroundingChunkMaps, GroundingChunkMapsDict]


class RagChunkPageSpan(_common.BaseModel):
  """Represents where the chunk starts and ends in the document.

  This data type is not supported in Gemini API.
  """

  # Page numbers are 1-indexed and inclusive on both ends, per the field
  # descriptions; no ordering validation (first <= last) is done here.
  first_page: Optional[int] = Field(
      default=None,
      description="""Page where chunk starts in the document. Inclusive. 1-indexed.""",
  )
  last_page: Optional[int] = Field(
      default=None,
      description="""Page where chunk ends in the document. Inclusive. 1-indexed.""",
  )


class RagChunkPageSpanDict(TypedDict, total=False):
  """Represents where the chunk starts and ends in the document.

  This data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors RagChunkPageSpan.
  first_page: Optional[int]
  """Page where chunk starts in the document. Inclusive. 1-indexed."""

  last_page: Optional[int]
  """Page where chunk ends in the document. Inclusive. 1-indexed."""


# Accepts either the Pydantic model or its TypedDict equivalent.
RagChunkPageSpanOrDict = Union[RagChunkPageSpan, RagChunkPageSpanDict]


class RagChunk(_common.BaseModel):
  """A RagChunk includes the content of a chunk of a RagFile, and associated metadata.

  This data type is not supported in Gemini API.
  """

  # Both fields are optional and default to None.
  page_span: Optional[RagChunkPageSpan] = Field(
      default=None,
      description="""If populated, represents where the chunk starts and ends in the document.""",
  )
  text: Optional[str] = Field(
      default=None, description="""The content of the chunk."""
  )


class RagChunkDict(TypedDict, total=False):
  """A RagChunk includes the content of a chunk of a RagFile, and associated metadata.

  This data type is not supported in Gemini API.
  """

  # total=False: every key may be omitted. Mirrors RagChunk.
  page_span: Optional[RagChunkPageSpanDict]
  """If populated, represents where the chunk starts and ends in the document."""

  text: Optional[str]
  """The content of the chunk."""


# Accepts either the Pydantic model or its TypedDict equivalent.
RagChunkOrDict = Union[RagChunk, RagChunkDict]


class GroundingChunkRetrievedContext(_common.BaseModel):
  """Chunk from context retrieved by the retrieval tools.

  This data type is not supported in Gemini API.
  """

  # All fields are optional and default to None.
  document_name: Optional[str] = Field(
      default=None,
      description="""Output only. The full document name for the referenced Vertex AI Search document.""",
  )
  rag_chunk: Optional[RagChunk] = Field(
      default=None,
      description="""Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool.""",
  )
  text: Optional[str] = Field(
      default=None, description="""Text of the attribution."""
  )
  title: Optional[str] = Field(
      default=None, description="""Title of the attribution."""
  )
  uri: Optional[str] = Field(
      default=None, description="""URI reference of the attribution."""
  )


class GroundingChunkRetrievedContextDict(TypedDict, total=False):
  """Chunk from context retrieved by the retrieval tools.

  This data type is not supported in Gemini API.
  """

  # Plain-dict mirror of GroundingChunkRetrievedContext; total=False makes
  # every key optional.
  document_name: Optional[str]
  """Output only. The full document name for the referenced Vertex AI Search document."""

  rag_chunk: Optional[RagChunkDict]
  """Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool."""

  text: Optional[str]
  """Text of the attribution."""

  title: Optional[str]
  """Title of the attribution."""

  uri: Optional[str]
  """URI reference of the attribution."""


# Accepts either the pydantic model or its TypedDict form.
GroundingChunkRetrievedContextOrDict = Union[
    GroundingChunkRetrievedContext, GroundingChunkRetrievedContextDict
]


class GroundingChunkWeb(_common.BaseModel):
  """Chunk from the web."""

  # Pydantic counterpart of GroundingChunkWebDict; every field is optional.
  domain: Optional[str] = Field(
      default=None,
      description="""Domain of the (original) URI. This field is not supported in Gemini API.""",
  )
  title: Optional[str] = Field(
      default=None, description="""Title of the chunk."""
  )
  uri: Optional[str] = Field(
      default=None, description="""URI reference of the chunk."""
  )


class GroundingChunkWebDict(TypedDict, total=False):
  """Chunk from the web."""

  # Plain-dict mirror of GroundingChunkWeb; total=False makes every key optional.
  domain: Optional[str]
  """Domain of the (original) URI. This field is not supported in Gemini API."""

  title: Optional[str]
  """Title of the chunk."""

  uri: Optional[str]
  """URI reference of the chunk."""


# Accepts either the GroundingChunkWeb pydantic model or its TypedDict form.
GroundingChunkWebOrDict = Union[GroundingChunkWeb, GroundingChunkWebDict]


class GroundingChunk(_common.BaseModel):
  """Grounding chunk."""

  # One-of-style container: exactly which of maps / retrieved_context / web is
  # populated depends on the grounding source that produced the chunk.
  maps: Optional[GroundingChunkMaps] = Field(
      default=None,
      description="""Grounding chunk from Google Maps. This field is not supported in Gemini API.""",
  )
  retrieved_context: Optional[GroundingChunkRetrievedContext] = Field(
      default=None,
      description="""Grounding chunk from context retrieved by the retrieval tools. This field is not supported in Gemini API.""",
  )
  web: Optional[GroundingChunkWeb] = Field(
      default=None, description="""Grounding chunk from the web."""
  )


class GroundingChunkDict(TypedDict, total=False):
  """Grounding chunk."""

  # Plain-dict mirror of GroundingChunk; total=False makes every key optional.
  maps: Optional[GroundingChunkMapsDict]
  """Grounding chunk from Google Maps. This field is not supported in Gemini API."""

  retrieved_context: Optional[GroundingChunkRetrievedContextDict]
  """Grounding chunk from context retrieved by the retrieval tools. This field is not supported in Gemini API."""

  web: Optional[GroundingChunkWebDict]
  """Grounding chunk from the web."""


# Accepts either the GroundingChunk pydantic model or its TypedDict form.
GroundingChunkOrDict = Union[GroundingChunk, GroundingChunkDict]


class Segment(_common.BaseModel):
  """Segment of the content."""

  # Byte-offset span within a single Part; start is inclusive, end exclusive.
  end_index: Optional[int] = Field(
      default=None,
      description="""Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero.""",
  )
  part_index: Optional[int] = Field(
      default=None,
      description="""Output only. The index of a Part object within its parent Content object.""",
  )
  start_index: Optional[int] = Field(
      default=None,
      description="""Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero.""",
  )
  text: Optional[str] = Field(
      default=None,
      description="""Output only. The text corresponding to the segment from the response.""",
  )


class SegmentDict(TypedDict, total=False):
  """Segment of the content."""

  # Plain-dict mirror of Segment; total=False makes every key optional.
  end_index: Optional[int]
  """Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero."""

  part_index: Optional[int]
  """Output only. The index of a Part object within its parent Content object."""

  start_index: Optional[int]
  """Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero."""

  text: Optional[str]
  """Output only. The text corresponding to the segment from the response."""


# Accepts either the Segment pydantic model or its TypedDict form.
SegmentOrDict = Union[Segment, SegmentDict]


class GroundingSupport(_common.BaseModel):
  """Grounding support."""

  # Links a response segment to the grounding chunks that support it.
  confidence_scores: Optional[list[float]] = Field(
      default=None,
      description="""Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored.""",
  )
  grounding_chunk_indices: Optional[list[int]] = Field(
      default=None,
      description="""A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.""",
  )
  segment: Optional[Segment] = Field(
      default=None,
      description="""Segment of the content this support belongs to.""",
  )


class GroundingSupportDict(TypedDict, total=False):
  """Grounding support."""

  # Plain-dict mirror of GroundingSupport; total=False makes every key optional.
  confidence_scores: Optional[list[float]]
  """Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored."""

  grounding_chunk_indices: Optional[list[int]]
  """A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim."""

  segment: Optional[SegmentDict]
  """Segment of the content this support belongs to."""


# Accepts either the GroundingSupport pydantic model or its TypedDict form.
GroundingSupportOrDict = Union[GroundingSupport, GroundingSupportDict]


class RetrievalMetadata(_common.BaseModel):
  """Metadata related to retrieval in the grounding flow."""

  # Single optional score; populated only with Google Search dynamic retrieval.
  google_search_dynamic_retrieval_score: Optional[float] = Field(
      default=None,
      description="""Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search.""",
  )


class RetrievalMetadataDict(TypedDict, total=False):
  """Metadata related to retrieval in the grounding flow."""

  # Plain-dict mirror of RetrievalMetadata; total=False makes the key optional.
  google_search_dynamic_retrieval_score: Optional[float]
  """Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search."""


# Accepts either the RetrievalMetadata pydantic model or its TypedDict form.
RetrievalMetadataOrDict = Union[RetrievalMetadata, RetrievalMetadataDict]


class SearchEntryPoint(_common.BaseModel):
  """Google search entry point."""

  # Pydantic counterpart of SearchEntryPointDict; every field is optional.
  rendered_content: Optional[str] = Field(
      default=None,
      description="""Optional. Web content snippet that can be embedded in a web page or an app webview.""",
  )
  sdk_blob: Optional[bytes] = Field(
      default=None,
      description="""Optional. Base64 encoded JSON representing array of tuple.""",
  )


class SearchEntryPointDict(TypedDict, total=False):
  """Google search entry point."""

  # Plain-dict mirror of SearchEntryPoint; total=False makes every key optional.
  rendered_content: Optional[str]
  """Optional. Web content snippet that can be embedded in a web page or an app webview."""

  sdk_blob: Optional[bytes]
  """Optional. Base64 encoded JSON representing array of tuple."""


# Accepts either the SearchEntryPoint pydantic model or its TypedDict form.
SearchEntryPointOrDict = Union[SearchEntryPoint, SearchEntryPointDict]


class GroundingMetadataSourceFlaggingUri(_common.BaseModel):
  """Source content flagging uri for a place or review.

  This is currently populated only for Google Maps grounding. This data type is
  not supported in Gemini API.
  """

  # Pydantic counterpart of GroundingMetadataSourceFlaggingUriDict; every field
  # is optional.
  flag_content_uri: Optional[str] = Field(
      default=None,
      description="""A link where users can flag a problem with the source (place or review).""",
  )
  source_id: Optional[str] = Field(
      default=None, description="""Id of the place or review."""
  )


class GroundingMetadataSourceFlaggingUriDict(TypedDict, total=False):
  """Source content flagging uri for a place or review.

  This is currently populated only for Google Maps grounding. This data type is
  not supported in Gemini API.
  """

  # Plain-dict mirror of GroundingMetadataSourceFlaggingUri; total=False makes
  # every key optional.
  flag_content_uri: Optional[str]
  """A link where users can flag a problem with the source (place or review)."""

  source_id: Optional[str]
  """Id of the place or review."""


# Accepts either the pydantic model or its TypedDict form.
GroundingMetadataSourceFlaggingUriOrDict = Union[
    GroundingMetadataSourceFlaggingUri, GroundingMetadataSourceFlaggingUriDict
]


class GroundingMetadata(_common.BaseModel):
  """Metadata returned to client when grounding is enabled."""

  # Aggregates grounding results (chunks, supports, search/retrieval metadata)
  # for one response; every field is optional.
  google_maps_widget_context_token: Optional[str] = Field(
      default=None,
      description="""Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding. This field is not supported in Gemini API.""",
  )
  grounding_chunks: Optional[list[GroundingChunk]] = Field(
      default=None,
      description="""List of supporting references retrieved from specified grounding source.""",
  )
  grounding_supports: Optional[list[GroundingSupport]] = Field(
      default=None, description="""Optional. List of grounding support."""
  )
  retrieval_metadata: Optional[RetrievalMetadata] = Field(
      default=None, description="""Optional. Output only. Retrieval metadata."""
  )
  retrieval_queries: Optional[list[str]] = Field(
      default=None,
      description="""Optional. Queries executed by the retrieval tools. This field is not supported in Gemini API.""",
  )
  search_entry_point: Optional[SearchEntryPoint] = Field(
      default=None,
      description="""Optional. Google search entry for the following-up web searches.""",
  )
  source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUri]] = (
      Field(
          default=None,
          description="""Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding. This field is not supported in Gemini API.""",
      )
  )
  web_search_queries: Optional[list[str]] = Field(
      default=None,
      description="""Optional. Web search queries for the following-up web search.""",
  )


class GroundingMetadataDict(TypedDict, total=False):
  """Metadata returned to client when grounding is enabled."""

  # Plain-dict mirror of GroundingMetadata; total=False makes every key optional.
  google_maps_widget_context_token: Optional[str]
  """Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding. This field is not supported in Gemini API."""

  grounding_chunks: Optional[list[GroundingChunkDict]]
  """List of supporting references retrieved from specified grounding source."""

  grounding_supports: Optional[list[GroundingSupportDict]]
  """Optional. List of grounding support."""

  retrieval_metadata: Optional[RetrievalMetadataDict]
  """Optional. Output only. Retrieval metadata."""

  retrieval_queries: Optional[list[str]]
  """Optional. Queries executed by the retrieval tools. This field is not supported in Gemini API."""

  search_entry_point: Optional[SearchEntryPointDict]
  """Optional. Google search entry for the following-up web searches."""

  source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUriDict]]
  """Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding. This field is not supported in Gemini API."""

  web_search_queries: Optional[list[str]]
  """Optional. Web search queries for the following-up web search."""


# Accepts either the GroundingMetadata pydantic model or its TypedDict form.
GroundingMetadataOrDict = Union[GroundingMetadata, GroundingMetadataDict]


class LogprobsResultCandidate(_common.BaseModel):
  """Candidate for the logprobs token and score."""

  # One (token, token_id, log_probability) triple for a single decoding step.
  log_probability: Optional[float] = Field(
      default=None, description="""The candidate's log probability."""
  )
  token: Optional[str] = Field(
      default=None, description="""The candidate's token string value."""
  )
  token_id: Optional[int] = Field(
      default=None, description="""The candidate's token id value."""
  )


class LogprobsResultCandidateDict(TypedDict, total=False):
  """Candidate for the logprobs token and score."""

  # Plain-dict mirror of LogprobsResultCandidate; total=False makes every key
  # optional.
  log_probability: Optional[float]
  """The candidate's log probability."""

  token: Optional[str]
  """The candidate's token string value."""

  token_id: Optional[int]
  """The candidate's token id value."""


# Accepts either the pydantic model or its TypedDict form.
LogprobsResultCandidateOrDict = Union[
    LogprobsResultCandidate, LogprobsResultCandidateDict
]


class LogprobsResultTopCandidates(_common.BaseModel):
  """Candidates with top log probabilities at each decoding step."""

  # Per-step list of candidates, sorted by log probability (descending).
  candidates: Optional[list[LogprobsResultCandidate]] = Field(
      default=None,
      description="""Sorted by log probability in descending order.""",
  )


class LogprobsResultTopCandidatesDict(TypedDict, total=False):
  """Candidates with top log probabilities at each decoding step."""

  # Plain-dict mirror of LogprobsResultTopCandidates; total=False makes the key
  # optional.
  candidates: Optional[list[LogprobsResultCandidateDict]]
  """Sorted by log probability in descending order."""


# Accepts either the pydantic model or its TypedDict form.
LogprobsResultTopCandidatesOrDict = Union[
    LogprobsResultTopCandidates, LogprobsResultTopCandidatesDict
]


class LogprobsResult(_common.BaseModel):
  """Logprobs Result"""

  # Both lists have one entry per decoding step when populated.
  chosen_candidates: Optional[list[LogprobsResultCandidate]] = Field(
      default=None,
      description="""Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.""",
  )
  top_candidates: Optional[list[LogprobsResultTopCandidates]] = Field(
      default=None, description="""Length = total number of decoding steps."""
  )


class LogprobsResultDict(TypedDict, total=False):
  """Logprobs Result"""

  # Plain-dict mirror of LogprobsResult; total=False makes every key optional.
  chosen_candidates: Optional[list[LogprobsResultCandidateDict]]
  """Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates."""

  top_candidates: Optional[list[LogprobsResultTopCandidatesDict]]
  """Length = total number of decoding steps."""


# Accepts either the LogprobsResult pydantic model or its TypedDict form.
LogprobsResultOrDict = Union[LogprobsResult, LogprobsResultDict]


class SafetyRating(_common.BaseModel):
  """Safety rating corresponding to the generated content."""

  # One rating per harm category; score/severity fields are Vertex-only
  # (marked "not supported in Gemini API" in the field descriptions).
  blocked: Optional[bool] = Field(
      default=None,
      description="""Output only. Indicates whether the content was filtered out because of this rating.""",
  )
  category: Optional[HarmCategory] = Field(
      default=None, description="""Output only. Harm category."""
  )
  overwritten_threshold: Optional[HarmBlockThreshold] = Field(
      default=None,
      description="""Output only. The overwritten threshold for the safety category of Gemini 2.0 image out. If minors are detected in the output image, the threshold of each safety category will be overwritten if user sets a lower threshold. This field is not supported in Gemini API.""",
  )
  probability: Optional[HarmProbability] = Field(
      default=None,
      description="""Output only. Harm probability levels in the content.""",
  )
  probability_score: Optional[float] = Field(
      default=None,
      description="""Output only. Harm probability score. This field is not supported in Gemini API.""",
  )
  severity: Optional[HarmSeverity] = Field(
      default=None,
      description="""Output only. Harm severity levels in the content. This field is not supported in Gemini API.""",
  )
  severity_score: Optional[float] = Field(
      default=None,
      description="""Output only. Harm severity score. This field is not supported in Gemini API.""",
  )


class SafetyRatingDict(TypedDict, total=False):
  """Safety rating corresponding to the generated content."""

  # Plain-dict mirror of SafetyRating; total=False makes every key optional.
  blocked: Optional[bool]
  """Output only. Indicates whether the content was filtered out because of this rating."""

  category: Optional[HarmCategory]
  """Output only. Harm category."""

  overwritten_threshold: Optional[HarmBlockThreshold]
  """Output only. The overwritten threshold for the safety category of Gemini 2.0 image out. If minors are detected in the output image, the threshold of each safety category will be overwritten if user sets a lower threshold. This field is not supported in Gemini API."""

  probability: Optional[HarmProbability]
  """Output only. Harm probability levels in the content."""

  probability_score: Optional[float]
  """Output only. Harm probability score. This field is not supported in Gemini API."""

  severity: Optional[HarmSeverity]
  """Output only. Harm severity levels in the content. This field is not supported in Gemini API."""

  severity_score: Optional[float]
  """Output only. Harm severity score. This field is not supported in Gemini API."""


# Accepts either the SafetyRating pydantic model or its TypedDict form.
SafetyRatingOrDict = Union[SafetyRating, SafetyRatingDict]


class UrlMetadata(_common.BaseModel):
  """Context of a single url retrieval."""

  # Pydantic counterpart of UrlMetadataDict; every field is optional.
  retrieved_url: Optional[str] = Field(
      default=None, description="""Retrieved url by the tool."""
  )
  url_retrieval_status: Optional[UrlRetrievalStatus] = Field(
      default=None, description="""Status of the url retrieval."""
  )


class UrlMetadataDict(TypedDict, total=False):
  """Context of a single url retrieval."""

  # Plain-dict mirror of UrlMetadata; total=False makes every key optional.
  retrieved_url: Optional[str]
  """Retrieved url by the tool."""

  url_retrieval_status: Optional[UrlRetrievalStatus]
  """Status of the url retrieval."""


# Accepts either the UrlMetadata pydantic model or its TypedDict form.
UrlMetadataOrDict = Union[UrlMetadata, UrlMetadataDict]


class UrlContextMetadata(_common.BaseModel):
  """Metadata related to url context retrieval tool."""

  # Collection of per-url retrieval results.
  url_metadata: Optional[list[UrlMetadata]] = Field(
      default=None, description="""Output only. List of url context."""
  )


class UrlContextMetadataDict(TypedDict, total=False):
  """Metadata related to url context retrieval tool."""

  # Plain-dict mirror of UrlContextMetadata; total=False makes the key optional.
  url_metadata: Optional[list[UrlMetadataDict]]
  """Output only. List of url context."""


# Accepts either the UrlContextMetadata pydantic model or its TypedDict form.
UrlContextMetadataOrDict = Union[UrlContextMetadata, UrlContextMetadataDict]


class Candidate(_common.BaseModel):
  """A response candidate generated from the model."""

  # Every field is optional; field order here fixes pydantic's serialization
  # order, so it must not be rearranged.
  content: Optional[Content] = Field(
      default=None,
      description="""Contains the multi-part content of the response.
      """,
  )
  citation_metadata: Optional[CitationMetadata] = Field(
      default=None,
      description="""Source attribution of the generated content.
      """,
  )
  finish_message: Optional[str] = Field(
      default=None,
      description="""Describes the reason the model stopped generating tokens.
      """,
  )
  token_count: Optional[int] = Field(
      default=None,
      description="""Number of tokens for this candidate.
      """,
  )
  finish_reason: Optional[FinishReason] = Field(
      default=None,
      description="""The reason why the model stopped generating tokens.
      If empty, the model has not stopped generating the tokens.
      """,
  )
  avg_logprobs: Optional[float] = Field(
      default=None,
      description="""Output only. Average log probability score of the candidate.""",
  )
  grounding_metadata: Optional[GroundingMetadata] = Field(
      default=None,
      description="""Output only. Metadata specifies sources used to ground generated content.""",
  )
  index: Optional[int] = Field(
      default=None, description="""Output only. Index of the candidate."""
  )
  logprobs_result: Optional[LogprobsResult] = Field(
      default=None,
      description="""Output only. Log-likelihood scores for the response tokens and top tokens""",
  )
  safety_ratings: Optional[list[SafetyRating]] = Field(
      default=None,
      description="""Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.""",
  )
  url_context_metadata: Optional[UrlContextMetadata] = Field(
      default=None,
      description="""Output only. Metadata related to url context retrieval tool.""",
  )


class CandidateDict(TypedDict, total=False):
  """A response candidate generated from the model."""

  # Plain-dict mirror of Candidate; total=False makes every key optional.
  content: Optional[ContentDict]
  """Contains the multi-part content of the response.
      """

  citation_metadata: Optional[CitationMetadataDict]
  """Source attribution of the generated content.
      """

  finish_message: Optional[str]
  """Describes the reason the model stopped generating tokens.
      """

  token_count: Optional[int]
  """Number of tokens for this candidate.
      """

  finish_reason: Optional[FinishReason]
  """The reason why the model stopped generating tokens.
      If empty, the model has not stopped generating the tokens.
      """

  avg_logprobs: Optional[float]
  """Output only. Average log probability score of the candidate."""

  grounding_metadata: Optional[GroundingMetadataDict]
  """Output only. Metadata specifies sources used to ground generated content."""

  index: Optional[int]
  """Output only. Index of the candidate."""

  logprobs_result: Optional[LogprobsResultDict]
  """Output only. Log-likelihood scores for the response tokens and top tokens"""

  safety_ratings: Optional[list[SafetyRatingDict]]
  """Output only. List of ratings for the safety of a response candidate. There is at most one rating per category."""

  url_context_metadata: Optional[UrlContextMetadataDict]
  """Output only. Metadata related to url context retrieval tool."""


# Accepts either the Candidate pydantic model or its TypedDict form.
CandidateOrDict = Union[Candidate, CandidateDict]


class GenerateContentResponsePromptFeedback(_common.BaseModel):
  """Content filter results for a prompt sent in the request.

  Note: This is sent only in the first stream chunk and only if no candidates
  were generated due to content violations.
  """

  # Pydantic counterpart of GenerateContentResponsePromptFeedbackDict; every
  # field is optional.
  block_reason: Optional[BlockedReason] = Field(
      default=None,
      description="""Output only. The reason why the prompt was blocked.""",
  )
  block_reason_message: Optional[str] = Field(
      default=None,
      description="""Output only. A readable message that explains the reason why the prompt was blocked. This field is not supported in Gemini API.""",
  )
  safety_ratings: Optional[list[SafetyRating]] = Field(
      default=None,
      description="""Output only. A list of safety ratings for the prompt. There is one rating per category.""",
  )


class GenerateContentResponsePromptFeedbackDict(TypedDict, total=False):
  """Content filter results for a prompt sent in the request.

  Note: This is sent only in the first stream chunk and only if no candidates
  were generated due to content violations.
  """

  # Plain-dict mirror of GenerateContentResponsePromptFeedback; total=False
  # makes every key optional.
  block_reason: Optional[BlockedReason]
  """Output only. The reason why the prompt was blocked."""

  block_reason_message: Optional[str]
  """Output only. A readable message that explains the reason why the prompt was blocked. This field is not supported in Gemini API."""

  safety_ratings: Optional[list[SafetyRatingDict]]
  """Output only. A list of safety ratings for the prompt. There is one rating per category."""


# Accepts either the pydantic model or its TypedDict form.
GenerateContentResponsePromptFeedbackOrDict = Union[
    GenerateContentResponsePromptFeedback,
    GenerateContentResponsePromptFeedbackDict,
]


class ModalityTokenCount(_common.BaseModel):
  """Represents token counting info for a single modality."""

  # A (modality, token_count) pair; both fields are optional.
  modality: Optional[MediaModality] = Field(
      default=None,
      description="""The modality associated with this token count.""",
  )
  token_count: Optional[int] = Field(
      default=None, description="""Number of tokens."""
  )


class ModalityTokenCountDict(TypedDict, total=False):
  """Represents token counting info for a single modality."""

  # Plain-dict mirror of ModalityTokenCount; total=False makes every key optional.
  modality: Optional[MediaModality]
  """The modality associated with this token count."""

  token_count: Optional[int]
  """Number of tokens."""


# Accepts either the ModalityTokenCount pydantic model or its TypedDict form.
ModalityTokenCountOrDict = Union[ModalityTokenCount, ModalityTokenCountDict]


class GenerateContentResponseUsageMetadata(_common.BaseModel):
  """Usage metadata about the content generation request and response.

  This message provides a detailed breakdown of token usage and other relevant
  metrics. This data type is not supported in Gemini API.
  """

  # Every field is optional; per-modality breakdowns use ModalityTokenCount.
  cache_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""Output only. A detailed breakdown of the token count for each modality in the cached content.""",
  )
  cached_content_token_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of tokens in the cached content that was used for this request.""",
  )
  candidates_token_count: Optional[int] = Field(
      default=None,
      description="""The total number of tokens in the generated candidates.""",
  )
  candidates_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""Output only. A detailed breakdown of the token count for each modality in the generated candidates.""",
  )
  prompt_token_count: Optional[int] = Field(
      default=None,
      description="""The total number of tokens in the prompt. This includes any text, images, or other media provided in the request. When `cached_content` is set, this also includes the number of tokens in the cached content.""",
  )
  prompt_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""Output only. A detailed breakdown of the token count for each modality in the prompt.""",
  )
  thoughts_token_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of tokens that were part of the model's generated "thoughts" output, if applicable.""",
  )
  tool_use_prompt_token_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of tokens in the results from tool executions, which are provided back to the model as input, if applicable.""",
  )
  tool_use_prompt_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""Output only. A detailed breakdown by modality of the token counts from the results of tool executions, which are provided back to the model as input.""",
  )
  total_token_count: Optional[int] = Field(
      default=None,
      description="""The total number of tokens for the entire request. This is the sum of `prompt_token_count`, `candidates_token_count`, `tool_use_prompt_token_count`, and `thoughts_token_count`.""",
  )
  traffic_type: Optional[TrafficType] = Field(
      default=None,
      description="""Output only. The traffic type for this request.""",
  )


class GenerateContentResponseUsageMetadataDict(TypedDict, total=False):
  """Usage metadata about the content generation request and response.

  This message provides a detailed breakdown of token usage and other relevant
  metrics. This data type is not supported in Gemini API.
  """

  # Plain-dict mirror of GenerateContentResponseUsageMetadata; total=False
  # makes every key optional.
  cache_tokens_details: Optional[list[ModalityTokenCountDict]]
  """Output only. A detailed breakdown of the token count for each modality in the cached content."""

  cached_content_token_count: Optional[int]
  """Output only. The number of tokens in the cached content that was used for this request."""

  candidates_token_count: Optional[int]
  """The total number of tokens in the generated candidates."""

  candidates_tokens_details: Optional[list[ModalityTokenCountDict]]
  """Output only. A detailed breakdown of the token count for each modality in the generated candidates."""

  prompt_token_count: Optional[int]
  """The total number of tokens in the prompt. This includes any text, images, or other media provided in the request. When `cached_content` is set, this also includes the number of tokens in the cached content."""

  prompt_tokens_details: Optional[list[ModalityTokenCountDict]]
  """Output only. A detailed breakdown of the token count for each modality in the prompt."""

  thoughts_token_count: Optional[int]
  """Output only. The number of tokens that were part of the model's generated "thoughts" output, if applicable."""

  tool_use_prompt_token_count: Optional[int]
  """Output only. The number of tokens in the results from tool executions, which are provided back to the model as input, if applicable."""

  tool_use_prompt_tokens_details: Optional[list[ModalityTokenCountDict]]
  """Output only. A detailed breakdown by modality of the token counts from the results of tool executions, which are provided back to the model as input."""

  total_token_count: Optional[int]
  """The total number of tokens for the entire request. This is the sum of `prompt_token_count`, `candidates_token_count`, `tool_use_prompt_token_count`, and `thoughts_token_count`."""

  traffic_type: Optional[TrafficType]
  """Output only. The traffic type for this request."""


# Accepts either the pydantic model or its TypedDict form.
GenerateContentResponseUsageMetadataOrDict = Union[
    GenerateContentResponseUsageMetadata,
    GenerateContentResponseUsageMetadataDict,
]


class GenerateContentResponse(_common.BaseModel):
  """Response message for PredictionService.GenerateContent."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  candidates: Optional[list[Candidate]] = Field(
      default=None,
      description="""Response variations returned by the model.
      """,
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Timestamp when the request is made to the server.
      """,
  )
  model_version: Optional[str] = Field(
      default=None,
      description="""Output only. The model version used to generate the response.""",
  )
  prompt_feedback: Optional[GenerateContentResponsePromptFeedback] = Field(
      default=None,
      description="""Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations.""",
  )
  response_id: Optional[str] = Field(
      default=None,
      description="""Output only. response_id is used to identify each response. It is the encoding of the event_id.""",
  )
  usage_metadata: Optional[GenerateContentResponseUsageMetadata] = Field(
      default=None, description="""Usage metadata about the response(s)."""
  )
  automatic_function_calling_history: Optional[list[Content]] = None
  parsed: Optional[Union[pydantic.BaseModel, dict[Any, Any], Enum]] = Field(
      default=None,
      description="""First candidate from the parsed response if response_schema is provided. Not available for streaming.""",
  )

  def _get_text(self) -> Optional[str]:
    """Returns the concatenation of all text parts in the response.

    This is an internal method that allows customizing or disabling the warning
    message.

    Returns:
      The concatenation of all text parts in the response, or None if there is
      no non-thought text part.
    """
    if (
        not self.candidates
        or not self.candidates[0].content
        or not self.candidates[0].content.parts
    ):
      return None
    global _response_text_warning_logged
    # Warn (once per process) that only the first candidate is used.
    if len(self.candidates) > 1 and not _response_text_warning_logged:
      logger.warning(
          f'there are {len(self.candidates)} candidates, returning text result'
          ' from the first candidate. Access response.candidates directly to'
          ' get the result from other candidates.'
      )
      _response_text_warning_logged = True
    text = ''
    any_text_part_text = False
    non_text_parts = []
    for part in self.candidates[0].content.parts:
      # Track which non-text fields are populated so we can warn the caller
      # that the concatenated text is not the full response.
      for field_name, field_value in part.model_dump(
          exclude={'text', 'thought', 'thought_signature'}
      ).items():
        if field_value is not None:
          non_text_parts.append(field_name)
      if isinstance(part.text, str):
        # Skip "thought" parts; they are not part of the user-visible answer.
        if isinstance(part.thought, bool) and part.thought:
          continue
        any_text_part_text = True
        text += part.text
    global _response_text_non_text_warning_logged
    if non_text_parts and not _response_text_non_text_warning_logged:
      logger.warning(
          'Warning: there are non-text parts in the response:'
          f' {non_text_parts}, returning concatenated text result'
          ' from text parts. Check the full candidates.content.parts accessor'
          ' to get the full model response.'
      )
      _response_text_non_text_warning_logged = True
    # part.text == '' is different from part.text is None
    return text if any_text_part_text else None

  @property
  def parts(self) -> Optional[list[Part]]:
    """Returns the content-parts in the response.

    If there are multiple candidates, returns the parts from only the first one.
    """
    if (
        not self.candidates
        or self.candidates[0].content is None
        or self.candidates[0].content.parts is None
    ):
      return None
    global _response_parts_warning_logged
    if len(self.candidates) > 1 and not _response_parts_warning_logged:
      logger.warning(
          'Warning: there are multiple candidates in the response, returning'
          ' parts from the first one.'
      )
      _response_parts_warning_logged = True

    return self.candidates[0].content.parts

  @property
  def text(self) -> Optional[str]:
    """Returns the concatenation of all text parts in the response.

    If there are multiple candidates, returns the text from only the first one.
    If there are non-text parts in the response, this returns only the text
    parts.
    """
    return self._get_text()

  @property
  def function_calls(self) -> Optional[list[FunctionCall]]:
    """Returns the list of function calls in the response.

    If there are multiple candidates, this returns the function calls from only
    the
    first one.
    """
    if (
        not self.candidates
        or not self.candidates[0].content
        or not self.candidates[0].content.parts
    ):
      return None
    global _response_function_calls_warning_logged
    if len(self.candidates) > 1 and not _response_function_calls_warning_logged:
      logger.warning(
          'Warning: there are multiple candidates in the response, returning'
          ' function calls from the first one.'
      )
      _response_function_calls_warning_logged = True
    function_calls = [
        part.function_call
        for part in self.candidates[0].content.parts
        if part.function_call is not None
    ]

    return function_calls if function_calls else None

  @property
  def executable_code(self) -> Optional[str]:
    """Returns the executable code in the response.

    If there are multiple candidates, this returns the executable code from only
    the
    first one.
    """
    if (
        not self.candidates
        or not self.candidates[0].content
        or not self.candidates[0].content.parts
    ):
      return None
    global _response_executable_code_warning_logged
    if (
        len(self.candidates) > 1
        and not _response_executable_code_warning_logged
    ):
      # Use the module-level logger (not the root `logging` module) so the
      # warning honors the SDK's logger configuration, consistent with the
      # other properties in this class.
      logger.warning(
          'Warning: there are multiple candidates in the response, returning'
          ' executable code from the first one.'
      )
      _response_executable_code_warning_logged = True
    for part in self.candidates[0].content.parts:
      if part.executable_code is not None:
        return part.executable_code.code
    return None

  @property
  def code_execution_result(self) -> Optional[str]:
    """Returns the code execution result in the response.

    If there are multiple candidates, this returns the code execution result
    from only the
    first one.
    """
    if (
        not self.candidates
        or not self.candidates[0].content
        or not self.candidates[0].content.parts
    ):
      return None
    global _response_code_execution_warning_logged
    if len(self.candidates) > 1 and not _response_code_execution_warning_logged:
      # Use the module-level logger for consistency with the other properties.
      logger.warning(
          'Warning: there are multiple candidates in the response, returning'
          ' code execution result from the first one.'
      )
      _response_code_execution_warning_logged = True
    for part in self.candidates[0].content.parts:
      if part.code_execution_result is not None:
        return part.code_execution_result.output
    return None

  @classmethod
  def _from_response(
      cls: typing.Type[T],
      *,
      response: dict[str, object],
      kwargs: dict[str, object],
  ) -> T:
    """Builds the response model and populates `parsed` from response_schema.

    Args:
      response: The raw response dict from the API.
      kwargs: The request kwargs; `config.response_schema` (or
        `config.response_json_schema`) determines how `parsed` is produced.

    Returns:
      The constructed response, with `parsed` set when the response text can be
      validated against the requested schema.
    """
    result = super()._from_response(response=response, kwargs=kwargs)

    # Handles response schema.
    response_schema = _common.get_value_by_path(
        kwargs, ['config', 'response_schema']
    )
    # Handles response_json_schema. Backend will throw error if both
    # response_schema and response_json_schema are set.
    if response_schema is None:
      response_schema = _common.get_value_by_path(
          kwargs, ['config', 'response_json_schema']
      )
    if (
        inspect.isclass(response_schema)
        and not (
            isinstance(response_schema, builtin_types.GenericAlias)
        )  # Needed for Python 3.9 and 3.10
        and issubclass(response_schema, pydantic.BaseModel)
    ):
      # Pydantic schema.
      try:
        result_text = result._get_text()
        if result_text is not None:
          result.parsed = response_schema.model_validate_json(result_text)
      # may not be a valid json per stream response
      except pydantic.ValidationError:
        pass
      except json.decoder.JSONDecodeError:
        pass
    elif (
        isinstance(response_schema, EnumMeta) and result._get_text() is not None
    ):
      # Enum with "application/json" returns response in double quotes.
      result_text = result._get_text()
      if result_text is None:
        raise ValueError('Response is empty.')
      enum_value = result_text.replace('"', '')
      try:
        result.parsed = response_schema(enum_value)
        if (
            hasattr(response_schema, '__name__')
            and response_schema.__name__ == 'PlaceholderLiteralEnum'
        ):
          result.parsed = str(response_schema(enum_value).name)  # type: ignore
      except ValueError:
        pass
    elif isinstance(response_schema, builtin_types.GenericAlias) or isinstance(
        response_schema, type
    ):
      # Plain type or generic alias (e.g. list[int]): wrap it in a throwaway
      # pydantic model so pydantic performs the validation/coercion.

      class Placeholder(pydantic.BaseModel):
        placeholder: response_schema  # type: ignore[valid-type]

      try:
        result_text = result._get_text()
        if result_text is not None:
          parsed = {'placeholder': json.loads(result_text)}
          placeholder = Placeholder.model_validate(parsed)
          result.parsed = placeholder.placeholder
      except json.decoder.JSONDecodeError:
        pass
      except pydantic.ValidationError:
        pass

    elif isinstance(response_schema, dict) or isinstance(
        response_schema, Schema
    ):
      # With just the Schema, we don't know what pydantic model the user would
      # want the result converted to. So just return json.
      # JSON schema.
      try:
        result_text = result._get_text()
        if result_text is not None:
          result.parsed = json.loads(result_text)
      # may not be a valid json per stream response
      except json.decoder.JSONDecodeError:
        pass
    elif typing.get_origin(response_schema) in _UNION_TYPES:
      # Union schema.
      union_types = typing.get_args(response_schema)
      for union_type in union_types:
        if issubclass(union_type, pydantic.BaseModel):
          try:
            result_text = result._get_text()
            if result_text is not None:

              class Placeholder(pydantic.BaseModel):  # type: ignore[no-redef]
                placeholder: response_schema  # type: ignore[valid-type]

              parsed = {'placeholder': json.loads(result_text)}
              placeholder = Placeholder.model_validate(parsed)
              result.parsed = placeholder.placeholder
          except json.decoder.JSONDecodeError:
            pass
          except pydantic.ValidationError:
            pass
        else:
          try:
            result_text = result._get_text()
            if result_text is not None:
              result.parsed = json.loads(result_text)
          # may not be a valid json per stream response
          except json.decoder.JSONDecodeError:
            pass

    return result


class GenerateContentResponseDict(TypedDict, total=False):
  """Response message for PredictionService.GenerateContent."""

  # TypedDict counterpart of GenerateContentResponse; all keys are optional
  # (total=False).

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  candidates: Optional[list[CandidateDict]]
  """Response variations returned by the model.
      """

  create_time: Optional[datetime.datetime]
  """Timestamp when the request is made to the server.
      """

  model_version: Optional[str]
  """Output only. The model version used to generate the response."""

  prompt_feedback: Optional[GenerateContentResponsePromptFeedbackDict]
  """Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations."""

  response_id: Optional[str]
  """Output only. response_id is used to identify each response. It is the encoding of the event_id."""

  usage_metadata: Optional[GenerateContentResponseUsageMetadataDict]
  """Usage metadata about the response(s)."""


# Accepts either the pydantic model or its TypedDict counterpart.
GenerateContentResponseOrDict = Union[
    GenerateContentResponse, GenerateContentResponseDict
]


class EmbedContentConfig(_common.BaseModel):
  """Optional parameters for the embed_content method."""

  # All fields are optional and default to None.

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  task_type: Optional[str] = Field(
      default=None,
      description="""Type of task for which the embedding will be used.
      """,
  )
  title: Optional[str] = Field(
      default=None,
      description="""Title for the text. Only applicable when TaskType is
      `RETRIEVAL_DOCUMENT`.
      """,
  )
  output_dimensionality: Optional[int] = Field(
      default=None,
      description="""Reduced dimension for the output embedding. If set,
      excessive values in the output embedding are truncated from the end.
      Supported by newer models since 2024 only. You cannot set this value if
      using the earlier model (`models/embedding-001`).
      """,
  )
  mime_type: Optional[str] = Field(
      default=None,
      description="""Vertex API only. The MIME type of the input.
      """,
  )
  auto_truncate: Optional[bool] = Field(
      default=None,
      description="""Vertex API only. Whether to silently truncate inputs longer than
      the max sequence length. If this option is set to false, oversized inputs
      will lead to an INVALID_ARGUMENT error, similar to other text APIs.
      """,
  )


class EmbedContentConfigDict(TypedDict, total=False):
  """Optional parameters for the embed_content method."""

  # TypedDict counterpart of EmbedContentConfig; all keys are optional
  # (total=False).

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  task_type: Optional[str]
  """Type of task for which the embedding will be used.
      """

  title: Optional[str]
  """Title for the text. Only applicable when TaskType is
      `RETRIEVAL_DOCUMENT`.
      """

  output_dimensionality: Optional[int]
  """Reduced dimension for the output embedding. If set,
      excessive values in the output embedding are truncated from the end.
      Supported by newer models since 2024 only. You cannot set this value if
      using the earlier model (`models/embedding-001`).
      """

  mime_type: Optional[str]
  """Vertex API only. The MIME type of the input.
      """

  auto_truncate: Optional[bool]
  """Vertex API only. Whether to silently truncate inputs longer than
      the max sequence length. If this option is set to false, oversized inputs
      will lead to an INVALID_ARGUMENT error, similar to other text APIs.
      """


EmbedContentConfigOrDict = Union[EmbedContentConfig, EmbedContentConfigDict]


class _EmbedContentParameters(_common.BaseModel):
  """Parameters for the embed_content method."""

  # Internal (leading underscore): bundles the arguments of embed_content.

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  contents: Optional[ContentListUnion] = Field(
      default=None,
      description="""The content to embed. Only the `parts.text` fields will be counted.
      """,
  )
  config: Optional[EmbedContentConfig] = Field(
      default=None,
      description="""Configuration that contains optional parameters.
      """,
  )


class _EmbedContentParametersDict(TypedDict, total=False):
  """Parameters for the embed_content method."""

  # TypedDict counterpart of _EmbedContentParameters; all keys are optional.

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  contents: Optional[ContentListUnionDict]
  """The content to embed. Only the `parts.text` fields will be counted.
      """

  config: Optional[EmbedContentConfigDict]
  """Configuration that contains optional parameters.
      """


# Accepts either the pydantic model or its TypedDict counterpart.
_EmbedContentParametersOrDict = Union[
    _EmbedContentParameters, _EmbedContentParametersDict
]


class ContentEmbeddingStatistics(_common.BaseModel):
  """Statistics of the input text associated with the result of content embedding."""

  # Both fields are Vertex-only and default to None.

  truncated: Optional[bool] = Field(
      default=None,
      description="""Vertex API only. If the input text was truncated due to having
      a length longer than the allowed maximum input.
      """,
  )
  token_count: Optional[float] = Field(
      default=None,
      description="""Vertex API only. Number of tokens of the input text.
      """,
  )


class ContentEmbeddingStatisticsDict(TypedDict, total=False):
  """Statistics of the input text associated with the result of content embedding."""

  # TypedDict counterpart of ContentEmbeddingStatistics; all keys are optional.

  truncated: Optional[bool]
  """Vertex API only. If the input text was truncated due to having
      a length longer than the allowed maximum input.
      """

  token_count: Optional[float]
  """Vertex API only. Number of tokens of the input text.
      """


# Accepts either the pydantic model or its TypedDict counterpart.
ContentEmbeddingStatisticsOrDict = Union[
    ContentEmbeddingStatistics, ContentEmbeddingStatisticsDict
]


class ContentEmbedding(_common.BaseModel):
  """The embedding generated from an input content."""

  values: Optional[list[float]] = Field(
      default=None,
      description="""A list of floats representing an embedding.
      """,
  )
  statistics: Optional[ContentEmbeddingStatistics] = Field(
      default=None,
      description="""Vertex API only. Statistics of the input text associated with this
      embedding.
      """,
  )


class ContentEmbeddingDict(TypedDict, total=False):
  """The embedding generated from an input content."""

  # TypedDict counterpart of ContentEmbedding; all keys are optional.

  values: Optional[list[float]]
  """A list of floats representing an embedding.
      """

  statistics: Optional[ContentEmbeddingStatisticsDict]
  """Vertex API only. Statistics of the input text associated with this
      embedding.
      """


ContentEmbeddingOrDict = Union[ContentEmbedding, ContentEmbeddingDict]


class EmbedContentMetadata(_common.BaseModel):
  """Request-level metadata for the Vertex Embed Content API."""

  billable_character_count: Optional[int] = Field(
      default=None,
      description="""Vertex API only. The total number of billable characters included
      in the request.
      """,
  )


class EmbedContentMetadataDict(TypedDict, total=False):
  """Request-level metadata for the Vertex Embed Content API."""

  # TypedDict counterpart of EmbedContentMetadata; all keys are optional.

  billable_character_count: Optional[int]
  """Vertex API only. The total number of billable characters included
      in the request.
      """


# Accepts either the pydantic model or its TypedDict counterpart.
EmbedContentMetadataOrDict = Union[
    EmbedContentMetadata, EmbedContentMetadataDict
]


class EmbedContentResponse(_common.BaseModel):
  """Response for the embed_content method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  embeddings: Optional[list[ContentEmbedding]] = Field(
      default=None,
      description="""The embeddings for each request, in the same order as provided in
      the batch request.
      """,
  )
  metadata: Optional[EmbedContentMetadata] = Field(
      default=None,
      description="""Vertex API only. Metadata about the request.
      """,
  )


class EmbedContentResponseDict(TypedDict, total=False):
  """Response for the embed_content method."""

  # TypedDict counterpart of EmbedContentResponse; all keys are optional.

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  embeddings: Optional[list[ContentEmbeddingDict]]
  """The embeddings for each request, in the same order as provided in
      the batch request.
      """

  metadata: Optional[EmbedContentMetadataDict]
  """Vertex API only. Metadata about the request.
      """


# Accepts either the pydantic model or its TypedDict counterpart.
EmbedContentResponseOrDict = Union[
    EmbedContentResponse, EmbedContentResponseDict
]


class GenerateImagesConfig(_common.BaseModel):
  """The config for generating images."""

  # All fields are optional and default to None.

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  output_gcs_uri: Optional[str] = Field(
      default=None,
      description="""Cloud Storage URI used to store the generated images.""",
  )
  negative_prompt: Optional[str] = Field(
      default=None,
      description="""Description of what to discourage in the generated images.""",
  )
  number_of_images: Optional[int] = Field(
      default=None, description="""Number of images to generate."""
  )
  aspect_ratio: Optional[str] = Field(
      default=None,
      description="""Aspect ratio of the generated images. Supported values are
      "1:1", "3:4", "4:3", "9:16", and "16:9".""",
  )
  guidance_scale: Optional[float] = Field(
      default=None,
      description="""Controls how much the model adheres to the text prompt. Large
      values increase output and prompt alignment, but may compromise image
      quality.""",
  )
  seed: Optional[int] = Field(
      default=None,
      description="""Random seed for image generation. This is not available when
      ``add_watermark`` is set to true.""",
  )
  safety_filter_level: Optional[SafetyFilterLevel] = Field(
      default=None, description="""Filter level for safety filtering."""
  )
  person_generation: Optional[PersonGeneration] = Field(
      default=None, description="""Allows generation of people by the model."""
  )
  include_safety_attributes: Optional[bool] = Field(
      default=None,
      description="""Whether to report the safety scores of each generated image and
      the positive prompt in the response.""",
  )
  include_rai_reason: Optional[bool] = Field(
      default=None,
      description="""Whether to include the Responsible AI filter reason if the image
      is filtered out of the response.""",
  )
  language: Optional[ImagePromptLanguage] = Field(
      default=None, description="""Language of the text in the prompt."""
  )
  output_mime_type: Optional[str] = Field(
      default=None, description="""MIME type of the generated image."""
  )
  output_compression_quality: Optional[int] = Field(
      default=None,
      description="""Compression quality of the generated image (for ``image/jpeg``
      only).""",
  )
  add_watermark: Optional[bool] = Field(
      default=None,
      description="""Whether to add a watermark to the generated images.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""User specified labels to track billing usage.""",
  )
  image_size: Optional[str] = Field(
      default=None,
      description="""The size of the largest dimension of the generated image.
      Supported sizes are 1K and 2K (not supported for Imagen 3 models).""",
  )
  enhance_prompt: Optional[bool] = Field(
      default=None, description="""Whether to use the prompt rewriting logic."""
  )


class GenerateImagesConfigDict(TypedDict, total=False):
  """The config for generating images."""

  # TypedDict counterpart of GenerateImagesConfig; all keys are optional
  # (total=False).

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  output_gcs_uri: Optional[str]
  """Cloud Storage URI used to store the generated images."""

  negative_prompt: Optional[str]
  """Description of what to discourage in the generated images."""

  number_of_images: Optional[int]
  """Number of images to generate."""

  aspect_ratio: Optional[str]
  """Aspect ratio of the generated images. Supported values are
      "1:1", "3:4", "4:3", "9:16", and "16:9"."""

  guidance_scale: Optional[float]
  """Controls how much the model adheres to the text prompt. Large
      values increase output and prompt alignment, but may compromise image
      quality."""

  seed: Optional[int]
  """Random seed for image generation. This is not available when
      ``add_watermark`` is set to true."""

  safety_filter_level: Optional[SafetyFilterLevel]
  """Filter level for safety filtering."""

  person_generation: Optional[PersonGeneration]
  """Allows generation of people by the model."""

  include_safety_attributes: Optional[bool]
  """Whether to report the safety scores of each generated image and
      the positive prompt in the response."""

  include_rai_reason: Optional[bool]
  """Whether to include the Responsible AI filter reason if the image
      is filtered out of the response."""

  language: Optional[ImagePromptLanguage]
  """Language of the text in the prompt."""

  output_mime_type: Optional[str]
  """MIME type of the generated image."""

  output_compression_quality: Optional[int]
  """Compression quality of the generated image (for ``image/jpeg``
      only)."""

  add_watermark: Optional[bool]
  """Whether to add a watermark to the generated images."""

  labels: Optional[dict[str, str]]
  """User specified labels to track billing usage."""

  image_size: Optional[str]
  """The size of the largest dimension of the generated image.
      Supported sizes are 1K and 2K (not supported for Imagen 3 models)."""

  enhance_prompt: Optional[bool]
  """Whether to use the prompt rewriting logic."""


# Accepts either the pydantic model or its TypedDict counterpart.
GenerateImagesConfigOrDict = Union[
    GenerateImagesConfig, GenerateImagesConfigDict
]


class _GenerateImagesParameters(_common.BaseModel):
  """The parameters for generating images."""

  # Internal (leading underscore): bundles the arguments of generate_images.

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  prompt: Optional[str] = Field(
      default=None,
      description="""Text prompt that typically describes the images to output.
      """,
  )
  config: Optional[GenerateImagesConfig] = Field(
      default=None,
      description="""Configuration for generating images.
      """,
  )


class _GenerateImagesParametersDict(TypedDict, total=False):
  """The parameters for generating images."""

  # TypedDict counterpart of _GenerateImagesParameters; all keys are optional.

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  prompt: Optional[str]
  """Text prompt that typically describes the images to output.
      """

  config: Optional[GenerateImagesConfigDict]
  """Configuration for generating images.
      """


# Accepts either the pydantic model or its TypedDict counterpart.
_GenerateImagesParametersOrDict = Union[
    _GenerateImagesParameters, _GenerateImagesParametersDict
]


class Image(_common.BaseModel):
  """An image, carried either as raw bytes or as a Cloud Storage URI."""

  gcs_uri: Optional[str] = Field(
      default=None,
      description="""The Cloud Storage URI of the image. ``Image`` can contain a value
      for this field or the ``image_bytes`` field but not both.""",
  )
  image_bytes: Optional[bytes] = Field(
      default=None,
      description="""The image bytes data. ``Image`` can contain a value for this field
      or the ``gcs_uri`` field but not both.""",
  )
  mime_type: Optional[str] = Field(
      default=None, description="""The MIME type of the image."""
  )

  _loaded_image: Optional['PIL_Image'] = None

  """Image."""

  @classmethod
  def from_file(
      cls, *, location: str, mime_type: Optional[str] = None
  ) -> 'Image':
    """Lazy-loads an image from a local file or Google Cloud Storage.

    Args:
        location: The local path or Google Cloud Storage URI from which to load
          the image.
        mime_type: The MIME type of the image. If not provided, the MIME type
          will be automatically determined.

    Returns:
        A loaded image as an `Image` object.
    """
    import urllib
    import pathlib
    import mimetypes

    url = urllib.parse.urlparse(location)
    # Normalize https://storage.googleapis.com/... URLs into gs:// form.
    if url.scheme == 'https' and url.netloc == 'storage.googleapis.com':
      url = url._replace(
          scheme='gs',
          netloc='',
          path=f'/{urllib.parse.unquote(url.path)}',
      )
      location = urllib.parse.urlunparse(url)

    # Cloud Storage images are referenced by URI and fetched lazily.
    if url.scheme == 'gs':
      return cls(gcs_uri=location)

    # Otherwise treat the location as a local file path.
    data = pathlib.Path(location).read_bytes()
    guessed_mime_type = mime_type or mimetypes.guess_type(location)[0]
    return cls(image_bytes=data, mime_type=guessed_mime_type)

  def show(self) -> None:
    """Shows the image.

    This method only works in a notebook environment.
    """
    if 'ipykernel' not in sys.modules:
      # Outside a notebook, fall back to PIL's native viewer (no-op if the
      # image has not been loaded/decoded).
      pil_img = self._pil_image
      if pil_img is not None:
        pil_img.show()
      return
    try:
      from IPython import display as IPython_display
    except ImportError:
      IPython_display = None

    if IPython_display:
      IPython_display.display(self._pil_image)

  @property
  def _pil_image(self) -> Optional['PIL_Image']:
    """Decodes ``image_bytes`` into a cached PIL image on first access."""
    PIL_Image: Optional[builtin_types.ModuleType]
    try:
      from PIL import Image as PIL_Image
    except ImportError:
      PIL_Image = None
    import io

    if self._loaded_image is not None:
      return self._loaded_image
    if not PIL_Image:
      raise RuntimeError(
          'The PIL module is not available. Please install the Pillow'
          ' package. `pip install pillow`'
      )
    if self.image_bytes is None:
      raise ValueError('The image bytes are not set.')
    self._loaded_image = PIL_Image.open(io.BytesIO(self.image_bytes))
    return self._loaded_image

  def save(self, location: str) -> None:
    """Saves the image to a file.

    Args:
        location: Local path where to save the image.
    """
    from pathlib import Path

    data = self.image_bytes
    if data is None:
      raise ValueError('The image bytes are not set.')
    Path(location).write_bytes(data)


# Job-state names reported by the Vertex backend that count as success.
JOB_STATES_SUCCEEDED_VERTEX = [
    'JOB_STATE_SUCCEEDED',
]

# Job-state names reported by the ML Dev backend that count as success.
JOB_STATES_SUCCEEDED_MLDEV = [
    'ACTIVE',
]

# Success states across both backends.
JOB_STATES_SUCCEEDED = JOB_STATES_SUCCEEDED_VERTEX + JOB_STATES_SUCCEEDED_MLDEV


# Terminal job-state names reported by the Vertex backend.
JOB_STATES_ENDED_VERTEX = [
    'JOB_STATE_SUCCEEDED',
    'JOB_STATE_FAILED',
    'JOB_STATE_CANCELLED',
    'JOB_STATE_EXPIRED',
]

# Terminal job-state names reported by the ML Dev backend.
JOB_STATES_ENDED_MLDEV = [
    'ACTIVE',
    'FAILED',
]

# Terminal states across both backends.
JOB_STATES_ENDED = JOB_STATES_ENDED_VERTEX + JOB_STATES_ENDED_MLDEV


class ImageDict(TypedDict, total=False):
  """An image."""

  # TypedDict counterpart of Image; all keys are optional (total=False).

  gcs_uri: Optional[str]
  """The Cloud Storage URI of the image. ``Image`` can contain a value
      for this field or the ``image_bytes`` field but not both."""

  image_bytes: Optional[bytes]
  """The image bytes data. ``Image`` can contain a value for this field
      or the ``gcs_uri`` field but not both."""

  mime_type: Optional[str]
  """The MIME type of the image."""


ImageOrDict = Union[Image, ImageDict]


class SafetyAttributes(_common.BaseModel):
  """Safety attributes of a GeneratedImage or the user-provided prompt."""

  # `categories` and `scores` appear to be parallel lists — TODO confirm
  # against the backend response format.

  categories: Optional[list[str]] = Field(
      default=None, description="""List of RAI categories."""
  )
  scores: Optional[list[float]] = Field(
      default=None, description="""List of scores of each categories."""
  )
  content_type: Optional[str] = Field(
      default=None, description="""Internal use only."""
  )


class SafetyAttributesDict(TypedDict, total=False):
  """Safety attributes of a GeneratedImage or the user-provided prompt."""

  # TypedDict counterpart of SafetyAttributes; all keys are optional.

  categories: Optional[list[str]]
  """List of RAI categories."""

  scores: Optional[list[float]]
  """List of scores of each categories."""

  content_type: Optional[str]
  """Internal use only."""


SafetyAttributesOrDict = Union[SafetyAttributes, SafetyAttributesDict]


class GeneratedImage(_common.BaseModel):
  """An output image."""

  image: Optional[Image] = Field(
      default=None, description="""The output image data."""
  )
  rai_filtered_reason: Optional[str] = Field(
      default=None,
      description="""Responsible AI filter reason if the image is filtered out of the
      response.""",
  )
  safety_attributes: Optional[SafetyAttributes] = Field(
      default=None,
      description="""Safety attributes of the image. Lists of RAI categories and their
      scores of each content.""",
  )
  enhanced_prompt: Optional[str] = Field(
      default=None,
      description="""The rewritten prompt used for the image generation if the prompt
      enhancer is enabled.""",
  )


class GeneratedImageDict(TypedDict, total=False):
  """An output image."""

  # TypedDict mirror of GeneratedImage.
  image: Optional[ImageDict]
  """The output image data."""

  rai_filtered_reason: Optional[str]
  """Responsible AI filter reason if the image is filtered out of the
      response."""

  safety_attributes: Optional[SafetyAttributesDict]
  """Safety attributes of the image. Lists of RAI categories and their
      scores of each content."""

  enhanced_prompt: Optional[str]
  """The rewritten prompt used for the image generation if the prompt
      enhancer is enabled."""


# Accepts either the Pydantic model or its TypedDict form.
GeneratedImageOrDict = Union[GeneratedImage, GeneratedImageDict]


class GenerateImagesResponse(_common.BaseModel):
  """The output images response."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  generated_images: Optional[list[GeneratedImage]] = Field(
      default=None, description="""List of generated images."""
  )
  positive_prompt_safety_attributes: Optional[SafetyAttributes] = Field(
      default=None,
      description="""Safety attributes of the positive prompt. Only populated if
      ``include_safety_attributes`` is set to True.""",
  )

  @property
  def images(self) -> list[Optional[Image]]:
    """Returns the list of all generated images.

    A convenience method for accessing the images. Some attributes of the
    generated image are only available through the ``GeneratedImage`` object.

    Returns:
      One entry per ``GeneratedImage``, in order. An entry may be ``None``
      because ``GeneratedImage.image`` is Optional. Returns an empty list
      when ``generated_images`` is ``None`` or empty.
    """
    if not self.generated_images:
      return []
    return [generated_image.image for generated_image in self.generated_images]


class GenerateImagesResponseDict(TypedDict, total=False):
  """The output images response."""

  # TypedDict mirror of GenerateImagesResponse (the ``images`` convenience
  # property exists only on the Pydantic model).
  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  generated_images: Optional[list[GeneratedImageDict]]
  """List of generated images."""

  positive_prompt_safety_attributes: Optional[SafetyAttributesDict]
  """Safety attributes of the positive prompt. Only populated if
      ``include_safety_attributes`` is set to True."""


# Accepts either the Pydantic model or its TypedDict form.
GenerateImagesResponseOrDict = Union[
    GenerateImagesResponse, GenerateImagesResponseDict
]


class MaskReferenceConfig(_common.BaseModel):
  """Configuration for a Mask reference image."""

  # Carried as _ReferenceImageAPI.mask_image_config in edit requests.
  mask_mode: Optional[MaskReferenceMode] = Field(
      default=None,
      description="""Prompts the model to generate a mask instead of you needing to
      provide one (unless MASK_MODE_USER_PROVIDED is used).""",
  )
  segmentation_classes: Optional[list[int]] = Field(
      default=None,
      description="""A list of up to 5 class ids to use for semantic segmentation.
      Automatically creates an image mask based on specific objects.""",
  )
  mask_dilation: Optional[float] = Field(
      default=None,
      description="""Dilation percentage of the mask provided.
      Float between 0 and 1.""",
  )


class MaskReferenceConfigDict(TypedDict, total=False):
  """Configuration for a Mask reference image."""

  # TypedDict mirror of MaskReferenceConfig.
  mask_mode: Optional[MaskReferenceMode]
  """Prompts the model to generate a mask instead of you needing to
      provide one (unless MASK_MODE_USER_PROVIDED is used)."""

  segmentation_classes: Optional[list[int]]
  """A list of up to 5 class ids to use for semantic segmentation.
      Automatically creates an image mask based on specific objects."""

  mask_dilation: Optional[float]
  """Dilation percentage of the mask provided.
      Float between 0 and 1."""


# Accepts either the Pydantic model or its TypedDict form.
MaskReferenceConfigOrDict = Union[MaskReferenceConfig, MaskReferenceConfigDict]


class ControlReferenceConfig(_common.BaseModel):
  """Configuration for a Control reference image."""

  # Carried as _ReferenceImageAPI.control_image_config in edit requests.
  control_type: Optional[ControlReferenceType] = Field(
      default=None,
      description="""The type of control reference image to use.""",
  )
  enable_control_image_computation: Optional[bool] = Field(
      default=None,
      description="""Defaults to False. When set to True, the control image will be
      computed by the model based on the control type. When set to False,
      the control image must be provided by the user.""",
  )


class ControlReferenceConfigDict(TypedDict, total=False):
  """Configuration for a Control reference image."""

  # TypedDict mirror of ControlReferenceConfig.
  control_type: Optional[ControlReferenceType]
  """The type of control reference image to use."""

  enable_control_image_computation: Optional[bool]
  """Defaults to False. When set to True, the control image will be
      computed by the model based on the control type. When set to False,
      the control image must be provided by the user."""


# Accepts either the Pydantic model or its TypedDict form.
ControlReferenceConfigOrDict = Union[
    ControlReferenceConfig, ControlReferenceConfigDict
]


class StyleReferenceConfig(_common.BaseModel):
  """Configuration for a Style reference image."""

  # Carried as _ReferenceImageAPI.style_image_config in edit requests.
  style_description: Optional[str] = Field(
      default=None,
      description="""A text description of the style to use for the generated image.""",
  )


class StyleReferenceConfigDict(TypedDict, total=False):
  """Configuration for a Style reference image."""

  # TypedDict mirror of StyleReferenceConfig.
  style_description: Optional[str]
  """A text description of the style to use for the generated image."""


# Accepts either the Pydantic model or its TypedDict form.
StyleReferenceConfigOrDict = Union[
    StyleReferenceConfig, StyleReferenceConfigDict
]


class SubjectReferenceConfig(_common.BaseModel):
  """Configuration for a Subject reference image."""

  # Carried as _ReferenceImageAPI.subject_image_config in edit requests.
  subject_type: Optional[SubjectReferenceType] = Field(
      default=None,
      description="""The subject type of a subject reference image.""",
  )
  subject_description: Optional[str] = Field(
      default=None, description="""Subject description for the image."""
  )


class SubjectReferenceConfigDict(TypedDict, total=False):
  """Configuration for a Subject reference image."""

  # TypedDict mirror of SubjectReferenceConfig.
  subject_type: Optional[SubjectReferenceType]
  """The subject type of a subject reference image."""

  subject_description: Optional[str]
  """Subject description for the image."""


# Accepts either the Pydantic model or its TypedDict form.
SubjectReferenceConfigOrDict = Union[
    SubjectReferenceConfig, SubjectReferenceConfigDict
]


class _ReferenceImageAPI(_common.BaseModel):
  """Private class that represents a Reference image that is sent to API."""

  # NOTE(review): presumably at most one of the *_image_config fields is set
  # per reference image, matching reference_type -- confirm in the SDK code
  # that builds these from user-facing reference image types.
  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )
  mask_image_config: Optional[MaskReferenceConfig] = Field(
      default=None,
      description="""Configuration for the mask reference image.""",
  )
  control_image_config: Optional[ControlReferenceConfig] = Field(
      default=None,
      description="""Configuration for the control reference image.""",
  )
  style_image_config: Optional[StyleReferenceConfig] = Field(
      default=None,
      description="""Configuration for the style reference image.""",
  )
  subject_image_config: Optional[SubjectReferenceConfig] = Field(
      default=None,
      description="""Configuration for the subject reference image.""",
  )


class _ReferenceImageAPIDict(TypedDict, total=False):
  """Private class that represents a Reference image that is sent to API."""

  # TypedDict mirror of _ReferenceImageAPI.
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""

  mask_image_config: Optional[MaskReferenceConfigDict]
  """Configuration for the mask reference image."""

  control_image_config: Optional[ControlReferenceConfigDict]
  """Configuration for the control reference image."""

  style_image_config: Optional[StyleReferenceConfigDict]
  """Configuration for the style reference image."""

  subject_image_config: Optional[SubjectReferenceConfigDict]
  """Configuration for the subject reference image."""


# Accepts either the Pydantic model or its TypedDict form.
_ReferenceImageAPIOrDict = Union[_ReferenceImageAPI, _ReferenceImageAPIDict]


class EditImageConfig(_common.BaseModel):
  """Configuration for editing an image."""

  # Consumed as _EditImageParameters.config; every field is optional.
  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  output_gcs_uri: Optional[str] = Field(
      default=None,
      description="""Cloud Storage URI used to store the generated images.""",
  )
  negative_prompt: Optional[str] = Field(
      default=None,
      description="""Description of what to discourage in the generated images.""",
  )
  number_of_images: Optional[int] = Field(
      default=None, description="""Number of images to generate."""
  )
  aspect_ratio: Optional[str] = Field(
      default=None,
      description="""Aspect ratio of the generated images. Supported values are
      "1:1", "3:4", "4:3", "9:16", and "16:9".""",
  )
  guidance_scale: Optional[float] = Field(
      default=None,
      description="""Controls how much the model adheres to the text prompt. Large
      values increase output and prompt alignment, but may compromise image
      quality.""",
  )
  # seed and add_watermark are mutually exclusive per the seed description.
  seed: Optional[int] = Field(
      default=None,
      description="""Random seed for image generation. This is not available when
      ``add_watermark`` is set to true.""",
  )
  safety_filter_level: Optional[SafetyFilterLevel] = Field(
      default=None, description="""Filter level for safety filtering."""
  )
  person_generation: Optional[PersonGeneration] = Field(
      default=None, description="""Allows generation of people by the model."""
  )
  include_safety_attributes: Optional[bool] = Field(
      default=None,
      description="""Whether to report the safety scores of each generated image and
      the positive prompt in the response.""",
  )
  include_rai_reason: Optional[bool] = Field(
      default=None,
      description="""Whether to include the Responsible AI filter reason if the image
      is filtered out of the response.""",
  )
  language: Optional[ImagePromptLanguage] = Field(
      default=None, description="""Language of the text in the prompt."""
  )
  output_mime_type: Optional[str] = Field(
      default=None, description="""MIME type of the generated image."""
  )
  output_compression_quality: Optional[int] = Field(
      default=None,
      description="""Compression quality of the generated image (for ``image/jpeg``
      only).""",
  )
  add_watermark: Optional[bool] = Field(
      default=None,
      description="""Whether to add a watermark to the generated images.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""User specified labels to track billing usage.""",
  )
  edit_mode: Optional[EditMode] = Field(
      default=None,
      description="""Describes the editing mode for the request.""",
  )
  base_steps: Optional[int] = Field(
      default=None,
      description="""The number of sampling steps. A higher value has better image
      quality, while a lower value has better latency.""",
  )


class EditImageConfigDict(TypedDict, total=False):
  """Configuration for editing an image."""

  # TypedDict mirror of EditImageConfig.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  output_gcs_uri: Optional[str]
  """Cloud Storage URI used to store the generated images."""

  negative_prompt: Optional[str]
  """Description of what to discourage in the generated images."""

  number_of_images: Optional[int]
  """Number of images to generate."""

  aspect_ratio: Optional[str]
  """Aspect ratio of the generated images. Supported values are
      "1:1", "3:4", "4:3", "9:16", and "16:9"."""

  guidance_scale: Optional[float]
  """Controls how much the model adheres to the text prompt. Large
      values increase output and prompt alignment, but may compromise image
      quality."""

  seed: Optional[int]
  """Random seed for image generation. This is not available when
      ``add_watermark`` is set to true."""

  safety_filter_level: Optional[SafetyFilterLevel]
  """Filter level for safety filtering."""

  person_generation: Optional[PersonGeneration]
  """Allows generation of people by the model."""

  include_safety_attributes: Optional[bool]
  """Whether to report the safety scores of each generated image and
      the positive prompt in the response."""

  include_rai_reason: Optional[bool]
  """Whether to include the Responsible AI filter reason if the image
      is filtered out of the response."""

  language: Optional[ImagePromptLanguage]
  """Language of the text in the prompt."""

  output_mime_type: Optional[str]
  """MIME type of the generated image."""

  output_compression_quality: Optional[int]
  """Compression quality of the generated image (for ``image/jpeg``
      only)."""

  add_watermark: Optional[bool]
  """Whether to add a watermark to the generated images."""

  labels: Optional[dict[str, str]]
  """User specified labels to track billing usage."""

  edit_mode: Optional[EditMode]
  """Describes the editing mode for the request."""

  base_steps: Optional[int]
  """The number of sampling steps. A higher value has better image
      quality, while a lower value has better latency."""


# Accepts either the Pydantic model or its TypedDict form.
EditImageConfigOrDict = Union[EditImageConfig, EditImageConfigDict]


class _EditImageParameters(_common.BaseModel):
  """Parameters for the request to edit an image."""

  # Underscore-prefixed: presumably assembled internally by the SDK's
  # edit-image method rather than constructed by users -- TODO confirm.
  model: Optional[str] = Field(
      default=None, description="""The model to use."""
  )
  prompt: Optional[str] = Field(
      default=None,
      description="""A text description of the edit to apply to the image.""",
  )
  reference_images: Optional[list[_ReferenceImageAPI]] = Field(
      default=None, description="""The reference images for editing."""
  )
  config: Optional[EditImageConfig] = Field(
      default=None, description="""Configuration for editing."""
  )


class _EditImageParametersDict(TypedDict, total=False):
  """Parameters for the request to edit an image."""

  # TypedDict mirror of _EditImageParameters.
  model: Optional[str]
  """The model to use."""

  prompt: Optional[str]
  """A text description of the edit to apply to the image."""

  reference_images: Optional[list[_ReferenceImageAPIDict]]
  """The reference images for editing."""

  config: Optional[EditImageConfigDict]
  """Configuration for editing."""


# Accepts either the Pydantic model or its TypedDict form.
_EditImageParametersOrDict = Union[
    _EditImageParameters, _EditImageParametersDict
]


class EditImageResponse(_common.BaseModel):
  """Response for the request to edit an image."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  generated_images: Optional[list[GeneratedImage]] = Field(
      default=None, description="""Generated images."""
  )


class EditImageResponseDict(TypedDict, total=False):
  """Response for the request to edit an image."""

  # TypedDict mirror of EditImageResponse.
  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  generated_images: Optional[list[GeneratedImageDict]]
  """Generated images."""


# Accepts either the Pydantic model or its TypedDict form.
EditImageResponseOrDict = Union[EditImageResponse, EditImageResponseDict]


class _UpscaleImageAPIConfig(_common.BaseModel):
  """Internal API config for UpscaleImage.

  These fields require default values sent to the API which are not intended
  to be modifiable or exposed to users in the SDK method.
  """

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  output_gcs_uri: Optional[str] = Field(
      default=None,
      description="""Cloud Storage URI used to store the generated images.""",
  )
  safety_filter_level: Optional[SafetyFilterLevel] = Field(
      default=None, description="""Filter level for safety filtering."""
  )
  person_generation: Optional[PersonGeneration] = Field(
      default=None, description="""Allows generation of people by the model."""
  )
  include_rai_reason: Optional[bool] = Field(
      default=None,
      description="""Whether to include a reason for filtered-out images in the
      response.""",
  )
  output_mime_type: Optional[str] = Field(
      default=None,
      description="""The image format that the output should be saved as.""",
  )
  output_compression_quality: Optional[int] = Field(
      default=None,
      description="""The level of compression. Only applicable if the
      ``output_mime_type`` is ``image/jpeg``.""",
  )
  enhance_input_image: Optional[bool] = Field(
      default=None,
      description="""Whether to add an image enhancing step before upscaling.
      It is expected to suppress the noise and JPEG compression artifacts
      from the input image.""",
  )
  image_preservation_factor: Optional[float] = Field(
      default=None,
      description="""With a higher image preservation factor, the original image
      pixels are more respected. With a lower image preservation factor, the
      output image will have be more different from the input image, but
      with finer details and less noise.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""User specified labels to track billing usage.""",
  )
  # Required by the API; filled with fixed defaults by the SDK (per the
  # class docstring) rather than exposed to callers -- TODO(review): confirm
  # which defaults the upscale method injects.
  number_of_images: Optional[int] = Field(default=None, description="""""")
  mode: Optional[str] = Field(default=None, description="""""")


class _UpscaleImageAPIConfigDict(TypedDict, total=False):
  """Internal API config for UpscaleImage.

  These fields require default values sent to the API which are not intended
  to be modifiable or exposed to users in the SDK method.
  """

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  output_gcs_uri: Optional[str]
  """Cloud Storage URI used to store the generated images."""

  safety_filter_level: Optional[SafetyFilterLevel]
  """Filter level for safety filtering."""

  person_generation: Optional[PersonGeneration]
  """Allows generation of people by the model."""

  include_rai_reason: Optional[bool]
  """Whether to include a reason for filtered-out images in the
      response."""

  output_mime_type: Optional[str]
  """The image format that the output should be saved as."""

  output_compression_quality: Optional[int]
  """The level of compression. Only applicable if the
      ``output_mime_type`` is ``image/jpeg``."""

  enhance_input_image: Optional[bool]
  """Whether to add an image enhancing step before upscaling.
      It is expected to suppress the noise and JPEG compression artifacts
      from the input image."""

  image_preservation_factor: Optional[float]
  """With a higher image preservation factor, the original image
      pixels are more respected. With a lower image preservation factor, the
      output image will have be more different from the input image, but
      with finer details and less noise."""

  labels: Optional[dict[str, str]]
  """User specified labels to track billing usage."""

  number_of_images: Optional[int]
  """"""

  mode: Optional[str]
  """"""


# Accepts either the Pydantic model or its TypedDict form.
_UpscaleImageAPIConfigOrDict = Union[
    _UpscaleImageAPIConfig, _UpscaleImageAPIConfigDict
]


class _UpscaleImageAPIParameters(_common.BaseModel):
  """API parameters for UpscaleImage."""

  model: Optional[str] = Field(
      default=None, description="""The model to use."""
  )
  image: Optional[Image] = Field(
      default=None, description="""The input image to upscale."""
  )
  # String-valued factor, e.g. "x2"/"x4" per the description below.
  upscale_factor: Optional[str] = Field(
      default=None,
      description="""The factor to upscale the image (x2 or x4).""",
  )
  config: Optional[_UpscaleImageAPIConfig] = Field(
      default=None, description="""Configuration for upscaling."""
  )


class _UpscaleImageAPIParametersDict(TypedDict, total=False):
  """API parameters for UpscaleImage."""

  # TypedDict mirror of _UpscaleImageAPIParameters.
  model: Optional[str]
  """The model to use."""

  image: Optional[ImageDict]
  """The input image to upscale."""

  upscale_factor: Optional[str]
  """The factor to upscale the image (x2 or x4)."""

  config: Optional[_UpscaleImageAPIConfigDict]
  """Configuration for upscaling."""


# Accepts either the Pydantic model or its TypedDict form.
_UpscaleImageAPIParametersOrDict = Union[
    _UpscaleImageAPIParameters, _UpscaleImageAPIParametersDict
]


class UpscaleImageResponse(_common.BaseModel):
  """Response for the request to upscale an image."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  generated_images: Optional[list[GeneratedImage]] = Field(
      default=None, description="""Generated images."""
  )


class UpscaleImageResponseDict(TypedDict, total=False):
  """Response for the request to upscale an image."""

  # TypedDict mirror of UpscaleImageResponse.
  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  generated_images: Optional[list[GeneratedImageDict]]
  """Generated images."""


# Accepts either the Pydantic model or its TypedDict form.
UpscaleImageResponseOrDict = Union[
    UpscaleImageResponse, UpscaleImageResponseDict
]


class ProductImage(_common.BaseModel):
  """An image of the product."""

  # Consumed via RecontextImageSource.product_images.
  product_image: Optional[Image] = Field(
      default=None,
      description="""An image of the product to be recontextualized.""",
  )


class ProductImageDict(TypedDict, total=False):
  """An image of the product."""

  # TypedDict mirror of ProductImage.
  product_image: Optional[ImageDict]
  """An image of the product to be recontextualized."""


# Accepts either the Pydantic model or its TypedDict form.
ProductImageOrDict = Union[ProductImage, ProductImageDict]


class RecontextImageSource(_common.BaseModel):
  """A set of source input(s) for image recontextualization."""

  # Consumed as _RecontextImageParameters.source.
  prompt: Optional[str] = Field(
      default=None,
      description="""A text prompt for guiding the model during image
      recontextualization. Not supported for Virtual Try-On.""",
  )
  person_image: Optional[Image] = Field(
      default=None,
      description="""Image of the person or subject who will be wearing the
      product(s).""",
  )
  product_images: Optional[list[ProductImage]] = Field(
      default=None, description="""A list of product images."""
  )


class RecontextImageSourceDict(TypedDict, total=False):
  """A set of source input(s) for image recontextualization."""

  # TypedDict mirror of RecontextImageSource.
  prompt: Optional[str]
  """A text prompt for guiding the model during image
      recontextualization. Not supported for Virtual Try-On."""

  person_image: Optional[ImageDict]
  """Image of the person or subject who will be wearing the
      product(s)."""

  product_images: Optional[list[ProductImageDict]]
  """A list of product images."""


# Accepts either the Pydantic model or its TypedDict form.
RecontextImageSourceOrDict = Union[
    RecontextImageSource, RecontextImageSourceDict
]


class RecontextImageConfig(_common.BaseModel):
  """Configuration for recontextualizing an image."""

  # Consumed as _RecontextImageParameters.config; every field is optional.
  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  number_of_images: Optional[int] = Field(
      default=None, description="""Number of images to generate."""
  )
  base_steps: Optional[int] = Field(
      default=None,
      description="""The number of sampling steps. A higher value has better image
      quality, while a lower value has better latency.""",
  )
  output_gcs_uri: Optional[str] = Field(
      default=None,
      description="""Cloud Storage URI used to store the generated images.""",
  )
  seed: Optional[int] = Field(
      default=None, description="""Random seed for image generation."""
  )
  safety_filter_level: Optional[SafetyFilterLevel] = Field(
      default=None, description="""Filter level for safety filtering."""
  )
  person_generation: Optional[PersonGeneration] = Field(
      default=None,
      description="""Whether allow to generate person images, and restrict to specific
      ages.""",
  )
  add_watermark: Optional[bool] = Field(
      default=None,
      description="""Whether to add a SynthID watermark to the generated images.""",
  )
  output_mime_type: Optional[str] = Field(
      default=None, description="""MIME type of the generated image."""
  )
  output_compression_quality: Optional[int] = Field(
      default=None,
      description="""Compression quality of the generated image (for ``image/jpeg``
      only).""",
  )
  enhance_prompt: Optional[bool] = Field(
      default=None, description="""Whether to use the prompt rewriting logic."""
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""User specified labels to track billing usage.""",
  )


class RecontextImageConfigDict(TypedDict, total=False):
  """Configuration for recontextualizing an image."""

  # TypedDict mirror of RecontextImageConfig.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  number_of_images: Optional[int]
  """Number of images to generate."""

  base_steps: Optional[int]
  """The number of sampling steps. A higher value has better image
      quality, while a lower value has better latency."""

  output_gcs_uri: Optional[str]
  """Cloud Storage URI used to store the generated images."""

  seed: Optional[int]
  """Random seed for image generation."""

  safety_filter_level: Optional[SafetyFilterLevel]
  """Filter level for safety filtering."""

  person_generation: Optional[PersonGeneration]
  """Whether allow to generate person images, and restrict to specific
      ages."""

  add_watermark: Optional[bool]
  """Whether to add a SynthID watermark to the generated images."""

  output_mime_type: Optional[str]
  """MIME type of the generated image."""

  output_compression_quality: Optional[int]
  """Compression quality of the generated image (for ``image/jpeg``
      only)."""

  enhance_prompt: Optional[bool]
  """Whether to use the prompt rewriting logic."""

  labels: Optional[dict[str, str]]
  """User specified labels to track billing usage."""


# Accepts either the Pydantic model or its TypedDict form.
RecontextImageConfigOrDict = Union[
    RecontextImageConfig, RecontextImageConfigDict
]


class _RecontextImageParameters(_common.BaseModel):
  """The parameters for recontextualizing an image."""

  # Underscore-prefixed internal request wrapper for recontext_image.
  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  source: Optional[RecontextImageSource] = Field(
      default=None,
      description="""A set of source input(s) for image recontextualization.""",
  )
  config: Optional[RecontextImageConfig] = Field(
      default=None,
      description="""Configuration for image recontextualization.""",
  )


class _RecontextImageParametersDict(TypedDict, total=False):
  """The parameters for recontextualizing an image."""

  # TypedDict mirror of _RecontextImageParameters.
  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  source: Optional[RecontextImageSourceDict]
  """A set of source input(s) for image recontextualization."""

  config: Optional[RecontextImageConfigDict]
  """Configuration for image recontextualization."""


# Accepts either the Pydantic model or its TypedDict form.
_RecontextImageParametersOrDict = Union[
    _RecontextImageParameters, _RecontextImageParametersDict
]


class RecontextImageResponse(_common.BaseModel):
  """The output images response."""

  # NOTE(review): unlike GenerateImagesResponse/EditImageResponse, this
  # response carries no sdk_http_response field -- confirm with the API
  # converters whether that omission is intentional.
  generated_images: Optional[list[GeneratedImage]] = Field(
      default=None, description="""List of generated images."""
  )


class RecontextImageResponseDict(TypedDict, total=False):
  """The output images response."""

  # TypedDict mirror of RecontextImageResponse.
  generated_images: Optional[list[GeneratedImageDict]]
  """List of generated images."""


# Accepts either the Pydantic model or its TypedDict form.
RecontextImageResponseOrDict = Union[
    RecontextImageResponse, RecontextImageResponseDict
]


class ScribbleImage(_common.BaseModel):
  """An image mask representing a brush scribble."""

  # Consumed via SegmentImageSource.scribble_image (interactive mode only).
  image: Optional[Image] = Field(
      default=None,
      description="""The brush scribble to guide segmentation. Valid for the interactive mode.""",
  )


class ScribbleImageDict(TypedDict, total=False):
  """An image mask representing a brush scribble."""

  # TypedDict mirror of ScribbleImage.
  image: Optional[ImageDict]
  """The brush scribble to guide segmentation. Valid for the interactive mode."""


# Accepts either the Pydantic model or its TypedDict form.
ScribbleImageOrDict = Union[ScribbleImage, ScribbleImageDict]


class SegmentImageSource(_common.BaseModel):
  """A set of source input(s) for image segmentation."""

  # Which fields are required depends on SegmentImageConfig.mode: prompt for
  # prompt/semantic modes, scribble_image for interactive mode (see the
  # field descriptions below).
  prompt: Optional[str] = Field(
      default=None,
      description="""A text prompt for guiding the model during image segmentation.
      Required for prompt mode and semantic mode, disallowed for other modes.""",
  )
  image: Optional[Image] = Field(
      default=None, description="""The image to be segmented."""
  )
  scribble_image: Optional[ScribbleImage] = Field(
      default=None,
      description="""The brush scribble to guide segmentation.
      Required for the interactive mode, disallowed for other modes.""",
  )


class SegmentImageSourceDict(TypedDict, total=False):
  """A set of source input(s) for image segmentation."""

  # TypedDict mirror of SegmentImageSource.
  prompt: Optional[str]
  """A text prompt for guiding the model during image segmentation.
      Required for prompt mode and semantic mode, disallowed for other modes."""

  image: Optional[ImageDict]
  """The image to be segmented."""

  scribble_image: Optional[ScribbleImageDict]
  """The brush scribble to guide segmentation.
      Required for the interactive mode, disallowed for other modes."""


# Accepts either the Pydantic model or its TypedDict form.
SegmentImageSourceOrDict = Union[SegmentImageSource, SegmentImageSourceDict]


class SegmentImageConfig(_common.BaseModel):
  """Configuration for segmenting an image."""

  # Consumed as _SegmentImageParameters.config; every field is optional.
  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  mode: Optional[SegmentMode] = Field(
      default=None, description="""The segmentation mode to use."""
  )
  max_predictions: Optional[int] = Field(
      default=None,
      description="""The maximum number of predictions to return up to, by top
      confidence score.""",
  )
  confidence_threshold: Optional[float] = Field(
      default=None,
      description="""The confidence score threshold for the detections as a decimal
      value. Only predictions with a confidence score higher than this
      threshold will be returned.""",
  )
  mask_dilation: Optional[float] = Field(
      default=None,
      description="""A decimal value representing how much dilation to apply to the
      masks. 0 for no dilation. 1.0 means the masked area covers the whole
      image.""",
  )
  binary_color_threshold: Optional[float] = Field(
      default=None,
      description="""The binary color threshold to apply to the masks. The threshold
      can be set to a decimal value between 0 and 255 non-inclusive.
      Set to -1 for no binary color thresholding.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""User specified labels to track billing usage.""",
  )


class SegmentImageConfigDict(TypedDict, total=False):
  """Configuration for segmenting an image."""

  # TypedDict mirror of SegmentImageConfig.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  mode: Optional[SegmentMode]
  """The segmentation mode to use."""

  max_predictions: Optional[int]
  """The maximum number of predictions to return up to, by top
      confidence score."""

  confidence_threshold: Optional[float]
  """The confidence score threshold for the detections as a decimal
      value. Only predictions with a confidence score higher than this
      threshold will be returned."""

  mask_dilation: Optional[float]
  """A decimal value representing how much dilation to apply to the
      masks. 0 for no dilation. 1.0 means the masked area covers the whole
      image."""

  binary_color_threshold: Optional[float]
  """The binary color threshold to apply to the masks. The threshold
      can be set to a decimal value between 0 and 255 non-inclusive.
      Set to -1 for no binary color thresholding."""

  labels: Optional[dict[str, str]]
  """User specified labels to track billing usage."""


# Accepts either the Pydantic model or its TypedDict form.
SegmentImageConfigOrDict = Union[SegmentImageConfig, SegmentImageConfigDict]


# Private (leading underscore) request-parameter wrapper; pairs a model ID,
# an input source, and an optional config into one request object.
class _SegmentImageParameters(_common.BaseModel):
  """The parameters for segmenting an image."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  source: Optional[SegmentImageSource] = Field(
      default=None,
      description="""A set of source input(s) for image segmentation.""",
  )
  config: Optional[SegmentImageConfig] = Field(
      default=None, description="""Configuration for image segmentation."""
  )


# TypedDict mirror of _SegmentImageParameters (nested models become Dicts).
class _SegmentImageParametersDict(TypedDict, total=False):
  """The parameters for segmenting an image."""

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  source: Optional[SegmentImageSourceDict]
  """A set of source input(s) for image segmentation."""

  config: Optional[SegmentImageConfigDict]
  """Configuration for image segmentation."""


# Union so internal call sites may pass either form.
_SegmentImageParametersOrDict = Union[
    _SegmentImageParameters, _SegmentImageParametersDict
]


# Label/score pair attached to a segmentation mask (see GeneratedImageMask).
class EntityLabel(_common.BaseModel):
  """An entity representing the segmented area."""

  label: Optional[str] = Field(
      default=None, description="""The label of the segmented entity."""
  )
  score: Optional[float] = Field(
      default=None,
      description="""The confidence score of the detected label.""",
  )


# TypedDict mirror of EntityLabel.
class EntityLabelDict(TypedDict, total=False):
  """An entity representing the segmented area."""

  label: Optional[str]
  """The label of the segmented entity."""

  score: Optional[float]
  """The confidence score of the detected label."""


# Union so call sites may pass either form.
EntityLabelOrDict = Union[EntityLabel, EntityLabelDict]


# One mask image plus the entity labels detected for the masked area.
class GeneratedImageMask(_common.BaseModel):
  """A generated image mask."""

  mask: Optional[Image] = Field(
      default=None, description="""The generated image mask."""
  )
  labels: Optional[list[EntityLabel]] = Field(
      default=None,
      description="""The detected entities on the segmented area.""",
  )


# TypedDict mirror of GeneratedImageMask (nested models become Dicts).
class GeneratedImageMaskDict(TypedDict, total=False):
  """A generated image mask."""

  mask: Optional[ImageDict]
  """The generated image mask."""

  labels: Optional[list[EntityLabelDict]]
  """The detected entities on the segmented area."""


# Union so call sites may pass either form.
GeneratedImageMaskOrDict = Union[GeneratedImageMask, GeneratedImageMaskDict]


# Response payload: the list of masks produced by a segmentation request.
class SegmentImageResponse(_common.BaseModel):
  """The output images response."""

  generated_masks: Optional[list[GeneratedImageMask]] = Field(
      default=None,
      description="""List of generated image masks.
      """,
  )


# TypedDict mirror of SegmentImageResponse.
class SegmentImageResponseDict(TypedDict, total=False):
  """The output images response."""

  generated_masks: Optional[list[GeneratedImageMaskDict]]
  """List of generated image masks.
      """


# Union so call sites may handle either form.
SegmentImageResponseOrDict = Union[
    SegmentImageResponse, SegmentImageResponseDict
]


# Per-call options for models.get; currently only HTTP overrides.
class GetModelConfig(_common.BaseModel):
  """Optional parameters for models.get method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of GetModelConfig.
class GetModelConfigDict(TypedDict, total=False):
  """Optional parameters for models.get method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union so call sites may pass either form.
GetModelConfigOrDict = Union[GetModelConfig, GetModelConfigDict]


# Private request wrapper pairing a model identifier with optional config.
class _GetModelParameters(_common.BaseModel):

  model: Optional[str] = Field(default=None, description="""""")
  config: Optional[GetModelConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _GetModelParameters.
class _GetModelParametersDict(TypedDict, total=False):

  model: Optional[str]
  """"""

  config: Optional[GetModelConfigDict]
  """Optional parameters for the request."""


# Union so internal call sites may pass either form.
_GetModelParametersOrDict = Union[_GetModelParameters, _GetModelParametersDict]


# Deployment target for a model; referenced by Model.endpoints below.
class Endpoint(_common.BaseModel):
  """An endpoint where you deploy models."""

  name: Optional[str] = Field(
      default=None, description="""Resource name of the endpoint."""
  )
  deployed_model_id: Optional[str] = Field(
      default=None,
      description="""ID of the model that's deployed to the endpoint.""",
  )


# TypedDict mirror of Endpoint.
class EndpointDict(TypedDict, total=False):
  """An endpoint where you deploy models."""

  name: Optional[str]
  """Resource name of the endpoint."""

  deployed_model_id: Optional[str]
  """ID of the model that's deployed to the endpoint."""


# Union so call sites may pass either form.
EndpointOrDict = Union[Endpoint, EndpointDict]


# Tuning provenance for a model; referenced by Model.tuned_model_info below.
class TunedModelInfo(_common.BaseModel):
  """A tuned machine learning model."""

  base_model: Optional[str] = Field(
      default=None,
      description="""ID of the base model that you want to tune.""",
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Date and time when the base model was created.""",
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Date and time when the base model was last updated.""",
  )


# TypedDict mirror of TunedModelInfo.
class TunedModelInfoDict(TypedDict, total=False):
  """A tuned machine learning model."""

  base_model: Optional[str]
  """ID of the base model that you want to tune."""

  create_time: Optional[datetime.datetime]
  """Date and time when the base model was created."""

  update_time: Optional[datetime.datetime]
  """Date and time when the base model was last updated."""


# Union so call sites may pass either form.
TunedModelInfoOrDict = Union[TunedModelInfo, TunedModelInfoDict]


# A saved model-version checkpoint; referenced by Model.checkpoints below.
class Checkpoint(_common.BaseModel):
  """Describes the machine learning model version checkpoint."""

  checkpoint_id: Optional[str] = Field(
      default=None,
      description="""The ID of the checkpoint.
      """,
  )
  epoch: Optional[int] = Field(
      default=None,
      description="""The epoch of the checkpoint.
      """,
  )
  step: Optional[int] = Field(
      default=None,
      description="""The step of the checkpoint.
      """,
  )


# TypedDict mirror of Checkpoint.
class CheckpointDict(TypedDict, total=False):
  """Describes the machine learning model version checkpoint."""

  checkpoint_id: Optional[str]
  """The ID of the checkpoint.
      """

  epoch: Optional[int]
  """The epoch of the checkpoint.
      """

  step: Optional[int]
  """The step of the checkpoint.
      """


# Union so call sites may pass either form.
CheckpointOrDict = Union[Checkpoint, CheckpointDict]


# Primary resource type returned by the models surface; aggregates identity,
# deployment (Endpoint), tuning (TunedModelInfo), checkpoints (Checkpoint),
# token limits, and default sampling parameters.
class Model(_common.BaseModel):
  """A trained machine learning model."""

  name: Optional[str] = Field(
      default=None, description="""Resource name of the model."""
  )
  display_name: Optional[str] = Field(
      default=None, description="""Display name of the model."""
  )
  description: Optional[str] = Field(
      default=None, description="""Description of the model."""
  )
  version: Optional[str] = Field(
      default=None,
      description="""Version ID of the model. A new version is committed when a new
      model version is uploaded or trained under an existing model ID. The
      version ID is an auto-incrementing decimal number in string
      representation.""",
  )
  endpoints: Optional[list[Endpoint]] = Field(
      default=None,
      description="""List of deployed models created from this base model. Note that a
      model could have been deployed to endpoints in different locations.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""Labels with user-defined metadata to organize your models.""",
  )
  tuned_model_info: Optional[TunedModelInfo] = Field(
      default=None,
      description="""Information about the tuned model from the base model.""",
  )
  input_token_limit: Optional[int] = Field(
      default=None,
      description="""The maximum number of input tokens that the model can handle.""",
  )
  output_token_limit: Optional[int] = Field(
      default=None,
      description="""The maximum number of output tokens that the model can generate.""",
  )
  supported_actions: Optional[list[str]] = Field(
      default=None,
      description="""List of actions that are supported by the model.""",
  )
  default_checkpoint_id: Optional[str] = Field(
      default=None,
      description="""The default checkpoint id of a model version.
      """,
  )
  checkpoints: Optional[list[Checkpoint]] = Field(
      default=None, description="""The checkpoints of the model."""
  )
  temperature: Optional[float] = Field(
      default=None,
      description="""Temperature value used for sampling set when the dataset was saved.
      This value is used to tune the degree of randomness.""",
  )
  max_temperature: Optional[float] = Field(
      default=None,
      description="""The maximum temperature value used for sampling set when the
      dataset was saved. This value is used to tune the degree of randomness.""",
  )
  top_p: Optional[float] = Field(
      default=None,
      description="""Optional. Specifies the nucleus sampling threshold. The model
      considers only the smallest set of tokens whose cumulative probability is
      at least `top_p`. This helps generate more diverse and less repetitive
      responses. For example, a `top_p` of 0.9 means the model considers tokens
      until the cumulative probability of the tokens to select from reaches 0.9.
      It's recommended to adjust either temperature or `top_p`, but not both.""",
  )
  top_k: Optional[int] = Field(
      default=None,
      description="""Optional. Specifies the top-k sampling threshold. The model
      considers only the top k most probable tokens for the next token. This can
      be useful for generating more coherent and less random text. For example,
      a `top_k` of 40 means the model will choose the next word from the 40 most
      likely words.""",
  )
  thinking: Optional[bool] = Field(
      default=None,
      description="""Whether the model supports thinking features. If true, thoughts are
      returned only if the model supports thought and thoughts are available.""",
  )


# TypedDict mirror of Model (nested models become their Dict counterparts).
class ModelDict(TypedDict, total=False):
  """A trained machine learning model."""

  name: Optional[str]
  """Resource name of the model."""

  display_name: Optional[str]
  """Display name of the model."""

  description: Optional[str]
  """Description of the model."""

  version: Optional[str]
  """Version ID of the model. A new version is committed when a new
      model version is uploaded or trained under an existing model ID. The
      version ID is an auto-incrementing decimal number in string
      representation."""

  endpoints: Optional[list[EndpointDict]]
  """List of deployed models created from this base model. Note that a
      model could have been deployed to endpoints in different locations."""

  labels: Optional[dict[str, str]]
  """Labels with user-defined metadata to organize your models."""

  tuned_model_info: Optional[TunedModelInfoDict]
  """Information about the tuned model from the base model."""

  input_token_limit: Optional[int]
  """The maximum number of input tokens that the model can handle."""

  output_token_limit: Optional[int]
  """The maximum number of output tokens that the model can generate."""

  supported_actions: Optional[list[str]]
  """List of actions that are supported by the model."""

  default_checkpoint_id: Optional[str]
  """The default checkpoint id of a model version.
      """

  checkpoints: Optional[list[CheckpointDict]]
  """The checkpoints of the model."""

  temperature: Optional[float]
  """Temperature value used for sampling set when the dataset was saved.
      This value is used to tune the degree of randomness."""

  max_temperature: Optional[float]
  """The maximum temperature value used for sampling set when the
      dataset was saved. This value is used to tune the degree of randomness."""

  top_p: Optional[float]
  """Optional. Specifies the nucleus sampling threshold. The model
      considers only the smallest set of tokens whose cumulative probability is
      at least `top_p`. This helps generate more diverse and less repetitive
      responses. For example, a `top_p` of 0.9 means the model considers tokens
      until the cumulative probability of the tokens to select from reaches 0.9.
      It's recommended to adjust either temperature or `top_p`, but not both."""

  top_k: Optional[int]
  """Optional. Specifies the top-k sampling threshold. The model
      considers only the top k most probable tokens for the next token. This can
      be useful for generating more coherent and less random text. For example,
      a `top_k` of 40 means the model will choose the next word from the 40 most
      likely words."""

  thinking: Optional[bool]
  """Whether the model supports thinking features. If true, thoughts are
      returned only if the model supports thought and thoughts are available."""


# Union so call sites may pass either form.
ModelOrDict = Union[Model, ModelDict]


# Pagination and filtering options for listing models.
class ListModelsConfig(_common.BaseModel):

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")
  filter: Optional[str] = Field(default=None, description="""""")
  query_base: Optional[bool] = Field(
      default=None,
      description="""Set true to list base models, false to list tuned models.""",
  )


# TypedDict mirror of ListModelsConfig.
class ListModelsConfigDict(TypedDict, total=False):

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""

  filter: Optional[str]
  """"""

  query_base: Optional[bool]
  """Set true to list base models, false to list tuned models."""


# Union so call sites may pass either form.
ListModelsConfigOrDict = Union[ListModelsConfig, ListModelsConfigDict]


# Private request wrapper: list-models takes only an optional config.
class _ListModelsParameters(_common.BaseModel):

  config: Optional[ListModelsConfig] = Field(default=None, description="""""")


# TypedDict mirror of _ListModelsParameters.
class _ListModelsParametersDict(TypedDict, total=False):

  config: Optional[ListModelsConfigDict]
  """"""


# Union so internal call sites may pass either form.
_ListModelsParametersOrDict = Union[
    _ListModelsParameters, _ListModelsParametersDict
]


# One page of list-models results plus the token for the next page.
class ListModelsResponse(_common.BaseModel):

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  next_page_token: Optional[str] = Field(default=None, description="""""")
  models: Optional[list[Model]] = Field(default=None, description="""""")


# TypedDict mirror of ListModelsResponse.
class ListModelsResponseDict(TypedDict, total=False):

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """"""

  models: Optional[list[ModelDict]]
  """"""


# Union so call sites may handle either form.
ListModelsResponseOrDict = Union[ListModelsResponse, ListModelsResponseDict]


# Mutable attributes accepted when updating a tuned model.
class UpdateModelConfig(_common.BaseModel):
  """Configuration for updating a tuned model."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  display_name: Optional[str] = Field(default=None, description="""""")
  description: Optional[str] = Field(default=None, description="""""")
  default_checkpoint_id: Optional[str] = Field(default=None, description="""""")


# TypedDict mirror of UpdateModelConfig.
class UpdateModelConfigDict(TypedDict, total=False):
  """Configuration for updating a tuned model."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  display_name: Optional[str]
  """"""

  description: Optional[str]
  """"""

  default_checkpoint_id: Optional[str]
  """"""


# Union so call sites may pass either form.
UpdateModelConfigOrDict = Union[UpdateModelConfig, UpdateModelConfigDict]


# Private request wrapper pairing a model identifier with update config.
class _UpdateModelParameters(_common.BaseModel):
  """Configuration for updating a tuned model."""

  model: Optional[str] = Field(default=None, description="""""")
  config: Optional[UpdateModelConfig] = Field(default=None, description="""""")


# TypedDict mirror of _UpdateModelParameters.
class _UpdateModelParametersDict(TypedDict, total=False):
  """Configuration for updating a tuned model."""

  model: Optional[str]
  """"""

  config: Optional[UpdateModelConfigDict]
  """"""


# Union so internal call sites may pass either form.
_UpdateModelParametersOrDict = Union[
    _UpdateModelParameters, _UpdateModelParametersDict
]


# Per-call options for deleting a tuned model; currently only HTTP overrides.
class DeleteModelConfig(_common.BaseModel):
  """Configuration for deleting a tuned model."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of DeleteModelConfig.
class DeleteModelConfigDict(TypedDict, total=False):
  """Configuration for deleting a tuned model."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union so call sites may pass either form.
DeleteModelConfigOrDict = Union[DeleteModelConfig, DeleteModelConfigDict]


# Private request wrapper pairing a model identifier with delete config.
class _DeleteModelParameters(_common.BaseModel):
  """Parameters for deleting a tuned model."""

  model: Optional[str] = Field(default=None, description="""""")
  config: Optional[DeleteModelConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _DeleteModelParameters.
class _DeleteModelParametersDict(TypedDict, total=False):
  """Parameters for deleting a tuned model."""

  model: Optional[str]
  """"""

  config: Optional[DeleteModelConfigDict]
  """Optional parameters for the request."""


# Union so internal call sites may pass either form.
_DeleteModelParametersOrDict = Union[
    _DeleteModelParameters, _DeleteModelParametersDict
]


# Delete returns no resource body; only the raw HTTP response is retained.
class DeleteModelResponse(_common.BaseModel):

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )


# TypedDict mirror of DeleteModelResponse.
class DeleteModelResponseDict(TypedDict, total=False):

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""


# Union so call sites may handle either form.
DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict]


# Full generation config: sampling parameters, response format/schema,
# modalities, speech and thinking settings. Several fields are noted in their
# descriptions as supported on only one of Vertex AI / Gemini API.
class GenerationConfig(_common.BaseModel):
  """Generation config."""

  model_selection_config: Optional[ModelSelectionConfig] = Field(
      default=None, description="""Optional. Config for model selection."""
  )
  response_json_schema: Optional[Any] = Field(
      default=None,
      description="""Output schema of the generated response. This is an alternative to
      `response_schema` that accepts [JSON Schema](https://json-schema.org/).
      """,
  )
  audio_timestamp: Optional[bool] = Field(
      default=None,
      description="""Optional. If enabled, audio timestamp will be included in the request to the model. This field is not supported in Gemini API.""",
  )
  candidate_count: Optional[int] = Field(
      default=None,
      description="""Optional. Number of candidates to generate.""",
  )
  enable_affective_dialog: Optional[bool] = Field(
      default=None,
      description="""Optional. If enabled, the model will detect emotions and adapt its responses accordingly. This field is not supported in Gemini API.""",
  )
  frequency_penalty: Optional[float] = Field(
      default=None, description="""Optional. Frequency penalties."""
  )
  logprobs: Optional[int] = Field(
      default=None, description="""Optional. Logit probabilities."""
  )
  max_output_tokens: Optional[int] = Field(
      default=None,
      description="""Optional. The maximum number of output tokens to generate per message.""",
  )
  media_resolution: Optional[MediaResolution] = Field(
      default=None,
      description="""Optional. If specified, the media resolution specified will be used.""",
  )
  presence_penalty: Optional[float] = Field(
      default=None, description="""Optional. Positive penalties."""
  )
  response_logprobs: Optional[bool] = Field(
      default=None,
      description="""Optional. If true, export the logprobs results in response.""",
  )
  response_mime_type: Optional[str] = Field(
      default=None,
      description="""Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature.""",
  )
  response_modalities: Optional[list[Modality]] = Field(
      default=None, description="""Optional. The modalities of the response."""
  )
  response_schema: Optional[Schema] = Field(
      default=None,
      description="""Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response.""",
  )
  routing_config: Optional[GenerationConfigRoutingConfig] = Field(
      default=None,
      description="""Optional. Routing configuration. This field is not supported in Gemini API.""",
  )
  seed: Optional[int] = Field(default=None, description="""Optional. Seed.""")
  speech_config: Optional[SpeechConfig] = Field(
      default=None, description="""Optional. The speech generation config."""
  )
  stop_sequences: Optional[list[str]] = Field(
      default=None, description="""Optional. Stop sequences."""
  )
  temperature: Optional[float] = Field(
      default=None,
      description="""Optional. Controls the randomness of predictions.""",
  )
  thinking_config: Optional[ThinkingConfig] = Field(
      default=None,
      description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""",
  )
  top_k: Optional[float] = Field(
      default=None,
      description="""Optional. If specified, top-k sampling will be used.""",
  )
  top_p: Optional[float] = Field(
      default=None,
      description="""Optional. If specified, nucleus sampling will be used.""",
  )
  enable_enhanced_civic_answers: Optional[bool] = Field(
      default=None,
      description="""Optional. Enables enhanced civic answers. It may not be available for all models. This field is not supported in Vertex AI.""",
  )


# TypedDict mirror of GenerationConfig (nested models become Dicts).
class GenerationConfigDict(TypedDict, total=False):
  """Generation config."""

  model_selection_config: Optional[ModelSelectionConfigDict]
  """Optional. Config for model selection."""

  response_json_schema: Optional[Any]
  """Output schema of the generated response. This is an alternative to
      `response_schema` that accepts [JSON Schema](https://json-schema.org/).
      """

  audio_timestamp: Optional[bool]
  """Optional. If enabled, audio timestamp will be included in the request to the model. This field is not supported in Gemini API."""

  candidate_count: Optional[int]
  """Optional. Number of candidates to generate."""

  enable_affective_dialog: Optional[bool]
  """Optional. If enabled, the model will detect emotions and adapt its responses accordingly. This field is not supported in Gemini API."""

  frequency_penalty: Optional[float]
  """Optional. Frequency penalties."""

  logprobs: Optional[int]
  """Optional. Logit probabilities."""

  max_output_tokens: Optional[int]
  """Optional. The maximum number of output tokens to generate per message."""

  media_resolution: Optional[MediaResolution]
  """Optional. If specified, the media resolution specified will be used."""

  presence_penalty: Optional[float]
  """Optional. Positive penalties."""

  response_logprobs: Optional[bool]
  """Optional. If true, export the logprobs results in response."""

  response_mime_type: Optional[str]
  """Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature."""

  response_modalities: Optional[list[Modality]]
  """Optional. The modalities of the response."""

  response_schema: Optional[SchemaDict]
  """Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response."""

  routing_config: Optional[GenerationConfigRoutingConfigDict]
  """Optional. Routing configuration. This field is not supported in Gemini API."""

  seed: Optional[int]
  """Optional. Seed."""

  speech_config: Optional[SpeechConfigDict]
  """Optional. The speech generation config."""

  stop_sequences: Optional[list[str]]
  """Optional. Stop sequences."""

  temperature: Optional[float]
  """Optional. Controls the randomness of predictions."""

  thinking_config: Optional[ThinkingConfigDict]
  """Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking."""

  top_k: Optional[float]
  """Optional. If specified, top-k sampling will be used."""

  top_p: Optional[float]
  """Optional. If specified, nucleus sampling will be used."""

  enable_enhanced_civic_answers: Optional[bool]
  """Optional. Enables enhanced civic answers. It may not be available for all models. This field is not supported in Vertex AI."""


# Union so call sites may pass either form.
GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict]


# Options for count_tokens: system instruction, tools, and generation config
# can all affect the token count.
class CountTokensConfig(_common.BaseModel):
  """Config for the count_tokens method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  system_instruction: Optional[ContentUnion] = Field(
      default=None,
      description="""Instructions for the model to steer it toward better performance.
      """,
  )
  tools: Optional[list[Tool]] = Field(
      default=None,
      description="""Code that enables the system to interact with external systems to
      perform an action outside of the knowledge and scope of the model.
      """,
  )
  generation_config: Optional[GenerationConfig] = Field(
      default=None,
      description="""Configuration that the model uses to generate the response. Not
      supported by the Gemini Developer API.
      """,
  )


# TypedDict mirror of CountTokensConfig.
class CountTokensConfigDict(TypedDict, total=False):
  """Config for the count_tokens method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  system_instruction: Optional[ContentUnionDict]
  """Instructions for the model to steer it toward better performance.
      """

  tools: Optional[list[ToolDict]]
  """Code that enables the system to interact with external systems to
      perform an action outside of the knowledge and scope of the model.
      """

  generation_config: Optional[GenerationConfigDict]
  """Configuration that the model uses to generate the response. Not
      supported by the Gemini Developer API.
      """


# Union so call sites may pass either form.
CountTokensConfigOrDict = Union[CountTokensConfig, CountTokensConfigDict]


# Private request wrapper pairing a model ID, contents, and config.
class _CountTokensParameters(_common.BaseModel):
  """Parameters for counting tokens."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  contents: Optional[ContentListUnion] = Field(
      default=None, description="""Input content."""
  )
  config: Optional[CountTokensConfig] = Field(
      default=None, description="""Configuration for counting tokens."""
  )


# TypedDict mirror of _CountTokensParameters.
class _CountTokensParametersDict(TypedDict, total=False):
  """Parameters for counting tokens."""

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  contents: Optional[ContentListUnionDict]
  """Input content."""

  config: Optional[CountTokensConfigDict]
  """Configuration for counting tokens."""


# Union so internal call sites may pass either form.
_CountTokensParametersOrDict = Union[
    _CountTokensParameters, _CountTokensParametersDict
]


# Token-count result, including the portion served from cached content.
class CountTokensResponse(_common.BaseModel):
  """Response for counting tokens."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  total_tokens: Optional[int] = Field(
      default=None, description="""Total number of tokens."""
  )
  cached_content_token_count: Optional[int] = Field(
      default=None,
      description="""Number of tokens in the cached part of the prompt (the cached content).""",
  )


# TypedDict mirror of CountTokensResponse.
class CountTokensResponseDict(TypedDict, total=False):
  """Response for counting tokens."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  total_tokens: Optional[int]
  """Total number of tokens."""

  cached_content_token_count: Optional[int]
  """Number of tokens in the cached part of the prompt (the cached content)."""


# Union so call sites may handle either form.
CountTokensResponseOrDict = Union[CountTokensResponse, CountTokensResponseDict]


# Per-call options for compute_tokens; currently only HTTP overrides.
class ComputeTokensConfig(_common.BaseModel):
  """Optional parameters for computing tokens."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of ComputeTokensConfig.
class ComputeTokensConfigDict(TypedDict, total=False):
  """Optional parameters for computing tokens."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union so call sites may pass either form.
ComputeTokensConfigOrDict = Union[ComputeTokensConfig, ComputeTokensConfigDict]


# Private request wrapper pairing a model ID, contents, and config.
class _ComputeTokensParameters(_common.BaseModel):
  """Parameters for computing tokens."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  contents: Optional[ContentListUnion] = Field(
      default=None, description="""Input content."""
  )
  config: Optional[ComputeTokensConfig] = Field(
      default=None,
      description="""Optional parameters for the request.
      """,
  )


# TypedDict mirror of _ComputeTokensParameters.
class _ComputeTokensParametersDict(TypedDict, total=False):
  """Parameters for computing tokens."""

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  contents: Optional[ContentListUnionDict]
  """Input content."""

  config: Optional[ComputeTokensConfigDict]
  """Optional parameters for the request.
      """


# Union so internal call sites may pass either form.
_ComputeTokensParametersOrDict = Union[
    _ComputeTokensParameters, _ComputeTokensParametersDict
]


# Parallel lists: tokens[i] is the raw token whose ID is token_ids[i].
class TokensInfo(_common.BaseModel):
  """Tokens info with a list of tokens and the corresponding list of token ids."""

  role: Optional[str] = Field(
      default=None,
      description="""Optional fields for the role from the corresponding Content.""",
  )
  token_ids: Optional[list[int]] = Field(
      default=None, description="""A list of token ids from the input."""
  )
  tokens: Optional[list[bytes]] = Field(
      default=None, description="""A list of tokens from the input."""
  )


# TypedDict mirror of TokensInfo.
class TokensInfoDict(TypedDict, total=False):
  """Tokens info with a list of tokens and the corresponding list of token ids."""

  role: Optional[str]
  """Optional fields for the role from the corresponding Content."""

  token_ids: Optional[list[int]]
  """A list of token ids from the input."""

  tokens: Optional[list[bytes]]
  """A list of tokens from the input."""


# Union so call sites may pass either form.
TokensInfoOrDict = Union[TokensInfo, TokensInfoDict]


# One TokensInfo per request instance (a request may carry multiple prompts).
class ComputeTokensResponse(_common.BaseModel):
  """Response for computing tokens."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  tokens_info: Optional[list[TokensInfo]] = Field(
      default=None,
      description="""Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances.""",
  )


# TypedDict mirror of ComputeTokensResponse.
class ComputeTokensResponseDict(TypedDict, total=False):
  """Response for computing tokens."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  tokens_info: Optional[list[TokensInfoDict]]
  """Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances."""


# Union so call sites may handle either form.
ComputeTokensResponseOrDict = Union[
    ComputeTokensResponse, ComputeTokensResponseDict
]


class Video(_common.BaseModel):
  """A generated video."""

  uri: Optional[str] = Field(
      default=None, description="""Path to another storage."""
  )
  video_bytes: Optional[bytes] = Field(
      default=None, description="""Video bytes."""
  )
  mime_type: Optional[str] = Field(
      default=None, description="""Video encoding, for example ``video/mp4``."""
  )

  @classmethod
  def from_file(
      cls, *, location: str, mime_type: Optional[str] = None
  ) -> 'Video':
    """Loads a video from a local file.

    Args:
        location: The local path to load the video from.
        mime_type: The MIME type of the video. If not provided, the MIME type
          will be automatically determined.

    Returns:
        A loaded video as an `Video` object.
    """
    import mimetypes  # pylint: disable=g-import-not-at-top
    import pathlib  # pylint: disable=g-import-not-at-top

    data = pathlib.Path(location).read_bytes()
    resolved_mime_type = mime_type
    if not resolved_mime_type:
      # Fall back to guessing the MIME type from the file extension.
      resolved_mime_type, _ = mimetypes.guess_type(location)
    return cls(video_bytes=data, mime_type=resolved_mime_type)

  def save(self, path: str) -> None:
    """Saves the video to a file.

    Args:
        path: Local path where to save the video.

    Raises:
        NotImplementedError: If the video has no local bytes (remote-only).
    """
    import pathlib  # pylint: disable=g-import-not-at-top

    if not self.video_bytes:
      raise NotImplementedError('Saving remote videos is not supported.')

    pathlib.Path(path).write_bytes(self.video_bytes)

  def show(self) -> None:
    """Shows the video.

    If the video has no mime_type, it is assumed to be video/mp4.

    This method only works in a notebook environment.

    Raises:
        ValueError: If the video is remote-only or has no bytes at all.
    """
    # Guard clauses: only locally-held bytes can be rendered inline.
    if self.uri and not self.video_bytes:
      raise ValueError('Showing remote videos is not supported.')
    if not self.video_bytes:
      raise ValueError('Video has no bytes.')

    try:
      from IPython import display as IPython_display
    except ImportError:
      # Not in a notebook environment; silently do nothing.
      return

    IPython_display.display(
        IPython_display.Video(
            data=self.video_bytes,
            mimetype=self.mime_type or 'video/mp4',
            embed=True,
        )
    )

  def __repr__(self) -> str:
    # Avoid dumping raw bytes into the repr; use a short placeholder instead.
    shown_bytes = '<video_bytes>' if self.video_bytes else 'None'
    return (
        f'Video(uri={self.uri}, video_bytes={shown_bytes},'
        f' mime_type={self.mime_type})'
    )


class VideoDict(TypedDict, total=False):
  """A generated video."""

  uri: Optional[str]
  """Path to another storage."""

  video_bytes: Optional[bytes]
  """Video bytes."""

  mime_type: Optional[str]
  """Video encoding, for example ``video/mp4``."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
VideoOrDict = Union[Video, VideoDict]


class GenerateVideosSource(_common.BaseModel):
  """A set of source input(s) for video generation."""

  prompt: Optional[str] = Field(
      default=None,
      description="""The text prompt for generating the videos.
      Optional if image or video is provided.""",
  )
  image: Optional[Image] = Field(
      default=None,
      description="""The input image for generating the videos.
      Optional if prompt is provided. Not allowed if video is provided.""",
  )
  video: Optional[Video] = Field(
      default=None,
      description="""The input video for video extension use cases.
      Optional if prompt is provided. Not allowed if image is provided.""",
  )


# Plain-dict mirror of GenerateVideosSource.
class GenerateVideosSourceDict(TypedDict, total=False):
  """A set of source input(s) for video generation."""

  prompt: Optional[str]
  """The text prompt for generating the videos.
      Optional if image or video is provided."""

  image: Optional[ImageDict]
  """The input image for generating the videos.
      Optional if prompt is provided. Not allowed if video is provided."""

  video: Optional[VideoDict]
  """The input video for video extension use cases.
      Optional if prompt is provided. Not allowed if image is provided."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
GenerateVideosSourceOrDict = Union[
    GenerateVideosSource, GenerateVideosSourceDict
]


class VideoGenerationReferenceImage(_common.BaseModel):
  """A reference image for video generation."""

  image: Optional[Image] = Field(
      default=None, description="""The reference image."""
  )
  reference_type: Optional[VideoGenerationReferenceType] = Field(
      default=None,
      description="""The type of the reference image, which defines how the reference
      image will be used to generate the video.""",
  )


# Plain-dict mirror of VideoGenerationReferenceImage.
class VideoGenerationReferenceImageDict(TypedDict, total=False):
  """A reference image for video generation."""

  image: Optional[ImageDict]
  """The reference image."""

  reference_type: Optional[VideoGenerationReferenceType]
  """The type of the reference image, which defines how the reference
      image will be used to generate the video."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
VideoGenerationReferenceImageOrDict = Union[
    VideoGenerationReferenceImage, VideoGenerationReferenceImageDict
]


class VideoGenerationMask(_common.BaseModel):
  """A mask for video generation."""

  image: Optional[Image] = Field(
      default=None,
      description="""The image mask to use for generating videos.""",
  )
  mask_mode: Optional[VideoGenerationMaskMode] = Field(
      default=None,
      description="""Describes how the mask will be used. Inpainting masks must
      match the aspect ratio of the input video. Outpainting masks can be
      either 9:16 or 16:9.""",
  )


# Plain-dict mirror of VideoGenerationMask.
class VideoGenerationMaskDict(TypedDict, total=False):
  """A mask for video generation."""

  image: Optional[ImageDict]
  """The image mask to use for generating videos."""

  mask_mode: Optional[VideoGenerationMaskMode]
  """Describes how the mask will be used. Inpainting masks must
      match the aspect ratio of the input video. Outpainting masks can be
      either 9:16 or 16:9."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
VideoGenerationMaskOrDict = Union[VideoGenerationMask, VideoGenerationMaskDict]


class GenerateVideosConfig(_common.BaseModel):
  """Configuration for generating videos."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  number_of_videos: Optional[int] = Field(
      default=None, description="""Number of output videos."""
  )
  output_gcs_uri: Optional[str] = Field(
      default=None,
      description="""The gcs bucket where to save the generated videos.""",
  )
  fps: Optional[int] = Field(
      default=None, description="""Frames per second for video generation."""
  )
  duration_seconds: Optional[int] = Field(
      default=None,
      description="""Duration of the clip for video generation in seconds.""",
  )
  seed: Optional[int] = Field(
      default=None,
      description="""The RNG seed. If RNG seed is exactly same for each request with
      unchanged inputs, the prediction results will be consistent. Otherwise,
      a random RNG seed will be used each time to produce a different
      result.""",
  )
  aspect_ratio: Optional[str] = Field(
      default=None,
      description="""The aspect ratio for the generated video. 16:9 (landscape) and
      9:16 (portrait) are supported.""",
  )
  resolution: Optional[str] = Field(
      default=None,
      description="""The resolution for the generated video. 720p and 1080p are
      supported.""",
  )
  person_generation: Optional[str] = Field(
      default=None,
      description="""Whether allow to generate person videos, and restrict to specific
      ages. Supported values are: dont_allow, allow_adult.""",
  )
  pubsub_topic: Optional[str] = Field(
      default=None,
      description="""The pubsub topic where to publish the video generation
      progress.""",
  )
  negative_prompt: Optional[str] = Field(
      default=None,
      description="""Explicitly state what should not be included in the generated
      videos.""",
  )
  enhance_prompt: Optional[bool] = Field(
      default=None, description="""Whether to use the prompt rewriting logic."""
  )
  generate_audio: Optional[bool] = Field(
      default=None,
      description="""Whether to generate audio along with the video.""",
  )
  last_frame: Optional[Image] = Field(
      default=None,
      description="""Image to use as the last frame of generated videos.
      Only supported for image to video use cases.""",
  )
  reference_images: Optional[list[VideoGenerationReferenceImage]] = Field(
      default=None,
      description="""The images to use as the references to generate the videos.
      If this field is provided, the text prompt field must also be provided.
      The image, video, or last_frame field are not supported. Each image must
      be associated with a type. Veo 2 supports up to 3 asset images *or* 1
      style image.""",
  )
  mask: Optional[VideoGenerationMask] = Field(
      default=None, description="""The mask to use for generating videos."""
  )
  compression_quality: Optional[VideoCompressionQuality] = Field(
      default=None,
      description="""Compression quality of the generated videos.""",
  )


# Plain-dict mirror of GenerateVideosConfig.
class GenerateVideosConfigDict(TypedDict, total=False):
  """Configuration for generating videos."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  number_of_videos: Optional[int]
  """Number of output videos."""

  output_gcs_uri: Optional[str]
  """The gcs bucket where to save the generated videos."""

  fps: Optional[int]
  """Frames per second for video generation."""

  duration_seconds: Optional[int]
  """Duration of the clip for video generation in seconds."""

  seed: Optional[int]
  """The RNG seed. If RNG seed is exactly same for each request with
      unchanged inputs, the prediction results will be consistent. Otherwise,
      a random RNG seed will be used each time to produce a different
      result."""

  aspect_ratio: Optional[str]
  """The aspect ratio for the generated video. 16:9 (landscape) and
      9:16 (portrait) are supported."""

  resolution: Optional[str]
  """The resolution for the generated video. 720p and 1080p are
      supported."""

  person_generation: Optional[str]
  """Whether allow to generate person videos, and restrict to specific
      ages. Supported values are: dont_allow, allow_adult."""

  pubsub_topic: Optional[str]
  """The pubsub topic where to publish the video generation
      progress."""

  negative_prompt: Optional[str]
  """Explicitly state what should not be included in the generated
      videos."""

  enhance_prompt: Optional[bool]
  """Whether to use the prompt rewriting logic."""

  generate_audio: Optional[bool]
  """Whether to generate audio along with the video."""

  last_frame: Optional[ImageDict]
  """Image to use as the last frame of generated videos.
      Only supported for image to video use cases."""

  reference_images: Optional[list[VideoGenerationReferenceImageDict]]
  """The images to use as the references to generate the videos.
      If this field is provided, the text prompt field must also be provided.
      The image, video, or last_frame field are not supported. Each image must
      be associated with a type. Veo 2 supports up to 3 asset images *or* 1
      style image."""

  mask: Optional[VideoGenerationMaskDict]
  """The mask to use for generating videos."""

  compression_quality: Optional[VideoCompressionQuality]
  """Compression quality of the generated videos."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
GenerateVideosConfigOrDict = Union[
    GenerateVideosConfig, GenerateVideosConfigDict
]


class _GenerateVideosParameters(_common.BaseModel):
  """Class that represents the parameters for generating videos."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  prompt: Optional[str] = Field(
      default=None,
      description="""The text prompt for generating the videos.
      Optional if image or video is provided.""",
  )
  image: Optional[Image] = Field(
      default=None,
      description="""The input image for generating the videos.
      Optional if prompt is provided. Not allowed if video is provided.""",
  )
  video: Optional[Video] = Field(
      default=None,
      description="""The input video for video extension use cases.
      Optional if prompt is provided. Not allowed if image is provided.""",
  )
  source: Optional[GenerateVideosSource] = Field(
      default=None,
      description="""A set of source input(s) for video generation.""",
  )
  config: Optional[GenerateVideosConfig] = Field(
      default=None, description="""Configuration for generating videos."""
  )


# Plain-dict mirror of _GenerateVideosParameters.
class _GenerateVideosParametersDict(TypedDict, total=False):
  """Class that represents the parameters for generating videos."""

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  prompt: Optional[str]
  """The text prompt for generating the videos.
      Optional if image or video is provided."""

  image: Optional[ImageDict]
  """The input image for generating the videos.
      Optional if prompt is provided. Not allowed if video is provided."""

  video: Optional[VideoDict]
  """The input video for video extension use cases.
      Optional if prompt is provided. Not allowed if image is provided."""

  source: Optional[GenerateVideosSourceDict]
  """A set of source input(s) for video generation."""

  config: Optional[GenerateVideosConfigDict]
  """Configuration for generating videos."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
_GenerateVideosParametersOrDict = Union[
    _GenerateVideosParameters, _GenerateVideosParametersDict
]


class GeneratedVideo(_common.BaseModel):
  """A generated video."""

  video: Optional[Video] = Field(
      default=None, description="""The output video"""
  )


# Plain-dict mirror of GeneratedVideo.
class GeneratedVideoDict(TypedDict, total=False):
  """A generated video."""

  video: Optional[VideoDict]
  """The output video"""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
GeneratedVideoOrDict = Union[GeneratedVideo, GeneratedVideoDict]


class GenerateVideosResponse(_common.BaseModel):
  """Response with generated videos."""

  generated_videos: Optional[list[GeneratedVideo]] = Field(
      default=None, description="""List of the generated videos"""
  )
  rai_media_filtered_count: Optional[int] = Field(
      default=None,
      description="""Returns if any videos were filtered due to RAI policies.""",
  )
  rai_media_filtered_reasons: Optional[list[str]] = Field(
      default=None, description="""Returns rai failure reasons if any."""
  )


# Plain-dict mirror of GenerateVideosResponse.
class GenerateVideosResponseDict(TypedDict, total=False):
  """Response with generated videos."""

  generated_videos: Optional[list[GeneratedVideoDict]]
  """List of the generated videos"""

  rai_media_filtered_count: Optional[int]
  """Returns if any videos were filtered due to RAI policies."""

  rai_media_filtered_reasons: Optional[list[str]]
  """Returns rai failure reasons if any."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
GenerateVideosResponseOrDict = Union[
    GenerateVideosResponse, GenerateVideosResponseDict
]


class Operation(ABC):
  """A long-running operation."""

  # NOTE(review): these pydantic `Field` declarations only become validated
  # model fields when a concrete subclass also inherits from a pydantic model
  # (e.g. `GenerateVideosOperation(_common.BaseModel, Operation)`); on this
  # plain ABC they are ordinary class attributes.
  name: Optional[str] = Field(
      default=None,
      description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""",
  )
  metadata: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata.  Any method that returns a long-running operation should document the metadata type, if any.""",
  )
  done: Optional[bool] = Field(
      default=None,
      description="""If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""",
  )
  error: Optional[dict[str, Any]] = Field(
      default=None,
      description="""The error result of the operation in case of failure or cancellation.""",
  )

  @classmethod
  @abstractmethod
  def from_api_response(
      cls, api_response: Any, is_vertex_ai: bool = False
  ) -> Self:
    """Creates an Operation from an API response.

    Args:
        api_response: The raw API response to convert.
        is_vertex_ai: Whether the response came from the Vertex AI backend
          (True) or the Gemini Developer API backend (False).

    Returns:
        The populated operation instance.
    """
    pass


class GenerateVideosOperation(_common.BaseModel, Operation):
  """A video generation operation."""

  response: Optional[GenerateVideosResponse] = Field(
      default=None, description="""The generated videos."""
  )
  result: Optional[GenerateVideosResponse] = Field(
      default=None, description="""The generated videos."""
  )

  @classmethod
  def from_api_response(
      cls, api_response: Any, is_vertex_ai: bool = False
  ) -> Self:
    """Instantiates a GenerateVideosOperation from an API response."""
    # Pick the backend-specific converter for the raw operation payload.
    converter = (
        _GenerateVideosOperation_from_vertex
        if is_vertex_ai
        else _GenerateVideosOperation_from_mldev
    )
    return cls._from_response(response=converter(api_response), kwargs={})


class GetTuningJobConfig(_common.BaseModel):
  """Optional parameters for tunings.get method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# Plain-dict mirror of GetTuningJobConfig.
class GetTuningJobConfigDict(TypedDict, total=False):
  """Optional parameters for tunings.get method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
GetTuningJobConfigOrDict = Union[GetTuningJobConfig, GetTuningJobConfigDict]


class _GetTuningJobParameters(_common.BaseModel):
  """Parameters for the get method."""

  # NOTE(review): the generator emitted no description for `name`; presumably
  # it is the tuning job resource name — verify against the tunings.get caller.
  name: Optional[str] = Field(default=None, description="""""")
  config: Optional[GetTuningJobConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# Plain-dict mirror of _GetTuningJobParameters.
class _GetTuningJobParametersDict(TypedDict, total=False):
  """Parameters for the get method."""

  name: Optional[str]
  """"""

  config: Optional[GetTuningJobConfigDict]
  """Optional parameters for the request."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
_GetTuningJobParametersOrDict = Union[
    _GetTuningJobParameters, _GetTuningJobParametersDict
]


class TunedModelCheckpoint(_common.BaseModel):
  """TunedModelCheckpoint for the Tuned Model of a Tuning Job."""

  checkpoint_id: Optional[str] = Field(
      default=None,
      description="""The ID of the checkpoint.
      """,
  )
  epoch: Optional[int] = Field(
      default=None,
      description="""The epoch of the checkpoint.
      """,
  )
  step: Optional[int] = Field(
      default=None,
      description="""The step of the checkpoint.
      """,
  )
  endpoint: Optional[str] = Field(
      default=None,
      description="""The Endpoint resource name that the checkpoint is deployed to.
      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
      """,
  )


# Plain-dict mirror of TunedModelCheckpoint.
class TunedModelCheckpointDict(TypedDict, total=False):
  """TunedModelCheckpoint for the Tuned Model of a Tuning Job."""

  checkpoint_id: Optional[str]
  """The ID of the checkpoint.
      """

  epoch: Optional[int]
  """The epoch of the checkpoint.
      """

  step: Optional[int]
  """The step of the checkpoint.
      """

  endpoint: Optional[str]
  """The Endpoint resource name that the checkpoint is deployed to.
      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
      """


# Union type accepting either the Pydantic model or its TypedDict equivalent.
TunedModelCheckpointOrDict = Union[
    TunedModelCheckpoint, TunedModelCheckpointDict
]


class TunedModel(_common.BaseModel):
  """TunedModel for the Tuned Model of a Tuning Job."""

  model: Optional[str] = Field(
      default=None,
      description="""Output only. The resource name of the TunedModel.
      Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
      When tuning from a base model, the version_id will be 1.
      For continuous tuning, the version id will be incremented by 1 from the
      last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
      """,
  )
  endpoint: Optional[str] = Field(
      default=None,
      description="""Output only. A resource name of an Endpoint.
      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
      """,
  )
  checkpoints: Optional[list[TunedModelCheckpoint]] = Field(
      default=None,
      description="""The checkpoints associated with this TunedModel.
      This field is only populated for tuning jobs that enable intermediate
      checkpoints.""",
  )


# Plain-dict mirror of TunedModel.
class TunedModelDict(TypedDict, total=False):
  """TunedModel for the Tuned Model of a Tuning Job."""

  model: Optional[str]
  """Output only. The resource name of the TunedModel.
      Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
      When tuning from a base model, the version_id will be 1.
      For continuous tuning, the version id will be incremented by 1 from the
      last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
      """

  endpoint: Optional[str]
  """Output only. A resource name of an Endpoint.
      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
      """

  checkpoints: Optional[list[TunedModelCheckpointDict]]
  """The checkpoints associated with this TunedModel.
      This field is only populated for tuning jobs that enable intermediate
      checkpoints."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
TunedModelOrDict = Union[TunedModel, TunedModelDict]


class SupervisedHyperParameters(_common.BaseModel):
  """Hyperparameters for SFT. This data type is not supported in Gemini API."""

  adapter_size: Optional[AdapterSize] = Field(
      default=None, description="""Optional. Adapter size for tuning."""
  )
  batch_size: Optional[int] = Field(
      default=None,
      description="""Optional. Batch size for tuning. This feature is only available for open source models.""",
  )
  epoch_count: Optional[int] = Field(
      default=None,
      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
  )
  learning_rate: Optional[float] = Field(
      default=None,
      description="""Optional. Learning rate for tuning. Mutually exclusive with `learning_rate_multiplier`. This feature is only available for open source models.""",
  )
  learning_rate_multiplier: Optional[float] = Field(
      default=None,
      description="""Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`. This feature is only available for 1P models.""",
  )


# Plain-dict mirror of SupervisedHyperParameters.
class SupervisedHyperParametersDict(TypedDict, total=False):
  """Hyperparameters for SFT. This data type is not supported in Gemini API."""

  adapter_size: Optional[AdapterSize]
  """Optional. Adapter size for tuning."""

  batch_size: Optional[int]
  """Optional. Batch size for tuning. This feature is only available for open source models."""

  epoch_count: Optional[int]
  """Optional. Number of complete passes the model makes over the entire training dataset during training."""

  learning_rate: Optional[float]
  """Optional. Learning rate for tuning. Mutually exclusive with `learning_rate_multiplier`. This feature is only available for open source models."""

  learning_rate_multiplier: Optional[float]
  """Optional. Multiplier for adjusting the default learning rate. Mutually exclusive with `learning_rate`. This feature is only available for 1P models."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
SupervisedHyperParametersOrDict = Union[
    SupervisedHyperParameters, SupervisedHyperParametersDict
]


class SupervisedTuningSpec(_common.BaseModel):
  """Supervised tuning spec for tuning."""

  export_last_checkpoint_only: Optional[bool] = Field(
      default=None,
      description="""Optional. If set to true, disable intermediate checkpoints for SFT and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT. Default is false.""",
  )
  hyper_parameters: Optional[SupervisedHyperParameters] = Field(
      default=None, description="""Optional. Hyperparameters for SFT."""
  )
  training_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
  )
  tuning_mode: Optional[TuningMode] = Field(
      default=None, description="""Tuning mode."""
  )
  validation_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
  )


# Plain-dict mirror of SupervisedTuningSpec.
class SupervisedTuningSpecDict(TypedDict, total=False):
  """Supervised tuning spec for tuning."""

  export_last_checkpoint_only: Optional[bool]
  """Optional. If set to true, disable intermediate checkpoints for SFT and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT. Default is false."""

  hyper_parameters: Optional[SupervisedHyperParametersDict]
  """Optional. Hyperparameters for SFT."""

  training_dataset_uri: Optional[str]
  """Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""

  tuning_mode: Optional[TuningMode]
  """Tuning mode."""

  validation_dataset_uri: Optional[str]
  """Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
SupervisedTuningSpecOrDict = Union[
    SupervisedTuningSpec, SupervisedTuningSpecDict
]


class PreferenceOptimizationHyperParameters(_common.BaseModel):
  """Hyperparameters for Preference Optimization.

  This data type is not supported in Gemini API.
  """

  adapter_size: Optional[AdapterSize] = Field(
      default=None,
      description="""Optional. Adapter size for preference optimization.""",
  )
  beta: Optional[float] = Field(
      default=None,
      description="""Optional. Weight for KL Divergence regularization.""",
  )
  epoch_count: Optional[int] = Field(
      default=None,
      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
  )
  learning_rate_multiplier: Optional[float] = Field(
      default=None,
      description="""Optional. Multiplier for adjusting the default learning rate.""",
  )


# Plain-dict mirror of PreferenceOptimizationHyperParameters.
class PreferenceOptimizationHyperParametersDict(TypedDict, total=False):
  """Hyperparameters for Preference Optimization.

  This data type is not supported in Gemini API.
  """

  adapter_size: Optional[AdapterSize]
  """Optional. Adapter size for preference optimization."""

  beta: Optional[float]
  """Optional. Weight for KL Divergence regularization."""

  epoch_count: Optional[int]
  """Optional. Number of complete passes the model makes over the entire training dataset during training."""

  learning_rate_multiplier: Optional[float]
  """Optional. Multiplier for adjusting the default learning rate."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
PreferenceOptimizationHyperParametersOrDict = Union[
    PreferenceOptimizationHyperParameters,
    PreferenceOptimizationHyperParametersDict,
]


class PreferenceOptimizationSpec(_common.BaseModel):
  """Preference optimization tuning spec for tuning."""

  export_last_checkpoint_only: Optional[bool] = Field(
      default=None,
      description="""Optional. If set to true, disable intermediate checkpoints for Preference Optimization and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for Preference Optimization. Default is false.""",
  )
  hyper_parameters: Optional[PreferenceOptimizationHyperParameters] = Field(
      default=None,
      description="""Optional. Hyperparameters for Preference Optimization.""",
  )
  training_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Required. Cloud Storage path to file containing training dataset for preference optimization tuning. The dataset must be formatted as a JSONL file.""",
  )
  validation_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Optional. Cloud Storage path to file containing validation dataset for preference optimization tuning. The dataset must be formatted as a JSONL file.""",
  )


# Plain-dict mirror of PreferenceOptimizationSpec.
class PreferenceOptimizationSpecDict(TypedDict, total=False):
  """Preference optimization tuning spec for tuning."""

  export_last_checkpoint_only: Optional[bool]
  """Optional. If set to true, disable intermediate checkpoints for Preference Optimization and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints for Preference Optimization. Default is false."""

  hyper_parameters: Optional[PreferenceOptimizationHyperParametersDict]
  """Optional. Hyperparameters for Preference Optimization."""

  training_dataset_uri: Optional[str]
  """Required. Cloud Storage path to file containing training dataset for preference optimization tuning. The dataset must be formatted as a JSONL file."""

  validation_dataset_uri: Optional[str]
  """Optional. Cloud Storage path to file containing validation dataset for preference optimization tuning. The dataset must be formatted as a JSONL file."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
PreferenceOptimizationSpecOrDict = Union[
    PreferenceOptimizationSpec, PreferenceOptimizationSpecDict
]


class GcsDestination(_common.BaseModel):
  """The Google Cloud Storage location where the output is to be written to."""

  output_uri_prefix: Optional[str] = Field(
      default=None,
      description="""Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist.""",
  )

  @pydantic.model_validator(mode='after')
  def _validate_gcs_path(self) -> 'GcsDestination':
    """Rejects any non-empty prefix that is not a ``gs://`` URI."""
    prefix = self.output_uri_prefix
    if prefix and not prefix.startswith('gs://'):
      raise ValueError(
          'output_uri_prefix must be a valid GCS path starting with "gs://".'
      )
    return self


class GcsDestinationDict(TypedDict, total=False):
  """The Google Cloud Storage location where the output is to be written to."""

  output_uri_prefix: Optional[str]
  """Required. Google Cloud Storage URI to output directory. If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
GcsDestinationOrDict = Union[GcsDestination, GcsDestinationDict]


class OutputConfig(_common.BaseModel):
  """Config for evaluation output."""

  gcs_destination: Optional[GcsDestination] = Field(
      default=None,
      description="""Cloud storage destination for evaluation output.""",
  )


# Plain-dict mirror of OutputConfig.
class OutputConfigDict(TypedDict, total=False):
  """Config for evaluation output."""

  gcs_destination: Optional[GcsDestinationDict]
  """Cloud storage destination for evaluation output."""


# Union type accepting either the Pydantic model or its TypedDict equivalent.
OutputConfigOrDict = Union[OutputConfig, OutputConfigDict]


# Settings for the judge ("autorater") model used to score evaluations.
# All fields are optional; server-side defaults apply when unset.
class AutoraterConfig(_common.BaseModel):
  """Autorater config used for evaluation."""

  sampling_count: Optional[int] = Field(
      default=None,
      description="""Number of samples for each instance in the dataset.
  If not specified, the default is 4. Minimum value is 1, maximum value
  is 32.""",
  )
  flip_enabled: Optional[bool] = Field(
      default=None,
      description="""Optional. Default is true. Whether to flip the candidate and baseline
  responses. This is only applicable to the pairwise metric. If enabled, also
  provide PairwiseMetricSpec.candidate_response_field_name and
  PairwiseMetricSpec.baseline_response_field_name. When rendering
  PairwiseMetricSpec.metric_prompt_template, the candidate and baseline
  fields will be flipped for half of the samples to reduce bias.""",
  )
  autorater_model: Optional[str] = Field(
      default=None,
      description="""The fully qualified name of the publisher model or tuned autorater
  endpoint to use.

  Publisher model format:
  `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`

  Tuned model endpoint format:
  `projects/{project}/locations/{location}/endpoints/{endpoint}`""",
  )
  generation_config: Optional[GenerationConfig] = Field(
      default=None,
      description="""Configuration options for model generation and outputs.""",
  )


# Plain-dict mirror of AutoraterConfig.
class AutoraterConfigDict(TypedDict, total=False):
  """Autorater config used for evaluation."""

  sampling_count: Optional[int]
  """Number of samples for each instance in the dataset.
  If not specified, the default is 4. Minimum value is 1, maximum value
  is 32."""

  flip_enabled: Optional[bool]
  """Optional. Default is true. Whether to flip the candidate and baseline
  responses. This is only applicable to the pairwise metric. If enabled, also
  provide PairwiseMetricSpec.candidate_response_field_name and
  PairwiseMetricSpec.baseline_response_field_name. When rendering
  PairwiseMetricSpec.metric_prompt_template, the candidate and baseline
  fields will be flipped for half of the samples to reduce bias."""

  autorater_model: Optional[str]
  """The fully qualified name of the publisher model or tuned autorater
  endpoint to use.

  Publisher model format:
  `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`

  Tuned model endpoint format:
  `projects/{project}/locations/{location}/endpoints/{endpoint}`"""

  generation_config: Optional[GenerationConfigDict]
  """Configuration options for model generation and outputs."""


AutoraterConfigOrDict = Union[AutoraterConfig, AutoraterConfigDict]


class Metric(_common.BaseModel):
  """The metric used for evaluation.

  Extra (metric-specific) fields are allowed and are preserved on the
  instance; the declared fields cover the common configuration shared by
  all metrics.
  """

  name: Optional[str] = Field(
      default=None, description="""The name of the metric."""
  )
  custom_function: Optional[Callable[..., Any]] = Field(
      default=None,
      description="""The custom function that defines the end-to-end logic for metric computation.""",
  )
  prompt_template: Optional[str] = Field(
      default=None, description="""The prompt template for the metric."""
  )
  judge_model_system_instruction: Optional[str] = Field(
      default=None,
      description="""The system instruction for the judge model.""",
  )
  return_raw_output: Optional[bool] = Field(
      default=None,
      description="""Whether to return the raw output from the judge model.""",
  )
  parse_and_reduce_fn: Optional[Callable[..., Any]] = Field(
      default=None,
      description="""The parse and reduce function for the judge model.""",
  )
  aggregate_summary_fn: Optional[Callable[..., Any]] = Field(
      default=None,
      description="""The aggregate summary function for the judge model.""",
  )

  # Allow extra fields to support metric-specific config fields.
  model_config = ConfigDict(extra='allow')

  _is_predefined: bool = PrivateAttr(default=False)
  """A boolean indicating whether the metric is predefined."""

  _config_source: Optional[str] = PrivateAttr(default=None)
  """An optional string indicating the source of the metric configuration."""

  _version: Optional[str] = PrivateAttr(default=None)
  """An optional string indicating the version of the metric."""

  @model_validator(mode='after')  # type: ignore[arg-type]
  def validate_name(self) -> 'Metric':
    """Requires a non-empty metric name and normalizes it to lowercase."""
    if not self.name:
      raise ValueError('Metric name cannot be empty.')
    self.name = self.name.lower()
    return self

  def to_yaml_file(self, file_path: str, version: Optional[str] = None) -> None:
    """Dumps the metric object to a YAML file.

    Callable-typed fields (e.g. `custom_function`, `parse_and_reduce_fn`)
    cannot be represented in YAML and are excluded from the output. Unset
    and None-valued fields are also omitted.

    Args:
        file_path: The path to the YAML file.
        version: Optional version string to include in the YAML output.

    Raises:
        ImportError: If the pyyaml library is not installed.
    """
    if yaml is None:
      raise ImportError(
          'YAML serialization requires the pyyaml library. Please install'
          " it using 'pip install google-cloud-aiplatform[evaluation]'."
      )

    # Collect fields whose annotation is Callable, or a Union containing a
    # Callable (e.g. Optional[Callable[..., Any]]), so they can be excluded.
    # NOTE: `model_fields` is read from the class; instance access is
    # deprecated since pydantic 2.11.
    fields_to_exclude_callables = set()
    for field_name, field_info in type(self).model_fields.items():
      annotation = field_info.annotation
      origin = typing.get_origin(annotation)

      is_field_callable_type = False
      if annotation is Callable or origin is Callable:  # type: ignore[comparison-overlap]
        is_field_callable_type = True
      elif origin is Union:
        args = typing.get_args(annotation)
        if any(
            arg is Callable or typing.get_origin(arg) is Callable
            for arg in args
        ):
          is_field_callable_type = True

      if is_field_callable_type:
        fields_to_exclude_callables.add(field_name)

    data_to_dump = self.model_dump(
        exclude_unset=True,
        exclude_none=True,
        mode='json',
        # model_dump expects None (not an empty set) when nothing is excluded.
        exclude=fields_to_exclude_callables or None,
    )

    if version:
      data_to_dump['version'] = version

    with open(file_path, 'w', encoding='utf-8') as f:
      yaml.dump(data_to_dump, f, sort_keys=False, allow_unicode=True)


# Plain-dict mirror of Metric. Unlike the model, it does not run the
# name-normalizing validator and has no extra-field handling.
class MetricDict(TypedDict, total=False):
  """The metric used for evaluation."""

  name: Optional[str]
  """The name of the metric."""

  custom_function: Optional[Callable[..., Any]]
  """The custom function that defines the end-to-end logic for metric computation."""

  prompt_template: Optional[str]
  """The prompt template for the metric."""

  judge_model_system_instruction: Optional[str]
  """The system instruction for the judge model."""

  return_raw_output: Optional[bool]
  """Whether to return the raw output from the judge model."""

  parse_and_reduce_fn: Optional[Callable[..., Any]]
  """The parse and reduce function for the judge model."""

  aggregate_summary_fn: Optional[Callable[..., Any]]
  """The aggregate summary function for the judge model."""


MetricOrDict = Union[Metric, MetricDict]


# Top-level evaluation configuration: which metrics to run, where to write
# output, and how the autorater is configured.
class EvaluationConfig(_common.BaseModel):
  """Evaluation config for tuning."""

  metrics: Optional[list[Metric]] = Field(
      default=None, description="""The metrics used for evaluation."""
  )
  output_config: Optional[OutputConfig] = Field(
      default=None, description="""Config for evaluation output."""
  )
  autorater_config: Optional[AutoraterConfig] = Field(
      default=None, description="""Autorater config for evaluation."""
  )


# Plain-dict mirror of EvaluationConfig.
class EvaluationConfigDict(TypedDict, total=False):
  """Evaluation config for tuning."""

  metrics: Optional[list[MetricDict]]
  """The metrics used for evaluation."""

  output_config: Optional[OutputConfigDict]
  """Config for evaluation output."""

  autorater_config: Optional[AutoraterConfigDict]
  """Autorater config for evaluation."""


EvaluationConfigOrDict = Union[EvaluationConfig, EvaluationConfigDict]


# Mirrors google.rpc.Status (code/message/details error triple).
class GoogleRpcStatus(_common.BaseModel):
  """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs.

  It is used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors). This data type is
  not supported in Gemini API.
  """

  code: Optional[int] = Field(
      default=None,
      description="""The status code, which should be an enum value of google.rpc.Code.""",
  )
  details: Optional[list[dict[str, Any]]] = Field(
      default=None,
      description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""",
  )
  message: Optional[str] = Field(
      default=None,
      description="""A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.""",
  )


# Plain-dict mirror of GoogleRpcStatus.
class GoogleRpcStatusDict(TypedDict, total=False):
  """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs.

  It is used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors). This data type is
  not supported in Gemini API.
  """

  code: Optional[int]
  """The status code, which should be an enum value of google.rpc.Code."""

  details: Optional[list[dict[str, Any]]]
  """A list of messages that carry the error details. There is a common set of message types for APIs to use."""

  message: Optional[str]
  """A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client."""


GoogleRpcStatusOrDict = Union[GoogleRpcStatus, GoogleRpcStatusDict]


# Identifies an existing tuned model (and optionally a checkpoint) to
# continue tuning from. Vertex AI only.
class PreTunedModel(_common.BaseModel):
  """A pre-tuned model for continuous tuning.

  This data type is not supported in Gemini API.
  """

  base_model: Optional[str] = Field(
      default=None,
      description="""Output only. The name of the base model this PreTunedModel was tuned from.""",
  )
  checkpoint_id: Optional[str] = Field(
      default=None,
      description="""Optional. The source checkpoint id. If not specified, the default checkpoint will be used.""",
  )
  tuned_model_name: Optional[str] = Field(
      default=None,
      description="""The resource name of the Model. E.g., a model resource name with a specified version id or alias: `projects/{project}/locations/{location}/models/{model}@{version_id}` `projects/{project}/locations/{location}/models/{model}@{alias}` Or, omit the version id to use the default version: `projects/{project}/locations/{location}/models/{model}`""",
  )


# Plain-dict mirror of PreTunedModel.
class PreTunedModelDict(TypedDict, total=False):
  """A pre-tuned model for continuous tuning.

  This data type is not supported in Gemini API.
  """

  base_model: Optional[str]
  """Output only. The name of the base model this PreTunedModel was tuned from."""

  checkpoint_id: Optional[str]
  """Optional. The source checkpoint id. If not specified, the default checkpoint will be used."""

  tuned_model_name: Optional[str]
  """The resource name of the Model. E.g., a model resource name with a specified version id or alias: `projects/{project}/locations/{location}/models/{model}@{version_id}` `projects/{project}/locations/{location}/models/{model}@{alias}` Or, omit the version id to use the default version: `projects/{project}/locations/{location}/models/{model}`"""


PreTunedModelOrDict = Union[PreTunedModel, PreTunedModelDict]


# One histogram bucket: [left, right) bound pair plus a count of values.
class DatasetDistributionDistributionBucket(_common.BaseModel):
  """Dataset bucket used to create a histogram for the distribution given a population of values.

  This data type is not supported in Gemini API.
  """

  count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of values in the bucket.""",
  )
  left: Optional[float] = Field(
      default=None, description="""Output only. Left bound of the bucket."""
  )
  right: Optional[float] = Field(
      default=None, description="""Output only. Right bound of the bucket."""
  )


# Plain-dict mirror of DatasetDistributionDistributionBucket.
class DatasetDistributionDistributionBucketDict(TypedDict, total=False):
  """Dataset bucket used to create a histogram for the distribution given a population of values.

  This data type is not supported in Gemini API.
  """

  count: Optional[int]
  """Output only. Number of values in the bucket."""

  left: Optional[float]
  """Output only. Left bound of the bucket."""

  right: Optional[float]
  """Output only. Right bound of the bucket."""


DatasetDistributionDistributionBucketOrDict = Union[
    DatasetDistributionDistributionBucket,
    DatasetDistributionDistributionBucketDict,
]


# Summary statistics (min/max/mean/median/percentiles/sum) plus histogram
# buckets for one population of dataset values.
class DatasetDistribution(_common.BaseModel):
  """Distribution computed over a tuning dataset.

  This data type is not supported in Gemini API.
  """

  buckets: Optional[list[DatasetDistributionDistributionBucket]] = Field(
      default=None, description="""Output only. Defines the histogram bucket."""
  )
  max: Optional[float] = Field(
      default=None,
      description="""Output only. The maximum of the population values.""",
  )
  mean: Optional[float] = Field(
      default=None,
      description="""Output only. The arithmetic mean of the values in the population.""",
  )
  median: Optional[float] = Field(
      default=None,
      description="""Output only. The median of the values in the population.""",
  )
  min: Optional[float] = Field(
      default=None,
      description="""Output only. The minimum of the population values.""",
  )
  p5: Optional[float] = Field(
      default=None,
      description="""Output only. The 5th percentile of the values in the population.""",
  )
  p95: Optional[float] = Field(
      default=None,
      description="""Output only. The 95th percentile of the values in the population.""",
  )
  sum: Optional[float] = Field(
      default=None,
      description="""Output only. Sum of a given population of values.""",
  )


# Plain-dict mirror of DatasetDistribution.
class DatasetDistributionDict(TypedDict, total=False):
  """Distribution computed over a tuning dataset.

  This data type is not supported in Gemini API.
  """

  buckets: Optional[list[DatasetDistributionDistributionBucketDict]]
  """Output only. Defines the histogram bucket."""

  max: Optional[float]
  """Output only. The maximum of the population values."""

  mean: Optional[float]
  """Output only. The arithmetic mean of the values in the population."""

  median: Optional[float]
  """Output only. The median of the values in the population."""

  min: Optional[float]
  """Output only. The minimum of the population values."""

  p5: Optional[float]
  """Output only. The 5th percentile of the values in the population."""

  p95: Optional[float]
  """Output only. The 95th percentile of the values in the population."""

  sum: Optional[float]
  """Output only. Sum of a given population of values."""


DatasetDistributionOrDict = Union[DatasetDistribution, DatasetDistributionDict]


# Aggregate counts plus per-token/per-message distributions for a tuning
# dataset; `user_dataset_examples` carries sample Content entries.
class DatasetStats(_common.BaseModel):
  """Statistics computed over a tuning dataset.

  This data type is not supported in Gemini API.
  """

  total_billable_character_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of billable characters in the tuning dataset.""",
  )
  total_tuning_character_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of tuning characters in the tuning dataset.""",
  )
  tuning_dataset_example_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of examples in the tuning dataset.""",
  )
  tuning_step_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of tuning steps for this Tuning Job.""",
  )
  user_dataset_examples: Optional[list[Content]] = Field(
      default=None,
      description="""Output only. Sample user messages in the training dataset uri.""",
  )
  user_input_token_distribution: Optional[DatasetDistribution] = Field(
      default=None,
      description="""Output only. Dataset distributions for the user input tokens.""",
  )
  user_message_per_example_distribution: Optional[DatasetDistribution] = Field(
      default=None,
      description="""Output only. Dataset distributions for the messages per example.""",
  )
  user_output_token_distribution: Optional[DatasetDistribution] = Field(
      default=None,
      description="""Output only. Dataset distributions for the user output tokens.""",
  )


# Plain-dict mirror of DatasetStats.
class DatasetStatsDict(TypedDict, total=False):
  """Statistics computed over a tuning dataset.

  This data type is not supported in Gemini API.
  """

  total_billable_character_count: Optional[int]
  """Output only. Number of billable characters in the tuning dataset."""

  total_tuning_character_count: Optional[int]
  """Output only. Number of tuning characters in the tuning dataset."""

  tuning_dataset_example_count: Optional[int]
  """Output only. Number of examples in the tuning dataset."""

  tuning_step_count: Optional[int]
  """Output only. Number of tuning steps for this Tuning Job."""

  user_dataset_examples: Optional[list[ContentDict]]
  """Output only. Sample user messages in the training dataset uri."""

  user_input_token_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for the user input tokens."""

  user_message_per_example_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for the messages per example."""

  user_output_token_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for the user output tokens."""


DatasetStatsOrDict = Union[DatasetStats, DatasetStatsDict]


# Distillation wrapper around the training-dataset statistics.
class DistillationDataStats(_common.BaseModel):
  """Statistics computed for datasets used for distillation.

  This data type is not supported in Gemini API.
  """

  training_dataset_stats: Optional[DatasetStats] = Field(
      default=None,
      description="""Output only. Statistics computed for the training dataset.""",
  )


# Plain-dict mirror of DistillationDataStats.
class DistillationDataStatsDict(TypedDict, total=False):
  """Statistics computed for datasets used for distillation.

  This data type is not supported in Gemini API.
  """

  training_dataset_stats: Optional[DatasetStatsDict]
  """Output only. Statistics computed for the training dataset."""


DistillationDataStatsOrDict = Union[
    DistillationDataStats, DistillationDataStatsDict
]


# A (completion, score) pair used inside a preference-optimization example.
class GeminiPreferenceExampleCompletion(_common.BaseModel):
  """Completion and its preference score.

  This data type is not supported in Gemini API.
  """

  completion: Optional[Content] = Field(
      default=None,
      description="""Single turn completion for the given prompt.""",
  )
  score: Optional[float] = Field(
      default=None, description="""The score for the given completion."""
  )


# Plain-dict mirror of GeminiPreferenceExampleCompletion.
class GeminiPreferenceExampleCompletionDict(TypedDict, total=False):
  """Completion and its preference score.

  This data type is not supported in Gemini API.
  """

  completion: Optional[ContentDict]
  """Single turn completion for the given prompt."""

  score: Optional[float]
  """The score for the given completion."""


GeminiPreferenceExampleCompletionOrDict = Union[
    GeminiPreferenceExampleCompletion, GeminiPreferenceExampleCompletionDict
]


# A prompt (multi-turn contents) with its scored candidate completions.
class GeminiPreferenceExample(_common.BaseModel):
  """Input example for preference optimization.

  This data type is not supported in Gemini API.
  """

  completions: Optional[list[GeminiPreferenceExampleCompletion]] = Field(
      default=None, description="""List of completions for a given prompt."""
  )
  contents: Optional[list[Content]] = Field(
      default=None,
      description="""Multi-turn contents that represents the Prompt.""",
  )


# Plain-dict mirror of GeminiPreferenceExample.
class GeminiPreferenceExampleDict(TypedDict, total=False):
  """Input example for preference optimization.

  This data type is not supported in Gemini API.
  """

  completions: Optional[list[GeminiPreferenceExampleCompletionDict]]
  """List of completions for a given prompt."""

  contents: Optional[list[ContentDict]]
  """Multi-turn contents that represents the Prompt."""


GeminiPreferenceExampleOrDict = Union[
    GeminiPreferenceExample, GeminiPreferenceExampleDict
]


# Dataset statistics specific to preference-optimization tuning jobs:
# score distributions plus the usual token/example counts.
class PreferenceOptimizationDataStats(_common.BaseModel):
  """Statistics computed for datasets used for preference optimization.

  This data type is not supported in Gemini API.
  """

  score_variance_per_example_distribution: Optional[DatasetDistribution] = (
      Field(
          default=None,
          description="""Output only. Dataset distributions for scores variance per example.""",
      )
  )
  scores_distribution: Optional[DatasetDistribution] = Field(
      default=None,
      description="""Output only. Dataset distributions for scores.""",
  )
  total_billable_token_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of billable tokens in the tuning dataset.""",
  )
  tuning_dataset_example_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of examples in the tuning dataset.""",
  )
  tuning_step_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of tuning steps for this Tuning Job.""",
  )
  user_dataset_examples: Optional[list[GeminiPreferenceExample]] = Field(
      default=None,
      description="""Output only. Sample user examples in the training dataset.""",
  )
  user_input_token_distribution: Optional[DatasetDistribution] = Field(
      default=None,
      description="""Output only. Dataset distributions for the user input tokens.""",
  )
  user_output_token_distribution: Optional[DatasetDistribution] = Field(
      default=None,
      description="""Output only. Dataset distributions for the user output tokens.""",
  )


# Plain-dict mirror of PreferenceOptimizationDataStats.
class PreferenceOptimizationDataStatsDict(TypedDict, total=False):
  """Statistics computed for datasets used for preference optimization.

  This data type is not supported in Gemini API.
  """

  score_variance_per_example_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for scores variance per example."""

  scores_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for scores."""

  total_billable_token_count: Optional[int]
  """Output only. Number of billable tokens in the tuning dataset."""

  tuning_dataset_example_count: Optional[int]
  """Output only. Number of examples in the tuning dataset."""

  tuning_step_count: Optional[int]
  """Output only. Number of tuning steps for this Tuning Job."""

  user_dataset_examples: Optional[list[GeminiPreferenceExampleDict]]
  """Output only. Sample user examples in the training dataset."""

  user_input_token_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for the user input tokens."""

  user_output_token_distribution: Optional[DatasetDistributionDict]
  """Output only. Dataset distributions for the user output tokens."""


PreferenceOptimizationDataStatsOrDict = Union[
    PreferenceOptimizationDataStats, PreferenceOptimizationDataStatsDict
]


# Histogram bucket for supervised tuning. Unlike
# DatasetDistributionDistributionBucket, `count` is a float here.
class SupervisedTuningDatasetDistributionDatasetBucket(_common.BaseModel):
  """Dataset bucket used to create a histogram for the distribution given a population of values.

  This data type is not supported in Gemini API.
  """

  count: Optional[float] = Field(
      default=None,
      description="""Output only. Number of values in the bucket.""",
  )
  left: Optional[float] = Field(
      default=None, description="""Output only. Left bound of the bucket."""
  )
  right: Optional[float] = Field(
      default=None, description="""Output only. Right bound of the bucket."""
  )


# Plain-dict mirror of SupervisedTuningDatasetDistributionDatasetBucket.
class SupervisedTuningDatasetDistributionDatasetBucketDict(
    TypedDict, total=False
):
  """Dataset bucket used to create a histogram for the distribution given a population of values.

  This data type is not supported in Gemini API.
  """

  count: Optional[float]
  """Output only. Number of values in the bucket."""

  left: Optional[float]
  """Output only. Left bound of the bucket."""

  right: Optional[float]
  """Output only. Right bound of the bucket."""


SupervisedTuningDatasetDistributionDatasetBucketOrDict = Union[
    SupervisedTuningDatasetDistributionDatasetBucket,
    SupervisedTuningDatasetDistributionDatasetBucketDict,
]


# Supervised-tuning variant of DatasetDistribution: integer `sum`, an extra
# `billable_sum`, and float-count histogram buckets.
class SupervisedTuningDatasetDistribution(_common.BaseModel):
  """Dataset distribution for Supervised Tuning.

  This data type is not supported in Gemini API.
  """

  billable_sum: Optional[int] = Field(
      default=None,
      description="""Output only. Sum of a given population of values that are billable.""",
  )
  buckets: Optional[list[SupervisedTuningDatasetDistributionDatasetBucket]] = (
      Field(
          default=None,
          description="""Output only. Defines the histogram bucket.""",
      )
  )
  max: Optional[float] = Field(
      default=None,
      description="""Output only. The maximum of the population values.""",
  )
  mean: Optional[float] = Field(
      default=None,
      description="""Output only. The arithmetic mean of the values in the population.""",
  )
  median: Optional[float] = Field(
      default=None,
      description="""Output only. The median of the values in the population.""",
  )
  min: Optional[float] = Field(
      default=None,
      description="""Output only. The minimum of the population values.""",
  )
  p5: Optional[float] = Field(
      default=None,
      description="""Output only. The 5th percentile of the values in the population.""",
  )
  p95: Optional[float] = Field(
      default=None,
      description="""Output only. The 95th percentile of the values in the population.""",
  )
  sum: Optional[int] = Field(
      default=None,
      description="""Output only. Sum of a given population of values.""",
  )


# Plain-dict mirror of SupervisedTuningDatasetDistribution.
class SupervisedTuningDatasetDistributionDict(TypedDict, total=False):
  """Dataset distribution for Supervised Tuning.

  This data type is not supported in Gemini API.
  """

  billable_sum: Optional[int]
  """Output only. Sum of a given population of values that are billable."""

  buckets: Optional[list[SupervisedTuningDatasetDistributionDatasetBucketDict]]
  """Output only. Defines the histogram bucket."""

  max: Optional[float]
  """Output only. The maximum of the population values."""

  mean: Optional[float]
  """Output only. The arithmetic mean of the values in the population."""

  median: Optional[float]
  """Output only. The median of the values in the population."""

  min: Optional[float]
  """Output only. The minimum of the population values."""

  p5: Optional[float]
  """Output only. The 5th percentile of the values in the population."""

  p95: Optional[float]
  """Output only. The 95th percentile of the values in the population."""

  sum: Optional[int]
  """Output only. Sum of a given population of values."""


SupervisedTuningDatasetDistributionOrDict = Union[
    SupervisedTuningDatasetDistribution, SupervisedTuningDatasetDistributionDict
]


# SFT dataset statistics: aggregate counts, dropped-example bookkeeping
# (indices are 1-based and pair with dropped_example_reasons), and
# per-token/per-message distributions.
class SupervisedTuningDataStats(_common.BaseModel):
  """Tuning data statistics for Supervised Tuning.

  This data type is not supported in Gemini API.
  """

  dropped_example_reasons: Optional[list[str]] = Field(
      default=None,
      description="""Output only. For each index in `truncated_example_indices`, the user-facing reason why the example was dropped.""",
  )
  total_billable_character_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of billable characters in the tuning dataset.""",
  )
  total_billable_token_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of billable tokens in the tuning dataset.""",
  )
  total_truncated_example_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of examples in the dataset that have been dropped. An example can be dropped for reasons including: too many tokens, contains an invalid image, contains too many images, etc.""",
  )
  total_tuning_character_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of tuning characters in the tuning dataset.""",
  )
  truncated_example_indices: Optional[list[int]] = Field(
      default=None,
      description="""Output only. A partial sample of the indices (starting from 1) of the dropped examples.""",
  )
  tuning_dataset_example_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of examples in the tuning dataset.""",
  )
  tuning_step_count: Optional[int] = Field(
      default=None,
      description="""Output only. Number of tuning steps for this Tuning Job.""",
  )
  user_dataset_examples: Optional[list[Content]] = Field(
      default=None,
      description="""Output only. Sample user messages in the training dataset uri.""",
  )
  user_input_token_distribution: Optional[
      SupervisedTuningDatasetDistribution
  ] = Field(
      default=None,
      description="""Output only. Dataset distributions for the user input tokens.""",
  )
  user_message_per_example_distribution: Optional[
      SupervisedTuningDatasetDistribution
  ] = Field(
      default=None,
      description="""Output only. Dataset distributions for the messages per example.""",
  )
  user_output_token_distribution: Optional[
      SupervisedTuningDatasetDistribution
  ] = Field(
      default=None,
      description="""Output only. Dataset distributions for the user output tokens.""",
  )


# Plain-dict mirror of SupervisedTuningDataStats.
class SupervisedTuningDataStatsDict(TypedDict, total=False):
  """Tuning data statistics for Supervised Tuning.

  This data type is not supported in Gemini API.
  """

  dropped_example_reasons: Optional[list[str]]
  """Output only. For each index in `truncated_example_indices`, the user-facing reason why the example was dropped."""

  total_billable_character_count: Optional[int]
  """Output only. Number of billable characters in the tuning dataset."""

  total_billable_token_count: Optional[int]
  """Output only. Number of billable tokens in the tuning dataset."""

  total_truncated_example_count: Optional[int]
  """Output only. The number of examples in the dataset that have been dropped. An example can be dropped for reasons including: too many tokens, contains an invalid image, contains too many images, etc."""

  total_tuning_character_count: Optional[int]
  """Output only. Number of tuning characters in the tuning dataset."""

  truncated_example_indices: Optional[list[int]]
  """Output only. A partial sample of the indices (starting from 1) of the dropped examples."""

  tuning_dataset_example_count: Optional[int]
  """Output only. Number of examples in the tuning dataset."""

  tuning_step_count: Optional[int]
  """Output only. Number of tuning steps for this Tuning Job."""

  user_dataset_examples: Optional[list[ContentDict]]
  """Output only. Sample user messages in the training dataset uri."""

  user_input_token_distribution: Optional[
      SupervisedTuningDatasetDistributionDict
  ]
  """Output only. Dataset distributions for the user input tokens."""

  user_message_per_example_distribution: Optional[
      SupervisedTuningDatasetDistributionDict
  ]
  """Output only. Dataset distributions for the messages per example."""

  user_output_token_distribution: Optional[
      SupervisedTuningDatasetDistributionDict
  ]
  """Output only. Dataset distributions for the user output tokens."""


SupervisedTuningDataStatsOrDict = Union[
    SupervisedTuningDataStats, SupervisedTuningDataStatsDict
]


# Wrapper holding per-tuning-method statistics; exactly one of the sub-stats
# fields is expected to be populated depending on the tuning method used.
# (Presumably — the server controls population; confirm against the API.)
class TuningDataStats(_common.BaseModel):
  """The tuning data statistic values for TuningJob.

  This data type is not supported in Gemini API.
  """

  distillation_data_stats: Optional[DistillationDataStats] = Field(
      default=None, description="""Output only. Statistics for distillation."""
  )
  preference_optimization_data_stats: Optional[
      PreferenceOptimizationDataStats
  ] = Field(
      default=None,
      description="""Output only. Statistics for preference optimization.""",
  )
  supervised_tuning_data_stats: Optional[SupervisedTuningDataStats] = Field(
      default=None, description="""The SFT Tuning data stats."""
  )


# TypedDict counterpart of TuningDataStats; nested messages use *Dict forms.
class TuningDataStatsDict(TypedDict, total=False):
  """The tuning data statistic values for TuningJob.

  This data type is not supported in Gemini API.
  """

  distillation_data_stats: Optional[DistillationDataStatsDict]
  """Output only. Statistics for distillation."""

  preference_optimization_data_stats: Optional[
      PreferenceOptimizationDataStatsDict
  ]
  """Output only. Statistics for preference optimization."""

  supervised_tuning_data_stats: Optional[SupervisedTuningDataStatsDict]
  """The SFT Tuning data stats."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
TuningDataStatsOrDict = Union[TuningDataStats, TuningDataStatsDict]


# Customer-managed encryption key (CMEK) configuration for Vertex resources.
class EncryptionSpec(_common.BaseModel):
  """Represents a customer-managed encryption key spec that can be applied to a top-level resource.

  This data type is not supported in Gemini API.
  """

  kms_key_name: Optional[str] = Field(
      default=None,
      description="""Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""",
  )


# TypedDict counterpart of EncryptionSpec.
class EncryptionSpecDict(TypedDict, total=False):
  """Represents a customer-managed encryption key spec that can be applied to a top-level resource.

  This data type is not supported in Gemini API.
  """

  kms_key_name: Optional[str]
  """Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
EncryptionSpecOrDict = Union[EncryptionSpec, EncryptionSpecDict]


# Tuning spec for partner (open-source / third-party) models; hyperparameters
# are an opaque dict because valid keys/ranges vary per base model.
class PartnerModelTuningSpec(_common.BaseModel):
  """Tuning spec for Partner models.

  This data type is not supported in Gemini API.
  """

  hyper_parameters: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""",
  )
  training_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
  )
  validation_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""",
  )


# TypedDict counterpart of PartnerModelTuningSpec.
class PartnerModelTuningSpecDict(TypedDict, total=False):
  """Tuning spec for Partner models.

  This data type is not supported in Gemini API.
  """

  hyper_parameters: Optional[dict[str, Any]]
  """Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model."""

  training_dataset_uri: Optional[str]
  """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""

  validation_dataset_uri: Optional[str]
  """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
PartnerModelTuningSpecOrDict = Union[
    PartnerModelTuningSpec, PartnerModelTuningSpecDict
]


# Hyperparameters specific to tuning Veo (video generation) models.
class VeoHyperParameters(_common.BaseModel):
  """Hyperparameters for Veo. This data type is not supported in Gemini API."""

  epoch_count: Optional[int] = Field(
      default=None,
      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
  )
  learning_rate_multiplier: Optional[float] = Field(
      default=None,
      description="""Optional. Multiplier for adjusting the default learning rate.""",
  )
  tuning_task: Optional[TuningTask] = Field(
      default=None,
      description="""Optional. The tuning task. Either I2V or T2V.""",
  )


# TypedDict counterpart of VeoHyperParameters.
class VeoHyperParametersDict(TypedDict, total=False):
  """Hyperparameters for Veo. This data type is not supported in Gemini API."""

  epoch_count: Optional[int]
  """Optional. Number of complete passes the model makes over the entire training dataset during training."""

  learning_rate_multiplier: Optional[float]
  """Optional. Multiplier for adjusting the default learning rate."""

  tuning_task: Optional[TuningTask]
  """Optional. The tuning task. Either I2V or T2V."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
VeoHyperParametersOrDict = Union[VeoHyperParameters, VeoHyperParametersDict]


# Full tuning spec for Veo model tuning (hyperparameters + dataset locations).
class VeoTuningSpec(_common.BaseModel):
  """Tuning Spec for Veo Model Tuning.

  This data type is not supported in Gemini API.
  """

  hyper_parameters: Optional[VeoHyperParameters] = Field(
      default=None, description="""Optional. Hyperparameters for Veo."""
  )
  training_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
  )
  validation_dataset_uri: Optional[str] = Field(
      default=None,
      description="""Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
  )


# TypedDict counterpart of VeoTuningSpec.
class VeoTuningSpecDict(TypedDict, total=False):
  """Tuning Spec for Veo Model Tuning.

  This data type is not supported in Gemini API.
  """

  hyper_parameters: Optional[VeoHyperParametersDict]
  """Optional. Hyperparameters for Veo."""

  training_dataset_uri: Optional[str]
  """Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""

  validation_dataset_uri: Optional[str]
  """Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
VeoTuningSpecOrDict = Union[VeoTuningSpec, VeoTuningSpecDict]


# Resource model for a tuning job. Exactly one of the *_spec fields is
# expected per job depending on the tuning method (presumably — the server
# populates these; confirm against the Vertex AI TuningJob API reference).
class TuningJob(_common.BaseModel):
  """A tuning job."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  name: Optional[str] = Field(
      default=None,
      description="""Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}`""",
  )
  state: Optional[JobState] = Field(
      default=None,
      description="""Output only. The detailed state of the job.""",
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. Time when the TuningJob was created.""",
  )
  start_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state.""",
  )
  end_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`.""",
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. Time when the TuningJob was most recently updated.""",
  )
  error: Optional[GoogleRpcStatus] = Field(
      default=None,
      description="""Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.""",
  )
  description: Optional[str] = Field(
      default=None,
      description="""Optional. The description of the TuningJob.""",
  )
  base_model: Optional[str] = Field(
      default=None,
      description="""The base model that is being tuned. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models).""",
  )
  tuned_model: Optional[TunedModel] = Field(
      default=None,
      description="""Output only. The tuned model resources associated with this TuningJob.""",
  )
  pre_tuned_model: Optional[PreTunedModel] = Field(
      default=None, description="""The pre-tuned model for continuous tuning."""
  )
  supervised_tuning_spec: Optional[SupervisedTuningSpec] = Field(
      default=None, description="""Tuning Spec for Supervised Fine Tuning."""
  )
  preference_optimization_spec: Optional[PreferenceOptimizationSpec] = Field(
      default=None, description="""Tuning Spec for Preference Optimization."""
  )
  tuning_data_stats: Optional[TuningDataStats] = Field(
      default=None,
      description="""Output only. The tuning data statistics associated with this TuningJob.""",
  )
  encryption_spec: Optional[EncryptionSpec] = Field(
      default=None,
      description="""Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key.""",
  )
  partner_model_tuning_spec: Optional[PartnerModelTuningSpec] = Field(
      default=None,
      description="""Tuning Spec for open sourced and third party Partner models.""",
  )
  evaluation_config: Optional[EvaluationConfig] = Field(
      default=None, description="""Evaluation config for the tuning job."""
  )
  custom_base_model: Optional[str] = Field(
      default=None,
      description="""Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models.""",
  )
  experiment: Optional[str] = Field(
      default=None,
      description="""Output only. The Experiment associated with this TuningJob.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""",
  )
  output_uri: Optional[str] = Field(
      default=None,
      description="""Optional. Cloud Storage path to the directory where tuning job outputs are written to. This field is only available and required for open source models.""",
  )
  pipeline_job: Optional[str] = Field(
      default=None,
      description="""Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`.""",
  )
  service_account: Optional[str] = Field(
      default=None,
      description="""The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account.""",
  )
  tuned_model_display_name: Optional[str] = Field(
      default=None,
      description="""Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. For continuous tuning, tuned_model_display_name will by default use the same display name as the pre-tuned model. If a new display name is provided, the tuning job will create a new model instead of a new version.""",
  )
  veo_tuning_spec: Optional[VeoTuningSpec] = Field(
      default=None, description="""Tuning Spec for Veo Tuning."""
  )

  @property
  def has_ended(self) -> bool:
    """Whether the tuning job has ended."""
    # JOB_STATES_ENDED is a module-level collection of terminal JobState
    # values defined elsewhere in this file. False while state is None.
    return self.state in JOB_STATES_ENDED

  @property
  def has_succeeded(self) -> bool:
    """Whether the tuning job has succeeded."""
    # JOB_STATES_SUCCEEDED is a module-level collection of successful
    # terminal JobState values defined elsewhere in this file.
    return self.state in JOB_STATES_SUCCEEDED


# TypedDict counterpart of TuningJob; nested messages use *Dict forms.
# Note: unlike the model, this dict form has no has_ended/has_succeeded
# convenience properties.
class TuningJobDict(TypedDict, total=False):
  """A tuning job."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  name: Optional[str]
  """Output only. Identifier. Resource name of a TuningJob. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}`"""

  state: Optional[JobState]
  """Output only. The detailed state of the job."""

  create_time: Optional[datetime.datetime]
  """Output only. Time when the TuningJob was created."""

  start_time: Optional[datetime.datetime]
  """Output only. Time when the TuningJob for the first time entered the `JOB_STATE_RUNNING` state."""

  end_time: Optional[datetime.datetime]
  """Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`."""

  update_time: Optional[datetime.datetime]
  """Output only. Time when the TuningJob was most recently updated."""

  error: Optional[GoogleRpcStatusDict]
  """Output only. Only populated when job's state is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`."""

  description: Optional[str]
  """Optional. The description of the TuningJob."""

  base_model: Optional[str]
  """The base model that is being tuned. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models)."""

  tuned_model: Optional[TunedModelDict]
  """Output only. The tuned model resources associated with this TuningJob."""

  pre_tuned_model: Optional[PreTunedModelDict]
  """The pre-tuned model for continuous tuning."""

  supervised_tuning_spec: Optional[SupervisedTuningSpecDict]
  """Tuning Spec for Supervised Fine Tuning."""

  preference_optimization_spec: Optional[PreferenceOptimizationSpecDict]
  """Tuning Spec for Preference Optimization."""

  tuning_data_stats: Optional[TuningDataStatsDict]
  """Output only. The tuning data statistics associated with this TuningJob."""

  encryption_spec: Optional[EncryptionSpecDict]
  """Customer-managed encryption key options for a TuningJob. If this is set, then all resources created by the TuningJob will be encrypted with the provided encryption key."""

  partner_model_tuning_spec: Optional[PartnerModelTuningSpecDict]
  """Tuning Spec for open sourced and third party Partner models."""

  evaluation_config: Optional[EvaluationConfigDict]
  """Evaluation config for the tuning job."""

  custom_base_model: Optional[str]
  """Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models."""

  experiment: Optional[str]
  """Output only. The Experiment associated with this TuningJob."""

  labels: Optional[dict[str, str]]
  """Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels."""

  output_uri: Optional[str]
  """Optional. Cloud Storage path to the directory where tuning job outputs are written to. This field is only available and required for open source models."""

  pipeline_job: Optional[str]
  """Output only. The resource name of the PipelineJob associated with the TuningJob. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`."""

  service_account: Optional[str]
  """The service account that the tuningJob workload runs as. If not specified, the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. See https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent Users starting the pipeline must have the `iam.serviceAccounts.actAs` permission on this service account."""

  tuned_model_display_name: Optional[str]
  """Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. For continuous tuning, tuned_model_display_name will by default use the same display name as the pre-tuned model. If a new display name is provided, the tuning job will create a new model instead of a new version."""

  veo_tuning_spec: Optional[VeoTuningSpecDict]
  """Tuning Spec for Veo Tuning."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
TuningJobOrDict = Union[TuningJob, TuningJobDict]


class ListTuningJobsConfig(_common.BaseModel):
  """Configuration for the list tuning jobs method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # Standard list-pagination/filter knobs; the API spec provides no field
  # descriptions here. page_token is presumably the value returned as
  # ListTuningJobsResponse.next_page_token — confirm against the REST API.
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")
  filter: Optional[str] = Field(default=None, description="""""")


# TypedDict counterpart of ListTuningJobsConfig.
class ListTuningJobsConfigDict(TypedDict, total=False):
  """Configuration for the list tuning jobs method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""

  filter: Optional[str]
  """"""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
ListTuningJobsConfigOrDict = Union[
    ListTuningJobsConfig, ListTuningJobsConfigDict
]


# Internal (underscore-prefixed) request envelope used by the SDK's
# tunings.list implementation; not part of the public API surface.
class _ListTuningJobsParameters(_common.BaseModel):
  """Parameters for the list tuning jobs method."""

  config: Optional[ListTuningJobsConfig] = Field(
      default=None, description=""""""
  )


# TypedDict counterpart of _ListTuningJobsParameters (internal).
class _ListTuningJobsParametersDict(TypedDict, total=False):
  """Parameters for the list tuning jobs method."""

  config: Optional[ListTuningJobsConfigDict]
  """"""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
_ListTuningJobsParametersOrDict = Union[
    _ListTuningJobsParameters, _ListTuningJobsParametersDict
]


# Response page for tunings.list; next_page_token is empty/None on the
# last page (presumably — standard AIP-158 pagination; confirm).
class ListTuningJobsResponse(_common.BaseModel):
  """Response for the list tuning jobs method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  next_page_token: Optional[str] = Field(
      default=None,
      description="""A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page.""",
  )
  tuning_jobs: Optional[list[TuningJob]] = Field(
      default=None, description="""List of TuningJobs in the requested page."""
  )


# TypedDict counterpart of ListTuningJobsResponse.
class ListTuningJobsResponseDict(TypedDict, total=False):
  """Response for the list tuning jobs method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page."""

  tuning_jobs: Optional[list[TuningJobDict]]
  """List of TuningJobs in the requested page."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
ListTuningJobsResponseOrDict = Union[
    ListTuningJobsResponse, ListTuningJobsResponseDict
]


class CancelTuningJobConfig(_common.BaseModel):
  """Optional parameters for tunings.cancel method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict counterpart of CancelTuningJobConfig.
class CancelTuningJobConfigDict(TypedDict, total=False):
  """Optional parameters for tunings.cancel method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
CancelTuningJobConfigOrDict = Union[
    CancelTuningJobConfig, CancelTuningJobConfigDict
]


# Internal request envelope used by the SDK's tunings.cancel implementation.
class _CancelTuningJobParameters(_common.BaseModel):
  """Parameters for the cancel method."""

  name: Optional[str] = Field(
      default=None, description="""The resource name of the tuning job."""
  )
  config: Optional[CancelTuningJobConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict counterpart of _CancelTuningJobParameters (internal).
class _CancelTuningJobParametersDict(TypedDict, total=False):
  """Parameters for the cancel method."""

  name: Optional[str]
  """The resource name of the tuning job."""

  config: Optional[CancelTuningJobConfigDict]
  """Optional parameters for the request."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
_CancelTuningJobParametersOrDict = Union[
    _CancelTuningJobParameters, _CancelTuningJobParametersDict
]


# Empty payload; only the raw HTTP response is retained for callers.
class CancelTuningJobResponse(_common.BaseModel):
  """Empty response for tunings.cancel method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )


# TypedDict counterpart of CancelTuningJobResponse.
class CancelTuningJobResponseDict(TypedDict, total=False):
  """Empty response for tunings.cancel method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
CancelTuningJobResponseOrDict = Union[
    CancelTuningJobResponse, CancelTuningJobResponseDict
]


# A single inline (input, output) training example for Gemini API tuning.
class TuningExample(_common.BaseModel):
  """A single example for tuning.

  This data type is not supported in Vertex AI.
  """

  output: Optional[str] = Field(
      default=None, description="""Required. The expected model output."""
  )
  text_input: Optional[str] = Field(
      default=None, description="""Optional. Text model input."""
  )


# TypedDict counterpart of TuningExample.
class TuningExampleDict(TypedDict, total=False):
  """A single example for tuning.

  This data type is not supported in Vertex AI.
  """

  output: Optional[str]
  """Required. The expected model output."""

  text_input: Optional[str]
  """Optional. Text model input."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
TuningExampleOrDict = Union[TuningExample, TuningExampleDict]


# Training-dataset source: a GCS JSONL file, a Vertex Multimodal Dataset
# resource, or inline examples. Presumably mutually exclusive — confirm
# against the tuning API before relying on combinations.
class TuningDataset(_common.BaseModel):
  """Supervised fine-tuning training dataset."""

  gcs_uri: Optional[str] = Field(
      default=None,
      description="""GCS URI of the file containing training dataset in JSONL format.""",
  )
  vertex_dataset_resource: Optional[str] = Field(
      default=None,
      description="""The resource name of the Vertex Multimodal Dataset that is used as training dataset. Example: 'projects/my-project-id-or-number/locations/my-location/datasets/my-dataset-id'.""",
  )
  examples: Optional[list[TuningExample]] = Field(
      default=None,
      description="""Inline examples with simple input/output text.""",
  )


# TypedDict counterpart of TuningDataset.
class TuningDatasetDict(TypedDict, total=False):
  """Supervised fine-tuning training dataset."""

  gcs_uri: Optional[str]
  """GCS URI of the file containing training dataset in JSONL format."""

  vertex_dataset_resource: Optional[str]
  """The resource name of the Vertex Multimodal Dataset that is used as training dataset. Example: 'projects/my-project-id-or-number/locations/my-location/datasets/my-dataset-id'."""

  examples: Optional[list[TuningExampleDict]]
  """Inline examples with simple input/output text."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
TuningDatasetOrDict = Union[TuningDataset, TuningDatasetDict]


class TuningValidationDataset(_common.BaseModel):
  """Fine-tuning validation dataset."""

  gcs_uri: Optional[str] = Field(
      default=None,
      description="""GCS URI of the file containing validation dataset in JSONL format.""",
  )
  vertex_dataset_resource: Optional[str] = Field(
      default=None,
      description="""The resource name of the Vertex Multimodal Dataset that is used as validation dataset. Example: 'projects/my-project-id-or-number/locations/my-location/datasets/my-dataset-id'.""",
  )


# TypedDict counterpart of TuningValidationDataset.
class TuningValidationDatasetDict(TypedDict, total=False):
  """Fine-tuning validation dataset."""

  gcs_uri: Optional[str]
  """GCS URI of the file containing validation dataset in JSONL format."""

  vertex_dataset_resource: Optional[str]
  """The resource name of the Vertex Multimodal Dataset that is used as validation dataset. Example: 'projects/my-project-id-or-number/locations/my-location/datasets/my-dataset-id'."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
TuningValidationDatasetOrDict = Union[
    TuningValidationDataset, TuningValidationDatasetDict
]


# Optional settings for tunings.create; hyperparameters here are flattened
# (epoch_count, learning_rate, ...) rather than nested per tuning method.
class CreateTuningJobConfig(_common.BaseModel):
  """Fine-tuning job creation request - optional fields."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  method: Optional[TuningMethod] = Field(
      default=None,
      description="""The method to use for tuning (SUPERVISED_FINE_TUNING or PREFERENCE_TUNING). If not set, the default method (SFT) will be used.""",
  )
  validation_dataset: Optional[TuningValidationDataset] = Field(
      default=None,
      description="""Validation dataset for tuning. The dataset must be formatted as a JSONL file.""",
  )
  tuned_model_display_name: Optional[str] = Field(
      default=None,
      description="""The display name of the tuned Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.""",
  )
  description: Optional[str] = Field(
      default=None, description="""The description of the TuningJob"""
  )
  epoch_count: Optional[int] = Field(
      default=None,
      description="""Number of complete passes the model makes over the entire training dataset during training.""",
  )
  learning_rate_multiplier: Optional[float] = Field(
      default=None,
      description="""Multiplier for adjusting the default learning rate.""",
  )
  export_last_checkpoint_only: Optional[bool] = Field(
      default=None,
      description="""If set to true, disable intermediate checkpoints and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints.""",
  )
  pre_tuned_model_checkpoint_id: Optional[str] = Field(
      default=None,
      description="""The optional checkpoint id of the pre-tuned model to use for tuning, if applicable.""",
  )
  adapter_size: Optional[AdapterSize] = Field(
      default=None, description="""Adapter size for tuning."""
  )
  batch_size: Optional[int] = Field(
      default=None,
      description="""The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples.""",
  )
  learning_rate: Optional[float] = Field(
      default=None,
      description="""The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.""",
  )
  evaluation_config: Optional[EvaluationConfig] = Field(
      default=None, description="""Evaluation config for the tuning job."""
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""",
  )
  beta: Optional[float] = Field(
      default=None,
      description="""Weight for KL Divergence regularization, Preference Optimization tuning only.""",
  )


# TypedDict counterpart of CreateTuningJobConfig.
class CreateTuningJobConfigDict(TypedDict, total=False):
  """Fine-tuning job creation request - optional fields."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  method: Optional[TuningMethod]
  """The method to use for tuning (SUPERVISED_FINE_TUNING or PREFERENCE_TUNING). If not set, the default method (SFT) will be used."""

  validation_dataset: Optional[TuningValidationDatasetDict]
  """Validation dataset for tuning. The dataset must be formatted as a JSONL file."""

  tuned_model_display_name: Optional[str]
  """The display name of the tuned Model. The name can be up to 128 characters long and can consist of any UTF-8 characters."""

  description: Optional[str]
  """The description of the TuningJob"""

  epoch_count: Optional[int]
  """Number of complete passes the model makes over the entire training dataset during training."""

  learning_rate_multiplier: Optional[float]
  """Multiplier for adjusting the default learning rate."""

  export_last_checkpoint_only: Optional[bool]
  """If set to true, disable intermediate checkpoints and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints."""

  pre_tuned_model_checkpoint_id: Optional[str]
  """The optional checkpoint id of the pre-tuned model to use for tuning, if applicable."""

  adapter_size: Optional[AdapterSize]
  """Adapter size for tuning."""

  batch_size: Optional[int]
  """The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples."""

  learning_rate: Optional[float]
  """The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples."""

  evaluation_config: Optional[EvaluationConfigDict]
  """Evaluation config for the tuning job."""

  labels: Optional[dict[str, str]]
  """Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels."""

  beta: Optional[float]
  """Weight for KL Divergence regularization, Preference Optimization tuning only."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
CreateTuningJobConfigOrDict = Union[
    CreateTuningJobConfig, CreateTuningJobConfigDict
]


# Internal request envelope used by the SDK's tunings.create implementation.
# Callers supply either base_model or pre_tuned_model (presumably mutually
# exclusive — confirm against the tuning API).
class _CreateTuningJobParametersPrivate(_common.BaseModel):
  """Fine-tuning job creation parameters - optional fields."""

  base_model: Optional[str] = Field(
      default=None,
      description="""The base model that is being tuned, e.g., "gemini-2.5-flash".""",
  )
  pre_tuned_model: Optional[PreTunedModel] = Field(
      default=None, description="""The PreTunedModel that is being tuned."""
  )
  training_dataset: Optional[TuningDataset] = Field(
      default=None,
      description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
  )
  config: Optional[CreateTuningJobConfig] = Field(
      default=None, description="""Configuration for the tuning job."""
  )


# TypedDict counterpart of _CreateTuningJobParametersPrivate (internal).
class _CreateTuningJobParametersPrivateDict(TypedDict, total=False):
  """Fine-tuning job creation parameters - optional fields."""

  base_model: Optional[str]
  """The base model that is being tuned, e.g., "gemini-2.5-flash"."""

  pre_tuned_model: Optional[PreTunedModelDict]
  """The PreTunedModel that is being tuned."""

  training_dataset: Optional[TuningDatasetDict]
  """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""

  config: Optional[CreateTuningJobConfigDict]
  """Configuration for the tuning job."""


# Accepted wherever either the pydantic model or its TypedDict form is valid.
_CreateTuningJobParametersPrivateOrDict = Union[
    _CreateTuningJobParametersPrivate, _CreateTuningJobParametersPrivateDict
]


class TuningOperation(_common.BaseModel):
  """A long-running operation."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  name: Optional[str] = Field(
      default=None,
      description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""",
  )
  metadata: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata.  Any method that returns a long-running operation should document the metadata type, if any.""",
  )
  done: Optional[bool] = Field(
      default=None,
      description="""If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""",
  )
  error: Optional[dict[str, Any]] = Field(
      default=None,
      description="""The error result of the operation in case of failure or cancellation.""",
  )


class TuningOperationDict(TypedDict, total=False):
  """A long-running operation."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  name: Optional[str]
  """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""

  metadata: Optional[dict[str, Any]]
  """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata.  Any method that returns a long-running operation should document the metadata type, if any."""

  done: Optional[bool]
  """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."""

  error: Optional[dict[str, Any]]
  """The error result of the operation in case of failure or cancellation."""


# Accepts either the pydantic model or its TypedDict equivalent.
TuningOperationOrDict = Union[TuningOperation, TuningOperationDict]


class CreateCachedContentConfig(_common.BaseModel):
  """Optional configuration for cached content creation."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # NOTE(review): ttl and expire_time both control expiry; the server-side
  # precedence when both are set is not visible here — confirm before relying.
  ttl: Optional[str] = Field(
      default=None,
      description="""The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s".""",
  )
  expire_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z.""",
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The user-generated meaningful display name of the cached content.
      """,
  )
  contents: Optional[ContentListUnion] = Field(
      default=None,
      description="""The content to cache.
      """,
  )
  system_instruction: Optional[ContentUnion] = Field(
      default=None,
      description="""Developer set system instruction.
      """,
  )
  tools: Optional[list[Tool]] = Field(
      default=None,
      description="""A list of `Tools` the model may use to generate the next response.
      """,
  )
  tool_config: Optional[ToolConfig] = Field(
      default=None,
      description="""Configuration for the tools to use. This config is shared for all tools.
      """,
  )
  kms_key_name: Optional[str] = Field(
      default=None,
      description="""The Cloud KMS resource identifier of the customer managed
      encryption key used to protect a resource.
      The key needs to be in the same region as where the compute resource is
      created. See
      https://cloud.google.com/vertex-ai/docs/general/cmek for more
      details. If this is set, then all created CachedContent objects
      will be encrypted with the provided encryption key.
      Allowed formats: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}
      """,
  )


class CreateCachedContentConfigDict(TypedDict, total=False):
  """Optional configuration for cached content creation."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  ttl: Optional[str]
  """The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s"."""

  expire_time: Optional[datetime.datetime]
  """Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z."""

  display_name: Optional[str]
  """The user-generated meaningful display name of the cached content.
      """

  contents: Optional[ContentListUnionDict]
  """The content to cache.
      """

  system_instruction: Optional[ContentUnionDict]
  """Developer set system instruction.
      """

  tools: Optional[list[ToolDict]]
  """A list of `Tools` the model may use to generate the next response.
      """

  tool_config: Optional[ToolConfigDict]
  """Configuration for the tools to use. This config is shared for all tools.
      """

  kms_key_name: Optional[str]
  """The Cloud KMS resource identifier of the customer managed
      encryption key used to protect a resource.
      The key needs to be in the same region as where the compute resource is
      created. See
      https://cloud.google.com/vertex-ai/docs/general/cmek for more
      details. If this is set, then all created CachedContent objects
      will be encrypted with the provided encryption key.
      Allowed formats: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}
      """


# Accepts either the pydantic model or its TypedDict equivalent.
CreateCachedContentConfigOrDict = Union[
    CreateCachedContentConfig, CreateCachedContentConfigDict
]


class _CreateCachedContentParameters(_common.BaseModel):
  """Parameters for caches.create method."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. Example: gemini-2.0-flash""",
  )
  config: Optional[CreateCachedContentConfig] = Field(
      default=None,
      description="""Configuration that contains optional parameters.
      """,
  )


class _CreateCachedContentParametersDict(TypedDict, total=False):
  """Parameters for caches.create method."""

  model: Optional[str]
  """ID of the model to use. Example: gemini-2.0-flash"""

  config: Optional[CreateCachedContentConfigDict]
  """Configuration that contains optional parameters.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
_CreateCachedContentParametersOrDict = Union[
    _CreateCachedContentParameters, _CreateCachedContentParametersDict
]


class CachedContentUsageMetadata(_common.BaseModel):
  """Metadata on the usage of the cached content."""

  audio_duration_seconds: Optional[int] = Field(
      default=None,
      description="""Duration of audio in seconds. This field is not supported in Gemini API.""",
  )
  image_count: Optional[int] = Field(
      default=None,
      description="""Number of images. This field is not supported in Gemini API.""",
  )
  text_count: Optional[int] = Field(
      default=None,
      description="""Number of text characters. This field is not supported in Gemini API.""",
  )
  total_token_count: Optional[int] = Field(
      default=None,
      description="""Total number of tokens that the cached content consumes.""",
  )
  video_duration_seconds: Optional[int] = Field(
      default=None,
      description="""Duration of video in seconds. This field is not supported in Gemini API.""",
  )


class CachedContentUsageMetadataDict(TypedDict, total=False):
  """Metadata on the usage of the cached content."""

  audio_duration_seconds: Optional[int]
  """Duration of audio in seconds. This field is not supported in Gemini API."""

  image_count: Optional[int]
  """Number of images. This field is not supported in Gemini API."""

  text_count: Optional[int]
  """Number of text characters. This field is not supported in Gemini API."""

  total_token_count: Optional[int]
  """Total number of tokens that the cached content consumes."""

  video_duration_seconds: Optional[int]
  """Duration of video in seconds. This field is not supported in Gemini API."""


# Accepts either the pydantic model or its TypedDict equivalent.
CachedContentUsageMetadataOrDict = Union[
    CachedContentUsageMetadata, CachedContentUsageMetadataDict
]


class CachedContent(_common.BaseModel):
  """A resource used in LLM queries for users to explicitly specify what to cache."""

  name: Optional[str] = Field(
      default=None,
      description="""The server-generated resource name of the cached content.""",
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The user-generated meaningful display name of the cached content.""",
  )
  model: Optional[str] = Field(
      default=None,
      description="""The name of the publisher model to use for cached content.""",
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None, description="""Creation time of the cache entry."""
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""When the cache entry was last updated in UTC time.""",
  )
  expire_time: Optional[datetime.datetime] = Field(
      default=None, description="""Expiration time of the cached content."""
  )
  usage_metadata: Optional[CachedContentUsageMetadata] = Field(
      default=None,
      description="""Metadata on the usage of the cached content.""",
  )


class CachedContentDict(TypedDict, total=False):
  """A resource used in LLM queries for users to explicitly specify what to cache."""

  name: Optional[str]
  """The server-generated resource name of the cached content."""

  display_name: Optional[str]
  """The user-generated meaningful display name of the cached content."""

  model: Optional[str]
  """The name of the publisher model to use for cached content."""

  create_time: Optional[datetime.datetime]
  """Creation time of the cache entry."""

  update_time: Optional[datetime.datetime]
  """When the cache entry was last updated in UTC time."""

  expire_time: Optional[datetime.datetime]
  """Expiration time of the cached content."""

  usage_metadata: Optional[CachedContentUsageMetadataDict]
  """Metadata on the usage of the cached content."""


# Accepts either the pydantic model or its TypedDict equivalent.
CachedContentOrDict = Union[CachedContent, CachedContentDict]


class GetCachedContentConfig(_common.BaseModel):
  """Optional parameters for caches.get method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


class GetCachedContentConfigDict(TypedDict, total=False):
  """Optional parameters for caches.get method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepts either the pydantic model or its TypedDict equivalent.
GetCachedContentConfigOrDict = Union[
    GetCachedContentConfig, GetCachedContentConfigDict
]


class _GetCachedContentParameters(_common.BaseModel):
  """Parameters for caches.get method."""

  name: Optional[str] = Field(
      default=None,
      description="""The server-generated resource name of the cached content.
      """,
  )
  config: Optional[GetCachedContentConfig] = Field(
      default=None,
      description="""Optional parameters for the request.
      """,
  )


class _GetCachedContentParametersDict(TypedDict, total=False):
  """Parameters for caches.get method."""

  name: Optional[str]
  """The server-generated resource name of the cached content.
      """

  config: Optional[GetCachedContentConfigDict]
  """Optional parameters for the request.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
_GetCachedContentParametersOrDict = Union[
    _GetCachedContentParameters, _GetCachedContentParametersDict
]


class DeleteCachedContentConfig(_common.BaseModel):
  """Optional parameters for caches.delete method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


class DeleteCachedContentConfigDict(TypedDict, total=False):
  """Optional parameters for caches.delete method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepts either the pydantic model or its TypedDict equivalent.
DeleteCachedContentConfigOrDict = Union[
    DeleteCachedContentConfig, DeleteCachedContentConfigDict
]


class _DeleteCachedContentParameters(_common.BaseModel):
  """Parameters for caches.delete method."""

  name: Optional[str] = Field(
      default=None,
      description="""The server-generated resource name of the cached content.
      """,
  )
  config: Optional[DeleteCachedContentConfig] = Field(
      default=None,
      description="""Optional parameters for the request.
      """,
  )


class _DeleteCachedContentParametersDict(TypedDict, total=False):
  """Parameters for caches.delete method."""

  name: Optional[str]
  """The server-generated resource name of the cached content.
      """

  config: Optional[DeleteCachedContentConfigDict]
  """Optional parameters for the request.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
_DeleteCachedContentParametersOrDict = Union[
    _DeleteCachedContentParameters, _DeleteCachedContentParametersDict
]


class DeleteCachedContentResponse(_common.BaseModel):
  """Empty response for caches.delete method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )


class DeleteCachedContentResponseDict(TypedDict, total=False):
  """Empty response for caches.delete method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""


# Accepts either the pydantic model or its TypedDict equivalent.
DeleteCachedContentResponseOrDict = Union[
    DeleteCachedContentResponse, DeleteCachedContentResponseDict
]


class UpdateCachedContentConfig(_common.BaseModel):
  """Optional parameters for caches.update method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # NOTE(review): ttl and expire_time both control expiry; server-side
  # precedence when both are provided is not visible here — confirm.
  ttl: Optional[str] = Field(
      default=None,
      description="""The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s".""",
  )
  expire_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z.""",
  )


class UpdateCachedContentConfigDict(TypedDict, total=False):
  """Optional parameters for caches.update method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  ttl: Optional[str]
  """The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s"."""

  expire_time: Optional[datetime.datetime]
  """Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z."""


# Accepts either the pydantic model or its TypedDict equivalent.
UpdateCachedContentConfigOrDict = Union[
    UpdateCachedContentConfig, UpdateCachedContentConfigDict
]


class _UpdateCachedContentParameters(_common.BaseModel):
  """Parameters for caches.update method."""

  name: Optional[str] = Field(
      default=None,
      description="""The server-generated resource name of the cached content.
      """,
  )
  config: Optional[UpdateCachedContentConfig] = Field(
      default=None,
      description="""Configuration that contains optional parameters.
      """,
  )


class _UpdateCachedContentParametersDict(TypedDict, total=False):
  """Parameters for caches.update method."""

  name: Optional[str]
  """The server-generated resource name of the cached content.
      """

  config: Optional[UpdateCachedContentConfigDict]
  """Configuration that contains optional parameters.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
_UpdateCachedContentParametersOrDict = Union[
    _UpdateCachedContentParameters, _UpdateCachedContentParametersDict
]


class ListCachedContentsConfig(_common.BaseModel):
  """Config for caches.list method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # NOTE(review): descriptions are empty in the generator output; presumably
  # standard list-pagination parameters — confirm against the API reference.
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")


class ListCachedContentsConfigDict(TypedDict, total=False):
  """Config for caches.list method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""


# Accepts either the pydantic model or its TypedDict equivalent.
ListCachedContentsConfigOrDict = Union[
    ListCachedContentsConfig, ListCachedContentsConfigDict
]


class _ListCachedContentsParameters(_common.BaseModel):
  """Parameters for caches.list method."""

  config: Optional[ListCachedContentsConfig] = Field(
      default=None,
      description="""Configuration that contains optional parameters.
      """,
  )


class _ListCachedContentsParametersDict(TypedDict, total=False):
  """Parameters for caches.list method."""

  config: Optional[ListCachedContentsConfigDict]
  """Configuration that contains optional parameters.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
_ListCachedContentsParametersOrDict = Union[
    _ListCachedContentsParameters, _ListCachedContentsParametersDict
]


class ListCachedContentsResponse(_common.BaseModel):
  """Response for caches.list method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  # NOTE(review): description empty in generator output; presumably the
  # standard token to fetch the next page — confirm.
  next_page_token: Optional[str] = Field(default=None, description="""""")
  cached_contents: Optional[list[CachedContent]] = Field(
      default=None,
      description="""List of cached contents.
      """,
  )


class ListCachedContentsResponseDict(TypedDict, total=False):
  """Response for caches.list method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """"""

  cached_contents: Optional[list[CachedContentDict]]
  """List of cached contents.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
ListCachedContentsResponseOrDict = Union[
    ListCachedContentsResponse, ListCachedContentsResponseDict
]


class GetDocumentConfig(_common.BaseModel):
  """Optional Config."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


class GetDocumentConfigDict(TypedDict, total=False):
  """Optional Config."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepts either the pydantic model or its TypedDict equivalent.
GetDocumentConfigOrDict = Union[GetDocumentConfig, GetDocumentConfigDict]


class _GetDocumentParameters(_common.BaseModel):
  """Parameters for documents.get."""

  name: Optional[str] = Field(
      default=None,
      description="""The resource name of the Document.
    Example: fileSearchStores/file-search-store-foo/documents/documents-bar""",
  )
  config: Optional[GetDocumentConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


class _GetDocumentParametersDict(TypedDict, total=False):
  """Parameters for documents.get."""

  name: Optional[str]
  """The resource name of the Document.
    Example: fileSearchStores/file-search-store-foo/documents/documents-bar"""

  config: Optional[GetDocumentConfigDict]
  """Optional parameters for the request."""


# Accepts either the pydantic model or its TypedDict equivalent.
_GetDocumentParametersOrDict = Union[
    _GetDocumentParameters, _GetDocumentParametersDict
]


class StringList(_common.BaseModel):
  """User provided string values assigned to a single metadata key.

  This data type is not supported in Vertex AI.
  """

  values: Optional[list[str]] = Field(
      default=None,
      description="""The string values of the metadata to store.""",
  )


class StringListDict(TypedDict, total=False):
  """User provided string values assigned to a single metadata key.

  This data type is not supported in Vertex AI.
  """

  values: Optional[list[str]]
  """The string values of the metadata to store."""


# Accepts either the pydantic model or its TypedDict equivalent.
StringListOrDict = Union[StringList, StringListDict]


class CustomMetadata(_common.BaseModel):
  """User provided metadata stored as key-value pairs.

  This data type is not supported in Vertex AI.
  """

  key: Optional[str] = Field(
      default=None,
      description="""Required. The key of the metadata to store.""",
  )
  # NOTE(review): numeric_value, string_list_value, and string_value look
  # mutually exclusive (a oneof value field) — confirm against the API schema.
  numeric_value: Optional[float] = Field(
      default=None,
      description="""The numeric value of the metadata to store.""",
  )
  string_list_value: Optional[StringList] = Field(
      default=None,
      description="""The StringList value of the metadata to store.""",
  )
  string_value: Optional[str] = Field(
      default=None, description="""The string value of the metadata to store."""
  )


class CustomMetadataDict(TypedDict, total=False):
  """User provided metadata stored as key-value pairs.

  This data type is not supported in Vertex AI.
  """

  key: Optional[str]
  """Required. The key of the metadata to store."""

  numeric_value: Optional[float]
  """The numeric value of the metadata to store."""

  string_list_value: Optional[StringListDict]
  """The StringList value of the metadata to store."""

  string_value: Optional[str]
  """The string value of the metadata to store."""


# Accepts either the pydantic model or its TypedDict equivalent.
CustomMetadataOrDict = Union[CustomMetadata, CustomMetadataDict]


class Document(_common.BaseModel):
  """A Document is a collection of Chunks."""

  name: Optional[str] = Field(
      default=None,
      description="""The resource name of the Document.
      Example: fileSearchStores/file-search-store-foo/documents/documents-bar""",
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The human-readable display name for the Document.""",
  )
  state: Optional[DocumentState] = Field(
      default=None, description="""The current state of the Document."""
  )
  size_bytes: Optional[int] = Field(
      default=None, description="""The size of the Document in bytes."""
  )
  mime_type: Optional[str] = Field(
      default=None, description="""The MIME type of the Document."""
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. The Timestamp of when the `Document` was created.""",
  )
  custom_metadata: Optional[list[CustomMetadata]] = Field(
      default=None,
      description="""Optional. User provided custom metadata stored as key-value pairs used for querying. A `Document` can have a maximum of 20 `CustomMetadata`.""",
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. The Timestamp of when the `Document` was last updated.""",
  )


class DocumentDict(TypedDict, total=False):
  """A Document is a collection of Chunks."""

  name: Optional[str]
  """The resource name of the Document.
      Example: fileSearchStores/file-search-store-foo/documents/documents-bar"""

  display_name: Optional[str]
  """The human-readable display name for the Document."""

  state: Optional[DocumentState]
  """The current state of the Document."""

  size_bytes: Optional[int]
  """The size of the Document in bytes."""

  mime_type: Optional[str]
  """The MIME type of the Document."""

  create_time: Optional[datetime.datetime]
  """Output only. The Timestamp of when the `Document` was created."""

  custom_metadata: Optional[list[CustomMetadataDict]]
  """Optional. User provided custom metadata stored as key-value pairs used for querying. A `Document` can have a maximum of 20 `CustomMetadata`."""

  update_time: Optional[datetime.datetime]
  """Output only. The Timestamp of when the `Document` was last updated."""


# Accepts either the pydantic model or its TypedDict equivalent.
DocumentOrDict = Union[Document, DocumentDict]


class DeleteDocumentConfig(_common.BaseModel):
  """Config for optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  force: Optional[bool] = Field(
      default=None,
      description="""If set to true, any `Chunk`s and objects related to this `Document` will
      also be deleted.
      """,
  )


class DeleteDocumentConfigDict(TypedDict, total=False):
  """Config for optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  force: Optional[bool]
  """If set to true, any `Chunk`s and objects related to this `Document` will
      also be deleted.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
DeleteDocumentConfigOrDict = Union[
    DeleteDocumentConfig, DeleteDocumentConfigDict
]


class _DeleteDocumentParameters(_common.BaseModel):
  """Config for documents.delete parameters."""

  name: Optional[str] = Field(
      default=None,
      description="""The resource name of the Document.
    Example: fileSearchStores/file-search-store-foo/documents/documents-bar""",
  )
  config: Optional[DeleteDocumentConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


class _DeleteDocumentParametersDict(TypedDict, total=False):
  """Config for documents.delete parameters."""

  name: Optional[str]
  """The resource name of the Document.
    Example: fileSearchStores/file-search-store-foo/documents/documents-bar"""

  config: Optional[DeleteDocumentConfigDict]
  """Optional parameters for the request."""


# Accepts either the pydantic model or its TypedDict equivalent.
_DeleteDocumentParametersOrDict = Union[
    _DeleteDocumentParameters, _DeleteDocumentParametersDict
]


class ListDocumentsConfig(_common.BaseModel):
  """Config for optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # NOTE(review): descriptions are empty in the generator output; presumably
  # standard list-pagination parameters — confirm against the API reference.
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")


class ListDocumentsConfigDict(TypedDict, total=False):
  """Config for optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""


# Accepts either the pydantic model or its TypedDict equivalent.
ListDocumentsConfigOrDict = Union[ListDocumentsConfig, ListDocumentsConfigDict]


class _ListDocumentsParameters(_common.BaseModel):
  """Config for documents.list parameters."""

  parent: Optional[str] = Field(
      default=None,
      description="""The resource name of the FileSearchStores. Example: `fileSearchStore/file-search-store-foo`""",
  )
  config: Optional[ListDocumentsConfig] = Field(
      default=None, description=""""""
  )


class _ListDocumentsParametersDict(TypedDict, total=False):
  """Config for documents.list parameters."""

  parent: Optional[str]
  """The resource name of the FileSearchStores. Example: `fileSearchStore/file-search-store-foo`"""

  config: Optional[ListDocumentsConfigDict]
  """"""


# Accepts either the pydantic model or its TypedDict equivalent.
_ListDocumentsParametersOrDict = Union[
    _ListDocumentsParameters, _ListDocumentsParametersDict
]


class ListDocumentsResponse(_common.BaseModel):
  """Config for documents.list return value."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  next_page_token: Optional[str] = Field(
      default=None,
      description="""A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.""",
  )
  documents: Optional[list[Document]] = Field(
      default=None, description="""The returned `Document`s."""
  )


class ListDocumentsResponseDict(TypedDict, total=False):
  """Config for documents.list return value."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages."""

  documents: Optional[list[DocumentDict]]
  """The returned `Document`s."""


# Accepts either the pydantic model or its TypedDict equivalent.
ListDocumentsResponseOrDict = Union[
    ListDocumentsResponse, ListDocumentsResponseDict
]


class CreateFileSearchStoreConfig(_common.BaseModel):
  """Optional parameters for creating a file search store."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The human-readable display name for the file search store.
      """,
  )


class CreateFileSearchStoreConfigDict(TypedDict, total=False):
  """Optional parameters for creating a file search store."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  display_name: Optional[str]
  """The human-readable display name for the file search store.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
CreateFileSearchStoreConfigOrDict = Union[
    CreateFileSearchStoreConfig, CreateFileSearchStoreConfigDict
]


class _CreateFileSearchStoreParameters(_common.BaseModel):
  """Config for file_search_stores.create parameters."""

  config: Optional[CreateFileSearchStoreConfig] = Field(
      default=None,
      description="""Optional parameters for creating a file search store.
      """,
  )


class _CreateFileSearchStoreParametersDict(TypedDict, total=False):
  """Config for file_search_stores.create parameters."""

  config: Optional[CreateFileSearchStoreConfigDict]
  """Optional parameters for creating a file search store.
      """


# Accepts either the pydantic model or its TypedDict equivalent.
_CreateFileSearchStoreParametersOrDict = Union[
    _CreateFileSearchStoreParameters, _CreateFileSearchStoreParametersDict
]


class FileSearchStore(_common.BaseModel):
  """A collection of Documents."""

  name: Optional[str] = Field(
      default=None,
      description="""The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`""",
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The human-readable display name for the FileSearchStore.""",
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""The Timestamp of when the FileSearchStore was created.""",
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""The Timestamp of when the FileSearchStore was last updated.""",
  )
  active_documents_count: Optional[int] = Field(
      default=None,
      description="""The number of documents in the FileSearchStore that are active and ready for retrieval.""",
  )
  pending_documents_count: Optional[int] = Field(
      default=None,
      description="""The number of documents in the FileSearchStore that are being processed.""",
  )
  failed_documents_count: Optional[int] = Field(
      default=None,
      description="""The number of documents in the FileSearchStore that have failed processing.""",
  )
  size_bytes: Optional[int] = Field(
      default=None,
      description="""The size of raw bytes ingested into the FileSearchStore. This is the
      total size of all the documents in the FileSearchStore.""",
  )


class FileSearchStoreDict(TypedDict, total=False):
  """A collection of Documents."""

  name: Optional[str]
  """The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`"""

  display_name: Optional[str]
  """The human-readable display name for the FileSearchStore."""

  create_time: Optional[datetime.datetime]
  """The Timestamp of when the FileSearchStore was created."""

  update_time: Optional[datetime.datetime]
  """The Timestamp of when the FileSearchStore was last updated."""

  active_documents_count: Optional[int]
  """The number of documents in the FileSearchStore that are active and ready for retrieval."""

  pending_documents_count: Optional[int]
  """The number of documents in the FileSearchStore that are being processed."""

  failed_documents_count: Optional[int]
  """The number of documents in the FileSearchStore that have failed processing."""

  size_bytes: Optional[int]
  """The size of raw bytes ingested into the FileSearchStore. This is the
      total size of all the documents in the FileSearchStore."""


# Accepts either the pydantic model or its TypedDict equivalent.
FileSearchStoreOrDict = Union[FileSearchStore, FileSearchStoreDict]


class GetFileSearchStoreConfig(_common.BaseModel):
  """Optional parameters for getting a FileSearchStore."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of GetFileSearchStoreConfig, for dict-style callers.
class GetFileSearchStoreConfigDict(TypedDict, total=False):
  """Optional parameters for getting a FileSearchStore."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union alias: the pydantic model or its TypedDict equivalent.
GetFileSearchStoreConfigOrDict = Union[
    GetFileSearchStoreConfig, GetFileSearchStoreConfigDict
]


class _GetFileSearchStoreParameters(_common.BaseModel):
  """Config for file_search_stores.get parameters."""

  name: Optional[str] = Field(
      default=None,
      description="""The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`""",
  )
  config: Optional[GetFileSearchStoreConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _GetFileSearchStoreParameters, for dict-style callers.
class _GetFileSearchStoreParametersDict(TypedDict, total=False):
  """Config for file_search_stores.get parameters."""

  name: Optional[str]
  """The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`"""

  config: Optional[GetFileSearchStoreConfigDict]
  """Optional parameters for the request."""


# Union alias: the pydantic model or its TypedDict equivalent.
_GetFileSearchStoreParametersOrDict = Union[
    _GetFileSearchStoreParameters, _GetFileSearchStoreParametersDict
]


class DeleteFileSearchStoreConfig(_common.BaseModel):
  """Optional parameters for deleting a FileSearchStore."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # Cascade flag: without it, deleting a non-empty store fails (see description).
  force: Optional[bool] = Field(
      default=None,
      description="""If set to true, any Documents and objects related to this FileSearchStore will also be deleted.
      If false (the default), a FAILED_PRECONDITION error will be returned if
      the FileSearchStore contains any Documents.
      """,
  )


# TypedDict mirror of DeleteFileSearchStoreConfig, for dict-style callers.
class DeleteFileSearchStoreConfigDict(TypedDict, total=False):
  """Optional parameters for deleting a FileSearchStore."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  force: Optional[bool]
  """If set to true, any Documents and objects related to this FileSearchStore will also be deleted.
      If false (the default), a FAILED_PRECONDITION error will be returned if
      the FileSearchStore contains any Documents.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
DeleteFileSearchStoreConfigOrDict = Union[
    DeleteFileSearchStoreConfig, DeleteFileSearchStoreConfigDict
]


class _DeleteFileSearchStoreParameters(_common.BaseModel):
  """Config for file_search_stores.delete parameters."""

  name: Optional[str] = Field(
      default=None,
      description="""The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`""",
  )
  config: Optional[DeleteFileSearchStoreConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _DeleteFileSearchStoreParameters, for dict-style callers.
class _DeleteFileSearchStoreParametersDict(TypedDict, total=False):
  """Config for file_search_stores.delete parameters."""

  name: Optional[str]
  """The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`"""

  config: Optional[DeleteFileSearchStoreConfigDict]
  """Optional parameters for the request."""


# Union alias: the pydantic model or its TypedDict equivalent.
_DeleteFileSearchStoreParametersOrDict = Union[
    _DeleteFileSearchStoreParameters, _DeleteFileSearchStoreParametersDict
]


class ListFileSearchStoresConfig(_common.BaseModel):
  """Optional parameters for listing FileSearchStore."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # Standard list-pagination controls; the generator emitted no descriptions.
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")


# TypedDict mirror of ListFileSearchStoresConfig, for dict-style callers.
class ListFileSearchStoresConfigDict(TypedDict, total=False):
  """Optional parameters for listing FileSearchStore."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""


# Union alias: the pydantic model or its TypedDict equivalent.
ListFileSearchStoresConfigOrDict = Union[
    ListFileSearchStoresConfig, ListFileSearchStoresConfigDict
]


class _ListFileSearchStoresParameters(_common.BaseModel):
  """Config for file_search_stores.list parameters."""

  config: Optional[ListFileSearchStoresConfig] = Field(
      default=None, description="""Optional parameters for the list request."""
  )


# TypedDict mirror of _ListFileSearchStoresParameters, for dict-style callers.
class _ListFileSearchStoresParametersDict(TypedDict, total=False):
  """Config for file_search_stores.list parameters."""

  config: Optional[ListFileSearchStoresConfigDict]
  """Optional parameters for the list request."""


# Union alias: the pydantic model or its TypedDict equivalent.
_ListFileSearchStoresParametersOrDict = Union[
    _ListFileSearchStoresParameters, _ListFileSearchStoresParametersDict
]


class ListFileSearchStoresResponse(_common.BaseModel):
  """Config for file_search_stores.list return value."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  # Pagination token for a subsequent list call (no description emitted by the
  # generator; cf. ListFilesResponse.next_page_token below).
  next_page_token: Optional[str] = Field(default=None, description="""""")
  file_search_stores: Optional[list[FileSearchStore]] = Field(
      default=None, description="""The returned file search stores."""
  )


# TypedDict mirror of ListFileSearchStoresResponse, for dict-style callers.
class ListFileSearchStoresResponseDict(TypedDict, total=False):
  """Config for file_search_stores.list return value."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """"""

  file_search_stores: Optional[list[FileSearchStoreDict]]
  """The returned file search stores."""


# Union alias: the pydantic model or its TypedDict equivalent.
ListFileSearchStoresResponseOrDict = Union[
    ListFileSearchStoresResponse, ListFileSearchStoresResponseDict
]


class WhiteSpaceConfig(_common.BaseModel):
  """Configuration for a white space chunking algorithm."""

  max_tokens_per_chunk: Optional[int] = Field(
      default=None, description="""Maximum number of tokens per chunk."""
  )
  max_overlap_tokens: Optional[int] = Field(
      default=None,
      description="""Maximum number of overlapping tokens between two adjacent chunks.""",
  )


# TypedDict mirror of WhiteSpaceConfig, for dict-style callers.
class WhiteSpaceConfigDict(TypedDict, total=False):
  """Configuration for a white space chunking algorithm."""

  max_tokens_per_chunk: Optional[int]
  """Maximum number of tokens per chunk."""

  max_overlap_tokens: Optional[int]
  """Maximum number of overlapping tokens between two adjacent chunks."""


# Union alias: the pydantic model or its TypedDict equivalent.
WhiteSpaceConfigOrDict = Union[WhiteSpaceConfig, WhiteSpaceConfigDict]


class ChunkingConfig(_common.BaseModel):
  """Config for telling the service how to chunk the file."""

  white_space_config: Optional[WhiteSpaceConfig] = Field(
      default=None, description="""White space chunking configuration."""
  )


# TypedDict mirror of ChunkingConfig, for dict-style callers.
class ChunkingConfigDict(TypedDict, total=False):
  """Config for telling the service how to chunk the file."""

  white_space_config: Optional[WhiteSpaceConfigDict]
  """White space chunking configuration."""


# Union alias: the pydantic model or its TypedDict equivalent.
ChunkingConfigOrDict = Union[ChunkingConfig, ChunkingConfigDict]


class UploadToFileSearchStoreConfig(_common.BaseModel):
  """Optional parameters for uploading a file to a FileSearchStore."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  should_return_http_response: Optional[bool] = Field(
      default=None,
      description=""" If true, the raw HTTP response will be returned in the 'sdk_http_response' field.""",
  )
  mime_type: Optional[str] = Field(
      default=None,
      description="""MIME type of the file to be uploaded. If not provided, it will be inferred from the file extension.""",
  )
  display_name: Optional[str] = Field(
      default=None, description="""Display name of the created document."""
  )
  custom_metadata: Optional[list[CustomMetadata]] = Field(
      default=None,
      description="""User provided custom metadata stored as key-value pairs used for querying.""",
  )
  chunking_config: Optional[ChunkingConfig] = Field(
      default=None,
      description="""Config for telling the service how to chunk the file.""",
  )


# TypedDict mirror of UploadToFileSearchStoreConfig, for dict-style callers.
class UploadToFileSearchStoreConfigDict(TypedDict, total=False):
  """Optional parameters for uploading a file to a FileSearchStore."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  should_return_http_response: Optional[bool]
  """ If true, the raw HTTP response will be returned in the 'sdk_http_response' field."""

  mime_type: Optional[str]
  """MIME type of the file to be uploaded. If not provided, it will be inferred from the file extension."""

  display_name: Optional[str]
  """Display name of the created document."""

  custom_metadata: Optional[list[CustomMetadataDict]]
  """User provided custom metadata stored as key-value pairs used for querying."""

  chunking_config: Optional[ChunkingConfigDict]
  """Config for telling the service how to chunk the file."""


# Union alias: the pydantic model or its TypedDict equivalent.
UploadToFileSearchStoreConfigOrDict = Union[
    UploadToFileSearchStoreConfig, UploadToFileSearchStoreConfigDict
]


class _UploadToFileSearchStoreParameters(_common.BaseModel):
  """Generates the parameters for the private _upload_to_file_search_store method."""

  file_search_store_name: Optional[str] = Field(
      default=None,
      description="""The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`""",
  )
  config: Optional[UploadToFileSearchStoreConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _UploadToFileSearchStoreParameters, for dict-style callers.
class _UploadToFileSearchStoreParametersDict(TypedDict, total=False):
  """Generates the parameters for the private _upload_to_file_search_store method."""

  file_search_store_name: Optional[str]
  """The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`"""

  config: Optional[UploadToFileSearchStoreConfigDict]
  """Used to override the default configuration."""


# Union alias: the pydantic model or its TypedDict equivalent.
_UploadToFileSearchStoreParametersOrDict = Union[
    _UploadToFileSearchStoreParameters, _UploadToFileSearchStoreParametersDict
]


class UploadToFileSearchStoreResumableResponse(_common.BaseModel):
  """Response for the resumable upload method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )


# TypedDict mirror of UploadToFileSearchStoreResumableResponse.
class UploadToFileSearchStoreResumableResponseDict(TypedDict, total=False):
  """Response for the resumable upload method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""


# Union alias: the pydantic model or its TypedDict equivalent.
UploadToFileSearchStoreResumableResponseOrDict = Union[
    UploadToFileSearchStoreResumableResponse,
    UploadToFileSearchStoreResumableResponseDict,
]


class ImportFileConfig(_common.BaseModel):
  """Optional parameters for importing a file."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  custom_metadata: Optional[list[CustomMetadata]] = Field(
      default=None,
      description="""User provided custom metadata stored as key-value pairs used for querying.""",
  )
  chunking_config: Optional[ChunkingConfig] = Field(
      default=None,
      description="""Config for telling the service how to chunk the file.""",
  )


# TypedDict mirror of ImportFileConfig, for dict-style callers.
class ImportFileConfigDict(TypedDict, total=False):
  """Optional parameters for importing a file."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  custom_metadata: Optional[list[CustomMetadataDict]]
  """User provided custom metadata stored as key-value pairs used for querying."""

  chunking_config: Optional[ChunkingConfigDict]
  """Config for telling the service how to chunk the file."""


# Union alias: the pydantic model or its TypedDict equivalent.
ImportFileConfigOrDict = Union[ImportFileConfig, ImportFileConfigDict]


class _ImportFileParameters(_common.BaseModel):
  """Config for file_search_stores.import_file parameters."""

  file_search_store_name: Optional[str] = Field(
      default=None,
      description="""The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`""",
  )
  file_name: Optional[str] = Field(
      default=None,
      description="""The name of the File API File to import. Example: `files/abc-123`""",
  )
  config: Optional[ImportFileConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _ImportFileParameters, for dict-style callers.
class _ImportFileParametersDict(TypedDict, total=False):
  """Config for file_search_stores.import_file parameters."""

  file_search_store_name: Optional[str]
  """The resource name of the FileSearchStore. Example: `fileSearchStores/my-file-search-store-123`"""

  file_name: Optional[str]
  """The name of the File API File to import. Example: `files/abc-123`"""

  config: Optional[ImportFileConfigDict]
  """Optional parameters for the request."""


# Union alias: the pydantic model or its TypedDict equivalent.
_ImportFileParametersOrDict = Union[
    _ImportFileParameters, _ImportFileParametersDict
]


class ImportFileResponse(_common.BaseModel):
  """Response for ImportFile to import a File API file with a file search store."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  parent: Optional[str] = Field(
      default=None,
      description="""The name of the FileSearchStore containing Documents.""",
  )
  document_name: Optional[str] = Field(
      default=None, description="""The identifier for the Document imported."""
  )


# TypedDict mirror of ImportFileResponse, for dict-style callers.
class ImportFileResponseDict(TypedDict, total=False):
  """Response for ImportFile to import a File API file with a file search store."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  parent: Optional[str]
  """The name of the FileSearchStore containing Documents."""

  document_name: Optional[str]
  """The identifier for the Document imported."""


# Union alias: the pydantic model or its TypedDict equivalent.
ImportFileResponseOrDict = Union[ImportFileResponse, ImportFileResponseDict]


class ImportFileOperation(_common.BaseModel, Operation):
  """Long-running operation for importing a file to a FileSearchStore."""

  response: Optional[ImportFileResponse] = Field(
      default=None,
      description="""The result of the ImportFile operation, available when the operation is done.""",
  )

  @classmethod
  def from_api_response(
      cls, api_response: Any, is_vertex_ai: bool = False
  ) -> Self:
    """Instantiates a ImportFileOperation from an API response."""

    # NOTE(review): `is_vertex_ai` is accepted for signature parity with other
    # operation classes but is currently ignored — only the Gemini Developer
    # API (mldev) converter is imported for this operation; there is no
    # `_ImportFileOperation_from_vertex`.
    response_dict = _ImportFileOperation_from_mldev(api_response)
    return cls._from_response(response=response_dict, kwargs={})


class ListFilesConfig(_common.BaseModel):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  # Standard list-pagination controls; the generator emitted no descriptions.
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")


# TypedDict mirror of ListFilesConfig, for dict-style callers.
class ListFilesConfigDict(TypedDict, total=False):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""


# Union alias: the pydantic model or its TypedDict equivalent.
ListFilesConfigOrDict = Union[ListFilesConfig, ListFilesConfigDict]


class _ListFilesParameters(_common.BaseModel):
  """Generates the parameters for the list method."""

  config: Optional[ListFilesConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _ListFilesParameters, for dict-style callers.
class _ListFilesParametersDict(TypedDict, total=False):
  """Generates the parameters for the list method."""

  config: Optional[ListFilesConfigDict]
  """Used to override the default configuration."""


# Union alias: the pydantic model or its TypedDict equivalent.
_ListFilesParametersOrDict = Union[
    _ListFilesParameters, _ListFilesParametersDict
]


class ListFilesResponse(_common.BaseModel):
  """Response for the list files method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  next_page_token: Optional[str] = Field(
      default=None,
      description="""A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""",
  )
  files: Optional[list[File]] = Field(
      default=None, description="""The list of `File`s."""
  )


# TypedDict mirror of ListFilesResponse, for dict-style callers.
class ListFilesResponseDict(TypedDict, total=False):
  """Response for the list files method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """A token that can be sent as a `page_token` into a subsequent `ListFiles` call."""

  files: Optional[list[FileDict]]
  """The list of `File`s."""


# Union alias: the pydantic model or its TypedDict equivalent.
ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict]


class CreateFileConfig(_common.BaseModel):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  should_return_http_response: Optional[bool] = Field(
      default=None,
      description=""" If true, the raw HTTP response will be returned in the 'sdk_http_response' field.""",
  )


# TypedDict mirror of CreateFileConfig, for dict-style callers.
class CreateFileConfigDict(TypedDict, total=False):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  should_return_http_response: Optional[bool]
  """ If true, the raw HTTP response will be returned in the 'sdk_http_response' field."""


# Union alias: the pydantic model or its TypedDict equivalent.
CreateFileConfigOrDict = Union[CreateFileConfig, CreateFileConfigDict]


class _CreateFileParameters(_common.BaseModel):
  """Generates the parameters for the private _create method."""

  file: Optional[File] = Field(
      default=None,
      description="""The file to be uploaded.
            mime_type: (Required) The MIME type of the file. Must be provided.
            name: (Optional) The name of the file in the destination (e.g.
            'files/sample-image').
            display_name: (Optional) The display name of the file.
      """,
  )
  config: Optional[CreateFileConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _CreateFileParameters, for dict-style callers.
class _CreateFileParametersDict(TypedDict, total=False):
  """Generates the parameters for the private _create method."""

  file: Optional[FileDict]
  """The file to be uploaded.
            mime_type: (Required) The MIME type of the file. Must be provided.
            name: (Optional) The name of the file in the destination (e.g.
            'files/sample-image').
            display_name: (Optional) The display name of the file.
      """

  config: Optional[CreateFileConfigDict]
  """Used to override the default configuration."""


# Union alias: the pydantic model or its TypedDict equivalent.
_CreateFileParametersOrDict = Union[
    _CreateFileParameters, _CreateFileParametersDict
]


class CreateFileResponse(_common.BaseModel):
  """Response for the create file method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )


# TypedDict mirror of CreateFileResponse, for dict-style callers.
class CreateFileResponseDict(TypedDict, total=False):
  """Response for the create file method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""


# Union alias: the pydantic model or its TypedDict equivalent.
CreateFileResponseOrDict = Union[CreateFileResponse, CreateFileResponseDict]


class GetFileConfig(_common.BaseModel):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of GetFileConfig, for dict-style callers.
class GetFileConfigDict(TypedDict, total=False):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union alias: the pydantic model or its TypedDict equivalent.
GetFileConfigOrDict = Union[GetFileConfig, GetFileConfigDict]


class _GetFileParameters(_common.BaseModel):
  """Generates the parameters for the get method."""

  name: Optional[str] = Field(
      default=None,
      description="""The name identifier for the file to retrieve.""",
  )
  config: Optional[GetFileConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _GetFileParameters, for dict-style callers.
class _GetFileParametersDict(TypedDict, total=False):
  """Generates the parameters for the get method."""

  name: Optional[str]
  """The name identifier for the file to retrieve."""

  config: Optional[GetFileConfigDict]
  """Used to override the default configuration."""


# Union alias: the pydantic model or its TypedDict equivalent.
_GetFileParametersOrDict = Union[_GetFileParameters, _GetFileParametersDict]


class DeleteFileConfig(_common.BaseModel):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of DeleteFileConfig, for dict-style callers.
class DeleteFileConfigDict(TypedDict, total=False):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Union alias: the pydantic model or its TypedDict equivalent.
DeleteFileConfigOrDict = Union[DeleteFileConfig, DeleteFileConfigDict]


class _DeleteFileParameters(_common.BaseModel):
  """Generates the parameters for the delete method."""

  name: Optional[str] = Field(
      default=None,
      description="""The name identifier for the file to be deleted.""",
  )
  config: Optional[DeleteFileConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _DeleteFileParameters, for dict-style callers.
class _DeleteFileParametersDict(TypedDict, total=False):
  """Generates the parameters for the delete method."""

  name: Optional[str]
  """The name identifier for the file to be deleted."""

  config: Optional[DeleteFileConfigDict]
  """Used to override the default configuration."""


# Union alias: the pydantic model or its TypedDict equivalent.
_DeleteFileParametersOrDict = Union[
    _DeleteFileParameters, _DeleteFileParametersDict
]


class DeleteFileResponse(_common.BaseModel):
  """Response for the delete file method."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )


# TypedDict mirror of DeleteFileResponse, for dict-style callers.
class DeleteFileResponseDict(TypedDict, total=False):
  """Response for the delete file method."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""


# Union alias: the pydantic model or its TypedDict equivalent.
DeleteFileResponseOrDict = Union[DeleteFileResponse, DeleteFileResponseDict]


class InlinedRequest(_common.BaseModel):
  """Config for inlined request."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
      <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  contents: Optional[ContentListUnion] = Field(
      default=None,
      description="""Content of the request.
      """,
  )
  metadata: Optional[dict[str, str]] = Field(
      default=None,
      description="""The metadata to be associated with the request.""",
  )
  config: Optional[GenerateContentConfig] = Field(
      default=None,
      description="""Configuration that contains optional model parameters.
      """,
  )


# TypedDict mirror of InlinedRequest, for dict-style callers.
class InlinedRequestDict(TypedDict, total=False):
  """Config for inlined request."""

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
      <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  contents: Optional[ContentListUnionDict]
  """Content of the request.
      """

  metadata: Optional[dict[str, str]]
  """The metadata to be associated with the request."""

  config: Optional[GenerateContentConfigDict]
  """Configuration that contains optional model parameters.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
InlinedRequestOrDict = Union[InlinedRequest, InlinedRequestDict]


# NOTE(review): per the field descriptions, gcs_uri/bigquery_uri are Vertex
# inputs while file_name/inlined_requests are Gemini Developer API inputs;
# whether they are mutually exclusive is enforced elsewhere — confirm at call
# sites.
class BatchJobSource(_common.BaseModel):
  """Config for `src` parameter."""

  format: Optional[str] = Field(
      default=None,
      description="""Storage format of the input files. Must be one of:
      'jsonl', 'bigquery'.
      """,
  )
  gcs_uri: Optional[list[str]] = Field(
      default=None,
      description="""The Google Cloud Storage URIs to input files.
      """,
  )
  bigquery_uri: Optional[str] = Field(
      default=None,
      description="""The BigQuery URI to input table.
      """,
  )
  file_name: Optional[str] = Field(
      default=None,
      description="""The Gemini Developer API's file resource name of the input data
      (e.g. "files/12345").
      """,
  )
  inlined_requests: Optional[list[InlinedRequest]] = Field(
      default=None,
      description="""The Gemini Developer API's inlined input data to run batch job.
      """,
  )


# TypedDict mirror of BatchJobSource, for dict-style callers.
class BatchJobSourceDict(TypedDict, total=False):
  """Config for `src` parameter."""

  format: Optional[str]
  """Storage format of the input files. Must be one of:
      'jsonl', 'bigquery'.
      """

  gcs_uri: Optional[list[str]]
  """The Google Cloud Storage URIs to input files.
      """

  bigquery_uri: Optional[str]
  """The BigQuery URI to input table.
      """

  file_name: Optional[str]
  """The Gemini Developer API's file resource name of the input data
      (e.g. "files/12345").
      """

  inlined_requests: Optional[list[InlinedRequestDict]]
  """The Gemini Developer API's inlined input data to run batch job.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
BatchJobSourceOrDict = Union[BatchJobSource, BatchJobSourceDict]


class JobError(_common.BaseModel):
  """Job error."""

  details: Optional[list[str]] = Field(
      default=None,
      description="""A list of messages that carry the error details. There is a common set of message types for APIs to use.""",
  )
  code: Optional[int] = Field(default=None, description="""The status code.""")
  message: Optional[str] = Field(
      default=None,
      description="""A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the `details` field.""",
  )


# TypedDict mirror of JobError, for dict-style callers.
class JobErrorDict(TypedDict, total=False):
  """Job error."""

  details: Optional[list[str]]
  """A list of messages that carry the error details. There is a common set of message types for APIs to use."""

  code: Optional[int]
  """The status code."""

  message: Optional[str]
  """A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the `details` field."""


# Union alias: the pydantic model or its TypedDict equivalent.
JobErrorOrDict = Union[JobError, JobErrorDict]


# Per-request outcome in a batch: either `response` or `error` is populated.
# NOTE(review): exclusivity is implied by the field pair, not enforced here.
class InlinedResponse(_common.BaseModel):
  """Config for `inlined_responses` parameter."""

  response: Optional[GenerateContentResponse] = Field(
      default=None,
      description="""The response to the request.
      """,
  )
  error: Optional[JobError] = Field(
      default=None,
      description="""The error encountered while processing the request.
      """,
  )


# TypedDict mirror of InlinedResponse, for dict-style callers.
class InlinedResponseDict(TypedDict, total=False):
  """Config for `inlined_responses` parameter."""

  response: Optional[GenerateContentResponseDict]
  """The response to the request.
      """

  error: Optional[JobErrorDict]
  """The error encountered while processing the request.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
InlinedResponseOrDict = Union[InlinedResponse, InlinedResponseDict]


class SingleEmbedContentResponse(_common.BaseModel):
  """Config for `response` parameter."""

  # NOTE(review): the generated descriptions for both fields were copy-paste
  # artifacts from InlinedResponse ("The response to the request." /
  # "The error encountered while processing the request." — the latter on an
  # int token count); corrected here to match the fields' names and types.
  embedding: Optional[ContentEmbedding] = Field(
      default=None,
      description="""The embedding generated for the request content.
      """,
  )
  token_count: Optional[int] = Field(
      default=None,
      description="""The number of tokens in the content that was embedded.
      """,
  )


# TypedDict mirror of SingleEmbedContentResponse, for dict-style callers.
class SingleEmbedContentResponseDict(TypedDict, total=False):
  """Config for `response` parameter."""

  embedding: Optional[ContentEmbeddingDict]
  """The embedding generated for the request content.
      """

  token_count: Optional[int]
  """The number of tokens in the content that was embedded.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
SingleEmbedContentResponseOrDict = Union[
    SingleEmbedContentResponse, SingleEmbedContentResponseDict
]


# Per-request outcome of a batch embed job: either `response` or `error` is
# populated. NOTE(review): exclusivity is implied by the field pair, not
# enforced here.
class InlinedEmbedContentResponse(_common.BaseModel):
  """Config for `inlined_embedding_responses` parameter."""

  response: Optional[SingleEmbedContentResponse] = Field(
      default=None,
      description="""The response to the request.
      """,
  )
  error: Optional[JobError] = Field(
      default=None,
      description="""The error encountered while processing the request.
      """,
  )


# TypedDict mirror of InlinedEmbedContentResponse, for dict-style callers.
class InlinedEmbedContentResponseDict(TypedDict, total=False):
  """Config for `inlined_embedding_responses` parameter."""

  response: Optional[SingleEmbedContentResponseDict]
  """The response to the request.
      """

  error: Optional[JobErrorDict]
  """The error encountered while processing the request.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
InlinedEmbedContentResponseOrDict = Union[
    InlinedEmbedContentResponse, InlinedEmbedContentResponseDict
]


class BatchJobDestination(_common.BaseModel):
  """Config for `des` parameter."""

  format: Optional[str] = Field(
      default=None,
      description="""Storage format of the output files. Must be one of:
      'jsonl', 'bigquery'.
      """,
  )
  gcs_uri: Optional[str] = Field(
      default=None,
      description="""The Google Cloud Storage URI to the output file.
      """,
  )
  bigquery_uri: Optional[str] = Field(
      default=None,
      description="""The BigQuery URI to the output table.
      """,
  )
  file_name: Optional[str] = Field(
      default=None,
      description="""The Gemini Developer API's file resource name of the output data
      (e.g. "files/12345"). The file will be a JSONL file with a single response
      per line. The responses will be GenerateContentResponse messages formatted
      as JSON. The responses will be written in the same order as the input
      requests.
      """,
  )
  inlined_responses: Optional[list[InlinedResponse]] = Field(
      default=None,
      description="""The responses to the requests in the batch. Returned when the batch was
      built using inlined requests. The responses will be in the same order as
      the input requests.
      """,
  )
  inlined_embed_content_responses: Optional[
      list[InlinedEmbedContentResponse]
  ] = Field(
      default=None,
      description="""The responses to the requests in the batch. Returned when the batch was
      built using inlined requests. The responses will be in the same order as
      the input requests.
      """,
  )


# TypedDict mirror of BatchJobDestination, for dict-style callers.
class BatchJobDestinationDict(TypedDict, total=False):
  """Config for `des` parameter."""

  format: Optional[str]
  """Storage format of the output files. Must be one of:
      'jsonl', 'bigquery'.
      """

  gcs_uri: Optional[str]
  """The Google Cloud Storage URI to the output file.
      """

  bigquery_uri: Optional[str]
  """The BigQuery URI to the output table.
      """

  file_name: Optional[str]
  """The Gemini Developer API's file resource name of the output data
      (e.g. "files/12345"). The file will be a JSONL file with a single response
      per line. The responses will be GenerateContentResponse messages formatted
      as JSON. The responses will be written in the same order as the input
      requests.
      """

  inlined_responses: Optional[list[InlinedResponseDict]]
  """The responses to the requests in the batch. Returned when the batch was
      built using inlined requests. The responses will be in the same order as
      the input requests.
      """

  inlined_embed_content_responses: Optional[
      list[InlinedEmbedContentResponseDict]
  ]
  """The responses to the requests in the batch. Returned when the batch was
      built using inlined requests. The responses will be in the same order as
      the input requests.
      """


# Union alias: the pydantic model or its TypedDict equivalent.
BatchJobDestinationOrDict = Union[BatchJobDestination, BatchJobDestinationDict]


# A destination may also be given as a plain URI string (e.g.
# "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId",
# per CreateBatchJobConfig.dest).
BatchJobDestinationUnion = Union[BatchJobDestination, str]


# Dict-accepting variant of BatchJobDestinationUnion.
BatchJobDestinationUnionDict = Union[
    BatchJobDestinationUnion, BatchJobDestinationDict
]


# Optional settings accepted by batches.create.
class CreateBatchJobConfig(_common.BaseModel):
  """Config for optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The user-defined name of this BatchJob.
      """,
  )
  dest: Optional[BatchJobDestinationUnion] = Field(
      default=None,
      description="""GCS or BigQuery URI prefix for the output predictions. Example:
      "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId".
      """,
  )


# TypedDict mirror of CreateBatchJobConfig for dict-style call sites.
class CreateBatchJobConfigDict(TypedDict, total=False):
  """Config for optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  display_name: Optional[str]
  """The user-defined name of this BatchJob.
      """

  dest: Optional[BatchJobDestinationUnionDict]
  """GCS or BigQuery URI prefix for the output predictions. Example:
      "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId".
      """


# Accepted anywhere a CreateBatchJobConfig is expected: model or dict form.
CreateBatchJobConfigOrDict = Union[
    CreateBatchJobConfig, CreateBatchJobConfigDict
]


# Batch input: structured source, inlined request list, or a bare URI string.
BatchJobSourceUnion = Union[BatchJobSource, list[InlinedRequest], str]


# Widest source input: the union above plus the TypedDict/dict-list forms.
BatchJobSourceUnionDict = Union[
    BatchJobSourceUnion, BatchJobSourceDict, list[InlinedRequestDict]
]


# Internal request envelope assembled by batches.create.
class _CreateBatchJobParameters(_common.BaseModel):
  """Config for batches.create parameters."""

  model: Optional[str] = Field(
      default=None,
      # Fixed grammar: "to produces" -> "that produces".
      description="""The name of the model that produces the predictions via the BatchJob.
      """,
  )
  src: Optional[BatchJobSourceUnion] = Field(
      default=None,
      description="""GCS URI(-s) or BigQuery URI to your input data to run batch job.
      Example: "gs://path/to/input/data" or "bq://projectId.bqDatasetId.bqTableId".
      """,
  )
  config: Optional[CreateBatchJobConfig] = Field(
      default=None,
      description="""Optional parameters for creating a BatchJob.
      """,
  )


# TypedDict mirror of _CreateBatchJobParameters for dict-style call sites.
class _CreateBatchJobParametersDict(TypedDict, total=False):
  """Config for batches.create parameters."""

  model: Optional[str]
  """The name of the model that produces the predictions via the BatchJob.
      """

  src: Optional[BatchJobSourceUnionDict]
  """GCS URI(-s) or BigQuery URI to your input data to run batch job.
      Example: "gs://path/to/input/data" or "bq://projectId.bqDatasetId.bqTableId".
      """

  config: Optional[CreateBatchJobConfigDict]
  """Optional parameters for creating a BatchJob.
      """


# Accepted internal parameter form: model or dict.
_CreateBatchJobParametersOrDict = Union[
    _CreateBatchJobParameters, _CreateBatchJobParametersDict
]


# Vertex AI-only per-batch success/failure counters.
class CompletionStats(_common.BaseModel):
  """Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch.

  This data type is not supported in Gemini API.
  """

  failed_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of entities for which any error was encountered.""",
  )
  incomplete_count: Optional[int] = Field(
      default=None,
      description="""Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected).""",
  )
  successful_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of entities that had been processed successfully.""",
  )
  successful_forecast_point_count: Optional[int] = Field(
      default=None,
      description="""Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction.""",
  )


# TypedDict mirror of CompletionStats for dict-style call sites.
class CompletionStatsDict(TypedDict, total=False):
  """Success and error statistics of processing multiple entities (for example, DataItems or structured data rows) in batch.

  This data type is not supported in Gemini API.
  """

  failed_count: Optional[int]
  """Output only. The number of entities for which any error was encountered."""

  incomplete_count: Optional[int]
  """Output only. In cases when enough errors are encountered a job, pipeline, or operation may be failed as a whole. Below is the number of entities for which the processing had not been finished (either in successful or failed state). Set to -1 if the number is unknown (for example, the operation failed before the total entity number could be collected)."""

  successful_count: Optional[int]
  """Output only. The number of entities that had been processed successfully."""

  successful_forecast_point_count: Optional[int]
  """Output only. The number of the successful forecast points that are generated by the forecasting model. This is ONLY used by the forecasting batch prediction."""


# Accepted anywhere a CompletionStats is expected: model or dict form.
CompletionStatsOrDict = Union[CompletionStats, CompletionStatsDict]


# Resource returned by batches.create/get/list.
class BatchJob(_common.BaseModel):
  """Config for batches.create return value."""

  name: Optional[str] = Field(
      default=None,
      # Fixed stray trailing `".` left in the generated description.
      description="""The resource name of the BatchJob. Output only.
      """,
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The display name of the BatchJob.
      """,
  )
  state: Optional[JobState] = Field(
      default=None,
      description="""The state of the BatchJob.
      """,
  )
  error: Optional[JobError] = Field(
      default=None,
      description="""Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED.""",
  )
  create_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""The time when the BatchJob was created.
      """,
  )
  start_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""Output only. Time when the Job for the first time entered the `JOB_STATE_RUNNING` state.""",
  )
  end_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""The time when the BatchJob was completed. This field is for Vertex AI only.
      """,
  )
  update_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""The time when the BatchJob was last updated.
      """,
  )
  model: Optional[str] = Field(
      default=None,
      description="""The name of the model that produces the predictions via the BatchJob.
      """,
  )
  src: Optional[BatchJobSource] = Field(
      default=None,
      description="""Configuration for the input data. This field is for Vertex AI only.
      """,
  )
  dest: Optional[BatchJobDestination] = Field(
      default=None,
      description="""Configuration for the output data.
      """,
  )
  completion_stats: Optional[CompletionStats] = Field(
      default=None,
      description="""Statistics on completed and failed prediction instances. This field is for Vertex AI only.
      """,
  )

  @property
  def done(self) -> bool:
    """Returns True if the batch job has ended."""
    if self.state is None:
      # No state reported yet: treat the job as not finished.
      return False
    # JOB_STATES_ENDED is a module-level collection of terminal state names.
    return self.state.name in JOB_STATES_ENDED


# Deprecated alias subclass kept for backward compatibility.
class GenerationConfigThinkingConfig(ThinkingConfig):
  """Config for thinking feature.

  This class will be deprecated. Please use `ThinkingConfig` instead.
  """


# Deprecated alias subclass kept for backward compatibility (dict form).
class GenerationConfigThinkingConfigDict(ThinkingConfigDict):
  """Config for thinking feature.

  This class will be deprecated. Please use `ThinkingConfig` instead.
  """


# Accepted in either model or dict form.
GenerationConfigThinkingConfigOrDict = Union[
    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
]


# TypedDict mirror of BatchJob for dict-style call sites.
class BatchJobDict(TypedDict, total=False):
  """Config for batches.create return value."""

  name: Optional[str]
  """The resource name of the BatchJob. Output only.
      """

  display_name: Optional[str]
  """The display name of the BatchJob.
      """

  state: Optional[JobState]
  """The state of the BatchJob.
      """

  error: Optional[JobErrorDict]
  """Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED."""

  create_time: Optional[datetime.datetime]
  """The time when the BatchJob was created.
      """

  start_time: Optional[datetime.datetime]
  """Output only. Time when the Job for the first time entered the `JOB_STATE_RUNNING` state."""

  end_time: Optional[datetime.datetime]
  """The time when the BatchJob was completed. This field is for Vertex AI only.
      """

  update_time: Optional[datetime.datetime]
  """The time when the BatchJob was last updated.
      """

  model: Optional[str]
  """The name of the model that produces the predictions via the BatchJob.
      """

  src: Optional[BatchJobSourceDict]
  """Configuration for the input data. This field is for Vertex AI only.
      """

  dest: Optional[BatchJobDestinationDict]
  """Configuration for the output data.
      """

  completion_stats: Optional[CompletionStatsDict]
  """Statistics on completed and failed prediction instances. This field is for Vertex AI only.
      """


# Accepted anywhere a BatchJob is expected: model or dict form.
BatchJobOrDict = Union[BatchJob, BatchJobDict]


# One embed_content request bundled into an embeddings batch.
class EmbedContentBatch(_common.BaseModel):
  """Parameters for the embed_content method."""

  contents: Optional[ContentListUnion] = Field(
      default=None,
      description="""The content to embed. Only the `parts.text` fields will be counted.
      """,
  )
  config: Optional[EmbedContentConfig] = Field(
      default=None,
      description="""Configuration that contains optional parameters.
      """,
  )


# TypedDict mirror of EmbedContentBatch for dict-style call sites.
class EmbedContentBatchDict(TypedDict, total=False):
  """Parameters for the embed_content method."""

  contents: Optional[ContentListUnionDict]
  """The content to embed. Only the `parts.text` fields will be counted.
      """

  config: Optional[EmbedContentConfigDict]
  """Configuration that contains optional parameters.
      """


# Accepted anywhere an EmbedContentBatch is expected: model or dict form.
EmbedContentBatchOrDict = Union[EmbedContentBatch, EmbedContentBatchDict]


class EmbeddingsBatchJobSource(_common.BaseModel):
  """Input source for an embeddings batch job (file-based or inlined)."""

  file_name: Optional[str] = Field(
      default=None,
      description="""The Gemini Developer API's file resource name of the input data
      (e.g. "files/12345").
      """,
  )
  inlined_requests: Optional[EmbedContentBatch] = Field(
      default=None,
      description="""The Gemini Developer API's inlined input data to run batch job.
      """,
  )


class EmbeddingsBatchJobSourceDict(TypedDict, total=False):
  """TypedDict mirror of EmbeddingsBatchJobSource for dict-style call sites."""

  file_name: Optional[str]
  """The Gemini Developer API's file resource name of the input data
      (e.g. "files/12345").
      """

  inlined_requests: Optional[EmbedContentBatchDict]
  """The Gemini Developer API's inlined input data to run batch job.
      """


# Accepted in either model or dict form.
EmbeddingsBatchJobSourceOrDict = Union[
    EmbeddingsBatchJobSource, EmbeddingsBatchJobSourceDict
]


# Optional settings accepted when creating an embeddings batch job.
class CreateEmbeddingsBatchJobConfig(_common.BaseModel):
  """Config for optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  display_name: Optional[str] = Field(
      default=None,
      description="""The user-defined name of this BatchJob.
      """,
  )


# TypedDict mirror of CreateEmbeddingsBatchJobConfig for dict-style call sites.
class CreateEmbeddingsBatchJobConfigDict(TypedDict, total=False):
  """Config for optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  display_name: Optional[str]
  """The user-defined name of this BatchJob.
      """


# Accepted in either model or dict form.
CreateEmbeddingsBatchJobConfigOrDict = Union[
    CreateEmbeddingsBatchJobConfig, CreateEmbeddingsBatchJobConfigDict
]


# Internal request envelope assembled by embeddings batches.create.
class _CreateEmbeddingsBatchJobParameters(_common.BaseModel):
  """Config for batches.create parameters."""

  model: Optional[str] = Field(
      default=None,
      # Fixed grammar: "to produces" -> "that produces".
      description="""The name of the model that produces the predictions via the BatchJob.
      """,
  )
  src: Optional[EmbeddingsBatchJobSource] = Field(
      default=None,
      # Fixed stray trailing `".` left in the generated description.
      description="""Input data to run batch job.
      """,
  )
  config: Optional[CreateEmbeddingsBatchJobConfig] = Field(
      default=None,
      description="""Optional parameters for creating a BatchJob.
      """,
  )


# TypedDict mirror of _CreateEmbeddingsBatchJobParameters.
class _CreateEmbeddingsBatchJobParametersDict(TypedDict, total=False):
  """Config for batches.create parameters."""

  model: Optional[str]
  """The name of the model that produces the predictions via the BatchJob.
      """

  src: Optional[EmbeddingsBatchJobSourceDict]
  """Input data to run batch job.
      """

  config: Optional[CreateEmbeddingsBatchJobConfigDict]
  """Optional parameters for creating a BatchJob.
      """


# Accepted internal parameter form: model or dict.
_CreateEmbeddingsBatchJobParametersOrDict = Union[
    _CreateEmbeddingsBatchJobParameters, _CreateEmbeddingsBatchJobParametersDict
]


# Optional settings accepted by batches.get.
class GetBatchJobConfig(_common.BaseModel):
  """Optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of GetBatchJobConfig for dict-style call sites.
class GetBatchJobConfigDict(TypedDict, total=False):
  """Optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted in either model or dict form.
GetBatchJobConfigOrDict = Union[GetBatchJobConfig, GetBatchJobConfigDict]


# Internal request envelope assembled by batches.get.
class _GetBatchJobParameters(_common.BaseModel):
  """Config for batches.get parameters."""

  name: Optional[str] = Field(
      default=None,
      description="""A fully-qualified BatchJob resource name or ID.
    Example: "projects/.../locations/.../batchPredictionJobs/456"
    or "456" when project and location are initialized in the client.
    """,
  )
  config: Optional[GetBatchJobConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _GetBatchJobParameters for dict-style call sites.
class _GetBatchJobParametersDict(TypedDict, total=False):
  """Config for batches.get parameters."""

  name: Optional[str]
  """A fully-qualified BatchJob resource name or ID.
    Example: "projects/.../locations/.../batchPredictionJobs/456"
    or "456" when project and location are initialized in the client.
    """

  config: Optional[GetBatchJobConfigDict]
  """Optional parameters for the request."""


# Accepted internal parameter form: model or dict.
_GetBatchJobParametersOrDict = Union[
    _GetBatchJobParameters, _GetBatchJobParametersDict
]


# Optional settings accepted by batches.cancel.
class CancelBatchJobConfig(_common.BaseModel):
  """Optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# TypedDict mirror of CancelBatchJobConfig for dict-style call sites.
class CancelBatchJobConfigDict(TypedDict, total=False):
  """Optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted in either model or dict form.
CancelBatchJobConfigOrDict = Union[
    CancelBatchJobConfig, CancelBatchJobConfigDict
]


# Internal request envelope assembled by batches.cancel.
class _CancelBatchJobParameters(_common.BaseModel):
  """Config for batches.cancel parameters."""

  name: Optional[str] = Field(
      default=None,
      description="""A fully-qualified BatchJob resource name or ID.
    Example: "projects/.../locations/.../batchPredictionJobs/456"
    or "456" when project and location are initialized in the client.
    """,
  )
  config: Optional[CancelBatchJobConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _CancelBatchJobParameters for dict-style call sites.
class _CancelBatchJobParametersDict(TypedDict, total=False):
  """Config for batches.cancel parameters."""

  name: Optional[str]
  """A fully-qualified BatchJob resource name or ID.
    Example: "projects/.../locations/.../batchPredictionJobs/456"
    or "456" when project and location are initialized in the client.
    """

  config: Optional[CancelBatchJobConfigDict]
  """Optional parameters for the request."""


# Accepted internal parameter form: model or dict.
_CancelBatchJobParametersOrDict = Union[
    _CancelBatchJobParameters, _CancelBatchJobParametersDict
]


# Optional settings accepted by batches.list (standard pagination fields).
class ListBatchJobsConfig(_common.BaseModel):
  """Config for optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  page_size: Optional[int] = Field(default=None, description="""""")
  page_token: Optional[str] = Field(default=None, description="""""")
  filter: Optional[str] = Field(default=None, description="""""")


# TypedDict mirror of ListBatchJobsConfig for dict-style call sites.
class ListBatchJobsConfigDict(TypedDict, total=False):
  """Config for optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  page_size: Optional[int]
  """"""

  page_token: Optional[str]
  """"""

  filter: Optional[str]
  """"""


# Accepted in either model or dict form.
ListBatchJobsConfigOrDict = Union[ListBatchJobsConfig, ListBatchJobsConfigDict]


# Internal request envelope assembled by batches.list.
class _ListBatchJobsParameters(_common.BaseModel):
  """Config for batches.list parameters."""

  config: Optional[ListBatchJobsConfig] = Field(
      default=None, description=""""""
  )


# TypedDict mirror of _ListBatchJobsParameters for dict-style call sites.
class _ListBatchJobsParametersDict(TypedDict, total=False):
  """Config for batches.list parameters."""

  config: Optional[ListBatchJobsConfigDict]
  """"""


# Accepted internal parameter form: model or dict.
_ListBatchJobsParametersOrDict = Union[
    _ListBatchJobsParameters, _ListBatchJobsParametersDict
]


# One page of batches.list results plus the token for the next page.
class ListBatchJobsResponse(_common.BaseModel):
  """Config for batches.list return value."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  next_page_token: Optional[str] = Field(default=None, description="""""")
  batch_jobs: Optional[list[BatchJob]] = Field(default=None, description="""""")


# TypedDict mirror of ListBatchJobsResponse for dict-style call sites.
class ListBatchJobsResponseDict(TypedDict, total=False):
  """Config for batches.list return value."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  next_page_token: Optional[str]
  """"""

  batch_jobs: Optional[list[BatchJobDict]]
  """"""


# Accepted in either model or dict form.
ListBatchJobsResponseOrDict = Union[
    ListBatchJobsResponse, ListBatchJobsResponseDict
]


# NOTE(review): the generated docstring said "models.get"; this config is
# consumed by _DeleteBatchJobParameters (batches.delete) below.
class DeleteBatchJobConfig(_common.BaseModel):
  """Optional parameters for the batches.delete method."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


# NOTE(review): the generated docstring said "models.get"; this dict mirrors
# DeleteBatchJobConfig, which is used by batches.delete.
class DeleteBatchJobConfigDict(TypedDict, total=False):
  """Optional parameters for the batches.delete method."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted in either model or dict form.
DeleteBatchJobConfigOrDict = Union[
    DeleteBatchJobConfig, DeleteBatchJobConfigDict
]


# Internal request envelope assembled by batches.delete.
class _DeleteBatchJobParameters(_common.BaseModel):
  """Config for batches.delete parameters."""

  name: Optional[str] = Field(
      default=None,
      description="""A fully-qualified BatchJob resource name or ID.
    Example: "projects/.../locations/.../batchPredictionJobs/456"
    or "456" when project and location are initialized in the client.
    """,
  )
  config: Optional[DeleteBatchJobConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of _DeleteBatchJobParameters for dict-style call sites.
class _DeleteBatchJobParametersDict(TypedDict, total=False):
  """Config for batches.delete parameters."""

  name: Optional[str]
  """A fully-qualified BatchJob resource name or ID.
    Example: "projects/.../locations/.../batchPredictionJobs/456"
    or "456" when project and location are initialized in the client.
    """

  config: Optional[DeleteBatchJobConfigDict]
  """Optional parameters for the request."""


# Accepted internal parameter form: model or dict.
_DeleteBatchJobParametersOrDict = Union[
    _DeleteBatchJobParameters, _DeleteBatchJobParametersDict
]


# Long-running-operation style result of a delete call.
class DeleteResourceJob(_common.BaseModel):
  """The return value of delete operation."""

  sdk_http_response: Optional[HttpResponse] = Field(
      default=None, description="""Used to retain the full HTTP response."""
  )
  name: Optional[str] = Field(default=None, description="""""")
  done: Optional[bool] = Field(default=None, description="""""")
  error: Optional[JobError] = Field(default=None, description="""""")


# TypedDict mirror of DeleteResourceJob for dict-style call sites.
class DeleteResourceJobDict(TypedDict, total=False):
  """The return value of delete operation."""

  sdk_http_response: Optional[HttpResponseDict]
  """Used to retain the full HTTP response."""

  name: Optional[str]
  """"""

  done: Optional[bool]
  """"""

  error: Optional[JobErrorDict]
  """"""


# Accepted in either model or dict form.
DeleteResourceJobOrDict = Union[DeleteResourceJob, DeleteResourceJobDict]


class GetOperationConfig(_common.BaseModel):
  """Optional per-request overrides for operation GET calls."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


class GetOperationConfigDict(TypedDict, total=False):
  """TypedDict mirror of GetOperationConfig for dict-style call sites."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted in either model or dict form.
GetOperationConfigOrDict = Union[GetOperationConfig, GetOperationConfigDict]


# Internal request envelope for polling an operation by name.
class _GetOperationParameters(_common.BaseModel):
  """Parameters for the GET method."""

  operation_name: Optional[str] = Field(
      default=None,
      description="""The server-assigned name for the operation.""",
  )
  config: Optional[GetOperationConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _GetOperationParameters for dict-style call sites.
class _GetOperationParametersDict(TypedDict, total=False):
  """Parameters for the GET method."""

  operation_name: Optional[str]
  """The server-assigned name for the operation."""

  config: Optional[GetOperationConfigDict]
  """Used to override the default configuration."""


# Accepted internal parameter form: model or dict.
_GetOperationParametersOrDict = Union[
    _GetOperationParameters, _GetOperationParametersDict
]


class FetchPredictOperationConfig(_common.BaseModel):
  """Optional per-request overrides for fetchPredictOperation calls."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


class FetchPredictOperationConfigDict(TypedDict, total=False):
  """TypedDict mirror of FetchPredictOperationConfig for dict-style call sites."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted in either model or dict form.
FetchPredictOperationConfigOrDict = Union[
    FetchPredictOperationConfig, FetchPredictOperationConfigDict
]


# Internal request envelope for fetchPredictOperation.
class _FetchPredictOperationParameters(_common.BaseModel):
  """Parameters for the fetchPredictOperation method."""

  operation_name: Optional[str] = Field(
      default=None,
      description="""The server-assigned name for the operation.""",
  )
  resource_name: Optional[str] = Field(default=None, description="""""")
  config: Optional[FetchPredictOperationConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _FetchPredictOperationParameters.
class _FetchPredictOperationParametersDict(TypedDict, total=False):
  """Parameters for the fetchPredictOperation method."""

  operation_name: Optional[str]
  """The server-assigned name for the operation."""

  resource_name: Optional[str]
  """"""

  config: Optional[FetchPredictOperationConfigDict]
  """Used to override the default configuration."""


# Accepted internal parameter form: model or dict.
_FetchPredictOperationParametersOrDict = Union[
    _FetchPredictOperationParameters, _FetchPredictOperationParametersDict
]


# Internal request envelope for getProjectOperation.
class _GetProjectOperationParameters(_common.BaseModel):
  """Parameters for the getProjectOperation method."""

  operation_id: Optional[str] = Field(
      default=None,
      description="""The ID of the project-level Vertex operation to get. For example if the operation resource name is
      projects/123/locations/us-central1/operations/456, the operation_id is
      456.""",
  )
  config: Optional[GetOperationConfig] = Field(
      default=None,
      description="""Used to override the default configuration.""",
  )


# TypedDict mirror of _GetProjectOperationParameters.
class _GetProjectOperationParametersDict(TypedDict, total=False):
  """Parameters for the getProjectOperation method."""

  operation_id: Optional[str]
  """The ID of the project-level Vertex operation to get. For example if the operation resource name is
      projects/123/locations/us-central1/operations/456, the operation_id is
      456."""

  config: Optional[GetOperationConfigDict]
  """Used to override the default configuration."""


# Accepted internal parameter form: model or dict.
_GetProjectOperationParametersOrDict = Union[
    _GetProjectOperationParameters, _GetProjectOperationParametersDict
]


# google.longrunning.Operation-shaped payload for project-level operations.
class ProjectOperation(_common.BaseModel):
  """A project-level operation in Vertex."""

  name: Optional[str] = Field(
      default=None,
      description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""",
  )
  metadata: Optional[dict[str, Any]] = Field(
      default=None,
      description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata.  Any method that returns a long-running operation should document the metadata type, if any.""",
  )
  done: Optional[bool] = Field(
      default=None,
      description="""If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""",
  )
  error: Optional[dict[str, Any]] = Field(
      default=None,
      description="""The error result of the operation in case of failure or cancellation.""",
  )


# TypedDict mirror of ProjectOperation for dict-style call sites.
class ProjectOperationDict(TypedDict, total=False):
  """A project-level operation in Vertex."""

  name: Optional[str]
  """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""

  metadata: Optional[dict[str, Any]]
  """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata.  Any method that returns a long-running operation should document the metadata type, if any."""

  done: Optional[bool]
  """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."""

  error: Optional[dict[str, Any]]
  """The error result of the operation in case of failure or cancellation."""


# Accepted in either model or dict form.
ProjectOperationOrDict = Union[ProjectOperation, ProjectOperationDict]


class TestTableItem(_common.BaseModel):
  """A single test case in the SDK's generated replay test tables."""

  name: Optional[str] = Field(
      default=None,
      description="""The name of the test. This is used to derive the replay id.""",
  )
  parameters: Optional[dict[str, Any]] = Field(
      default=None,
      description="""The parameters to the test. Use pydantic models.""",
  )
  exception_if_mldev: Optional[str] = Field(
      default=None,
      description="""Expects an exception for MLDev matching the string.""",
  )
  exception_if_vertex: Optional[str] = Field(
      default=None,
      description="""Expects an exception for Vertex matching the string.""",
  )
  override_replay_id: Optional[str] = Field(
      default=None,
      description="""Use if you don't want to use the default replay id which is derived from the test name.""",
  )
  has_union: Optional[bool] = Field(
      default=None,
      description="""True if the parameters contain an unsupported union type. This test  will be skipped for languages that do not support the union type.""",
  )
  skip_in_api_mode: Optional[str] = Field(
      default=None,
      description="""When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource.""",
  )
  ignore_keys: Optional[list[str]] = Field(
      default=None,
      description="""Keys to ignore when comparing the request and response. This is useful for tests that are not deterministic.""",
  )


class TestTableItemDict(TypedDict, total=False):
  """TypedDict mirror of TestTableItem for dict-style call sites."""

  name: Optional[str]
  """The name of the test. This is used to derive the replay id."""

  parameters: Optional[dict[str, Any]]
  """The parameters to the test. Use pydantic models."""

  exception_if_mldev: Optional[str]
  """Expects an exception for MLDev matching the string."""

  exception_if_vertex: Optional[str]
  """Expects an exception for Vertex matching the string."""

  override_replay_id: Optional[str]
  """Use if you don't want to use the default replay id which is derived from the test name."""

  has_union: Optional[bool]
  """True if the parameters contain an unsupported union type. This test  will be skipped for languages that do not support the union type."""

  skip_in_api_mode: Optional[str]
  """When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource."""

  ignore_keys: Optional[list[str]]
  """Keys to ignore when comparing the request and response. This is useful for tests that are not deterministic."""


# Accepted in either model or dict form.
TestTableItemOrDict = Union[TestTableItem, TestTableItemDict]


class TestTableFile(_common.BaseModel):
  """A file holding a table of TestTableItem entries for one test method."""

  comment: Optional[str] = Field(default=None, description="""""")
  test_method: Optional[str] = Field(default=None, description="""""")
  parameter_names: Optional[list[str]] = Field(default=None, description="""""")
  test_table: Optional[list[TestTableItem]] = Field(
      default=None, description=""""""
  )


class TestTableFileDict(TypedDict, total=False):
  """TypedDict mirror of TestTableFile for dict-style call sites."""

  comment: Optional[str]
  """"""

  test_method: Optional[str]
  """"""

  parameter_names: Optional[list[str]]
  """"""

  test_table: Optional[list[TestTableItemDict]]
  """"""


# Accepted in either model or dict form.
TestTableFileOrDict = Union[TestTableFile, TestTableFileDict]


# Recorded outbound HTTP request for replay-based testing.
class ReplayRequest(_common.BaseModel):
  """Represents a single request in a replay."""

  method: Optional[str] = Field(default=None, description="""""")
  url: Optional[str] = Field(default=None, description="""""")
  headers: Optional[dict[str, str]] = Field(default=None, description="""""")
  body_segments: Optional[list[dict[str, Any]]] = Field(
      default=None, description=""""""
  )


# TypedDict mirror of ReplayRequest for dict-style call sites.
class ReplayRequestDict(TypedDict, total=False):
  """Represents a single request in a replay."""

  method: Optional[str]
  """"""

  url: Optional[str]
  """"""

  headers: Optional[dict[str, str]]
  """"""

  body_segments: Optional[list[dict[str, Any]]]
  """"""


# Accepted in either model or dict form.
ReplayRequestOrDict = Union[ReplayRequest, ReplayRequestDict]


# Recorded HTTP response (and SDK-level result) for replay-based testing.
class ReplayResponse(_common.BaseModel):
  """Represents a single response in a replay."""

  status_code: Optional[int] = Field(default=None, description="""""")
  headers: Optional[dict[str, str]] = Field(default=None, description="""""")
  body_segments: Optional[list[dict[str, Any]]] = Field(
      default=None, description=""""""
  )
  sdk_response_segments: Optional[list[dict[str, Any]]] = Field(
      default=None, description=""""""
  )


# TypedDict mirror of ReplayResponse for dict-style call sites.
class ReplayResponseDict(TypedDict, total=False):
  """Represents a single response in a replay."""

  status_code: Optional[int]
  """"""

  headers: Optional[dict[str, str]]
  """"""

  body_segments: Optional[list[dict[str, Any]]]
  """"""

  sdk_response_segments: Optional[list[dict[str, Any]]]
  """"""


# Accepted in either model or dict form.
ReplayResponseOrDict = Union[ReplayResponse, ReplayResponseDict]


# One request/response pair within a recorded replay session.
class ReplayInteraction(_common.BaseModel):
  """Represents a single interaction, request and response in a replay."""

  request: Optional[ReplayRequest] = Field(default=None, description="""""")
  response: Optional[ReplayResponse] = Field(default=None, description="""""")


# TypedDict mirror of ReplayInteraction for dict-style call sites.
class ReplayInteractionDict(TypedDict, total=False):
  """Represents a single interaction, request and response in a replay."""

  request: Optional[ReplayRequestDict]
  """"""

  response: Optional[ReplayResponseDict]
  """"""


# Accepted in either model or dict form.
ReplayInteractionOrDict = Union[ReplayInteraction, ReplayInteractionDict]


# A whole recorded session: an id plus its ordered interactions.
class ReplayFile(_common.BaseModel):
  """Represents a recorded session."""

  replay_id: Optional[str] = Field(default=None, description="""""")
  interactions: Optional[list[ReplayInteraction]] = Field(
      default=None, description=""""""
  )


# TypedDict mirror of ReplayFile for dict-style call sites.
class ReplayFileDict(TypedDict, total=False):
  """Represents a recorded session."""

  replay_id: Optional[str]
  """"""

  interactions: Optional[list[ReplayInteractionDict]]
  """"""


# Accepted in either model or dict form.
ReplayFileOrDict = Union[ReplayFile, ReplayFileDict]


class UploadFileConfig(_common.BaseModel):
  """Used to override the default configuration."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  name: Optional[str] = Field(
      default=None,
      description="""The name of the file in the destination (e.g., 'files/sample-image'. If not provided one will be generated.""",
  )
  # NOTE(review): the description below redundantly repeats the field name
  # ("mime_type: ...") — a generator artifact; the runtime string is left
  # unchanged here.
  mime_type: Optional[str] = Field(
      default=None,
      description="""mime_type: The MIME type of the file. If not provided, it will be inferred from the file extension.""",
  )
  display_name: Optional[str] = Field(
      default=None, description="""Optional display name of the file."""
  )


class UploadFileConfigDict(TypedDict, total=False):
  """Used to override the default configuration."""

  # Keys mirror the fields of the UploadFileConfig pydantic model.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  name: Optional[str]
  """The name of the file in the destination (e.g., 'files/sample-image'. If not provided one will be generated."""

  mime_type: Optional[str]
  """mime_type: The MIME type of the file. If not provided, it will be inferred from the file extension."""

  display_name: Optional[str]
  """Optional display name of the file."""


# Accepted anywhere an upload config is expected: pydantic model or plain dict.
UploadFileConfigOrDict = Union[UploadFileConfig, UploadFileConfigDict]


class DownloadFileConfig(_common.BaseModel):
  """Used to override the default configuration."""

  # Only HTTP-level overrides are configurable for downloads.
  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )


class DownloadFileConfigDict(TypedDict, total=False):
  """Used to override the default configuration."""

  # Keys mirror the fields of the DownloadFileConfig pydantic model.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""


# Accepted anywhere a download config is expected: model or plain dict.
DownloadFileConfigOrDict = Union[DownloadFileConfig, DownloadFileConfigDict]


class UpscaleImageConfig(_common.BaseModel):
  """Configuration for upscaling an image.

  For more information on this configuration, refer to
  the `Imagen API reference documentation
  <https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api>`_.
  """

  # All fields are optional; None means "use the service default".
  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  output_gcs_uri: Optional[str] = Field(
      default=None,
      description="""Cloud Storage URI used to store the generated images.""",
  )
  safety_filter_level: Optional[SafetyFilterLevel] = Field(
      default=None, description="""Filter level for safety filtering."""
  )
  person_generation: Optional[PersonGeneration] = Field(
      default=None, description="""Allows generation of people by the model."""
  )
  include_rai_reason: Optional[bool] = Field(
      default=None,
      description="""Whether to include a reason for filtered-out images in the
      response.""",
  )
  output_mime_type: Optional[str] = Field(
      default=None,
      description="""The image format that the output should be saved as.""",
  )
  output_compression_quality: Optional[int] = Field(
      default=None,
      description="""The level of compression. Only applicable if the
      ``output_mime_type`` is ``image/jpeg``.""",
  )
  enhance_input_image: Optional[bool] = Field(
      default=None,
      description="""Whether to add an image enhancing step before upscaling.
      It is expected to suppress the noise and JPEG compression artifacts
      from the input image.""",
  )
  # NOTE(review): the description below contains a typo ("will have be more
  # different"); it is a runtime string and is left unchanged here.
  image_preservation_factor: Optional[float] = Field(
      default=None,
      description="""With a higher image preservation factor, the original image
      pixels are more respected. With a lower image preservation factor, the
      output image will have be more different from the input image, but
      with finer details and less noise.""",
  )
  labels: Optional[dict[str, str]] = Field(
      default=None,
      description="""User specified labels to track billing usage.""",
  )


class UpscaleImageConfigDict(TypedDict, total=False):
  """Configuration for upscaling an image.

  For more information on this configuration, refer to
  the `Imagen API reference documentation
  <https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api>`_.
  """

  # Keys mirror the fields of the UpscaleImageConfig pydantic model.
  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  output_gcs_uri: Optional[str]
  """Cloud Storage URI used to store the generated images."""

  safety_filter_level: Optional[SafetyFilterLevel]
  """Filter level for safety filtering."""

  person_generation: Optional[PersonGeneration]
  """Allows generation of people by the model."""

  include_rai_reason: Optional[bool]
  """Whether to include a reason for filtered-out images in the
      response."""

  output_mime_type: Optional[str]
  """The image format that the output should be saved as."""

  output_compression_quality: Optional[int]
  """The level of compression. Only applicable if the
      ``output_mime_type`` is ``image/jpeg``."""

  enhance_input_image: Optional[bool]
  """Whether to add an image enhancing step before upscaling.
      It is expected to suppress the noise and JPEG compression artifacts
      from the input image."""

  image_preservation_factor: Optional[float]
  """With a higher image preservation factor, the original image
      pixels are more respected. With a lower image preservation factor, the
      output image will have be more different from the input image, but
      with finer details and less noise."""

  labels: Optional[dict[str, str]]
  """User specified labels to track billing usage."""


# Accepted anywhere an upscale config is expected: model or plain dict.
UpscaleImageConfigOrDict = Union[UpscaleImageConfig, UpscaleImageConfigDict]


class UpscaleImageParameters(_common.BaseModel):
  """User-facing config UpscaleImageParameters."""

  # Bundles everything an upscale request needs: target model, input image,
  # the upscale factor, and optional per-request configuration.
  model: Optional[str] = Field(
      default=None, description="""The model to use."""
  )
  image: Optional[Image] = Field(
      default=None, description="""The input image to upscale."""
  )
  upscale_factor: Optional[str] = Field(
      default=None,
      description="""The factor to upscale the image (x2 or x4).""",
  )
  config: Optional[UpscaleImageConfig] = Field(
      default=None, description="""Configuration for upscaling."""
  )


class UpscaleImageParametersDict(TypedDict, total=False):
  """User-facing config UpscaleImageParameters."""

  # Keys mirror the fields of the UpscaleImageParameters pydantic model.
  model: Optional[str]
  """The model to use."""

  image: Optional[ImageDict]
  """The input image to upscale."""

  upscale_factor: Optional[str]
  """The factor to upscale the image (x2 or x4)."""

  config: Optional[UpscaleImageConfigDict]
  """Configuration for upscaling."""


# Accepted anywhere upscale parameters are expected: model or plain dict.
UpscaleImageParametersOrDict = Union[
    UpscaleImageParameters, UpscaleImageParametersDict
]


class RawReferenceImage(_common.BaseModel):
  """A raw reference image.

  A raw reference image represents the base image to edit, provided by the user.
  It can optionally be provided in addition to a mask reference image or
  a style reference image.
  """

  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )

  @pydantic.model_validator(mode='before')
  @classmethod
  def _validate_reference_type(cls, values: Any) -> Any:
    """Rejects user-supplied reference_type and stamps the RAW type.

    ``reference_type`` is SDK-internal, so any user-supplied value is an
    error; the canonical value is injected before field validation runs.

    Fixes over the generated original: the classmethod's first parameter is
    named ``cls`` (was ``self``), and the validator no longer borrows the
    unrelated name ``_validate_mask_image_config``.
    """
    if 'reference_type' in values:
      raise ValueError('Cannot set internal reference_type field directly.')
    # NOTE: mutates the caller-provided mapping in place (pre-existing
    # behavior, preserved).
    values['reference_type'] = 'REFERENCE_TYPE_RAW'
    return values


class RawReferenceImageDict(TypedDict, total=False):
  """A raw reference image.

  A raw reference image represents the base image to edit, provided by the user.
  It can optionally be provided in addition to a mask reference image or
  a style reference image.
  """

  # Keys mirror the fields of the RawReferenceImage pydantic model.
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""


# Accepted anywhere a raw reference image is expected: model or plain dict.
RawReferenceImageOrDict = Union[RawReferenceImage, RawReferenceImageDict]


class MaskReferenceImage(_common.BaseModel):
  """A mask reference image.

  This encapsulates either a mask image provided by the user and configs for
  the user provided mask, or only config parameters for the model to generate
  a mask.

  A mask image is an image whose non-zero values indicate where to edit the base
  image. If the user provides a mask image, the mask must be in the same
  dimensions as the raw image.
  """

  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )
  config: Optional[MaskReferenceConfig] = Field(
      default=None,
      description="""Configuration for the mask reference image.""",
  )
  # Internal re-mapping of `config` to the API's mask_reference_config field;
  # populated by the validator below, never set by users directly.
  mask_image_config: Optional['MaskReferenceConfig'] = Field(
      default=None, description=""""""
  )

  @pydantic.model_validator(mode='before')
  @classmethod
  def _validate_mask_image_config(cls, values: Any) -> Any:
    """Copies `config` into `mask_image_config` and stamps the MASK type.

    ``reference_type`` is SDK-internal, so any user-supplied value is an
    error; the canonical value is injected before field validation runs.

    Fix over the generated original: the classmethod's first parameter is
    named ``cls`` (was ``self``).
    """
    # NOTE: mutates the caller-provided mapping in place (pre-existing
    # behavior, preserved).
    values['mask_image_config'] = values.get('config', None)
    if 'reference_type' in values:
      raise ValueError('Cannot set internal reference_type field directly.')
    values['reference_type'] = 'REFERENCE_TYPE_MASK'
    return values


class MaskReferenceImageDict(TypedDict, total=False):
  """A mask reference image.

  This encapsulates either a mask image provided by the user and configs for
  the user provided mask, or only config parameters for the model to generate
  a mask.

  A mask image is an image whose non-zero values indicate where to edit the base
  image. If the user provides a mask image, the mask must be in the same
  dimensions as the raw image.
  """

  # Keys mirror the user-settable fields of the MaskReferenceImage model
  # (the internal mask_image_config field is intentionally absent here).
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""

  config: Optional[MaskReferenceConfigDict]
  """Configuration for the mask reference image."""


# Accepted anywhere a mask reference image is expected: model or plain dict.
MaskReferenceImageOrDict = Union[MaskReferenceImage, MaskReferenceImageDict]


class ControlReferenceImage(_common.BaseModel):
  """A control reference image.

  The image of the control reference image is either a control image provided
  by the user, or a regular image which the backend will use to generate a
  control image of. In the case of the latter, the
  enable_control_image_computation field in the config should be set to True.

  A control image is an image that represents a sketch image of areas for the
  model to fill in based on the prompt.
  """

  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )
  config: Optional[ControlReferenceConfig] = Field(
      default=None,
      description="""Configuration for the control reference image.""",
  )
  # Internal re-mapping of `config` to the API's control_reference_config
  # field; populated by the validator below, never set by users directly.
  control_image_config: Optional['ControlReferenceConfig'] = Field(
      default=None, description=""""""
  )

  @pydantic.model_validator(mode='before')
  @classmethod
  def _validate_control_image_config(cls, values: Any) -> Any:
    """Copies `config` into `control_image_config` and stamps the CONTROL type.

    ``reference_type`` is SDK-internal, so any user-supplied value is an
    error; the canonical value is injected before field validation runs.

    Fixes over the generated original: the classmethod's first parameter is
    named ``cls`` (was ``self``), and the validator no longer borrows the
    unrelated name ``_validate_mask_image_config``.
    """
    # NOTE: mutates the caller-provided mapping in place (pre-existing
    # behavior, preserved).
    values['control_image_config'] = values.get('config', None)
    if 'reference_type' in values:
      raise ValueError('Cannot set internal reference_type field directly.')
    values['reference_type'] = 'REFERENCE_TYPE_CONTROL'
    return values


class ControlReferenceImageDict(TypedDict, total=False):
  """A control reference image.

  The image of the control reference image is either a control image provided
  by the user, or a regular image which the backend will use to generate a
  control image of. In the case of the latter, the
  enable_control_image_computation field in the config should be set to True.

  A control image is an image that represents a sketch image of areas for the
  model to fill in based on the prompt.
  """

  # Keys mirror the user-settable fields of the ControlReferenceImage model
  # (the internal control_image_config field is intentionally absent here).
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""

  config: Optional[ControlReferenceConfigDict]
  """Configuration for the control reference image."""


# Accepted anywhere a control reference image is expected: model or plain dict.
ControlReferenceImageOrDict = Union[
    ControlReferenceImage, ControlReferenceImageDict
]


class StyleReferenceImage(_common.BaseModel):
  """A style reference image.

  This encapsulates a style reference image provided by the user, and
  additionally optional config parameters for the style reference image.

  A raw reference image can also be provided as a destination for the style to
  be applied to.
  """

  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )
  config: Optional[StyleReferenceConfig] = Field(
      default=None,
      description="""Configuration for the style reference image.""",
  )
  # Internal re-mapping of `config` to the API's style_reference_config field;
  # populated by the validator below, never set by users directly.
  style_image_config: Optional['StyleReferenceConfig'] = Field(
      default=None, description=""""""
  )

  @pydantic.model_validator(mode='before')
  @classmethod
  def _validate_style_image_config(cls, values: Any) -> Any:
    """Copies `config` into `style_image_config` and stamps the STYLE type.

    ``reference_type`` is SDK-internal, so any user-supplied value is an
    error; the canonical value is injected before field validation runs.

    Fixes over the generated original: the classmethod's first parameter is
    named ``cls`` (was ``self``), and the validator no longer borrows the
    unrelated name ``_validate_mask_image_config``.
    """
    # NOTE: mutates the caller-provided mapping in place (pre-existing
    # behavior, preserved).
    values['style_image_config'] = values.get('config', None)
    if 'reference_type' in values:
      raise ValueError('Cannot set internal reference_type field directly.')
    values['reference_type'] = 'REFERENCE_TYPE_STYLE'
    return values


class StyleReferenceImageDict(TypedDict, total=False):
  """A style reference image.

  This encapsulates a style reference image provided by the user, and
  additionally optional config parameters for the style reference image.

  A raw reference image can also be provided as a destination for the style to
  be applied to.
  """

  # Keys mirror the user-settable fields of the StyleReferenceImage model
  # (the internal style_image_config field is intentionally absent here).
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""

  config: Optional[StyleReferenceConfigDict]
  """Configuration for the style reference image."""


# Accepted anywhere a style reference image is expected: model or plain dict.
StyleReferenceImageOrDict = Union[StyleReferenceImage, StyleReferenceImageDict]


class SubjectReferenceImage(_common.BaseModel):
  """A subject reference image.

  This encapsulates a subject reference image provided by the user, and
  additionally optional config parameters for the subject reference image.

  A raw reference image can also be provided as a destination for the subject to
  be applied to.
  """

  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )
  config: Optional[SubjectReferenceConfig] = Field(
      default=None,
      description="""Configuration for the subject reference image.""",
  )
  # Internal re-mapping of `config` to the API's subject_reference_config
  # field; populated by the validator below, never set by users directly.
  subject_image_config: Optional['SubjectReferenceConfig'] = Field(
      default=None, description=""""""
  )

  @pydantic.model_validator(mode='before')
  @classmethod
  def _validate_subject_image_config(cls, values: Any) -> Any:
    """Copies `config` into `subject_image_config` and stamps the SUBJECT type.

    ``reference_type`` is SDK-internal, so any user-supplied value is an
    error; the canonical value is injected before field validation runs.

    Fixes over the generated original: the classmethod's first parameter is
    named ``cls`` (was ``self``), and the validator no longer borrows the
    unrelated name ``_validate_mask_image_config``.
    """
    # NOTE: mutates the caller-provided mapping in place (pre-existing
    # behavior, preserved).
    values['subject_image_config'] = values.get('config', None)
    if 'reference_type' in values:
      raise ValueError('Cannot set internal reference_type field directly.')
    values['reference_type'] = 'REFERENCE_TYPE_SUBJECT'
    return values


class SubjectReferenceImageDict(TypedDict, total=False):
  """A subject reference image.

  This encapsulates a subject reference image provided by the user, and
  additionally optional config parameters for the subject reference image.

  A raw reference image can also be provided as a destination for the subject to
  be applied to.
  """

  # Keys mirror the user-settable fields of the SubjectReferenceImage model
  # (the internal subject_image_config field is intentionally absent here).
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""

  config: Optional[SubjectReferenceConfigDict]
  """Configuration for the subject reference image."""


# Accepted anywhere a subject reference image is expected: model or plain dict.
SubjectReferenceImageOrDict = Union[
    SubjectReferenceImage, SubjectReferenceImageDict
]


class ContentReferenceImage(_common.BaseModel):
  """A content reference image.

  A content reference image represents a subject to reference (ex. person,
  product, animal) provided by the user. It can optionally be provided in
  addition to a style reference image (ex. background, style reference).
  """

  reference_image: Optional[Image] = Field(
      default=None,
      description="""The reference image for the editing operation.""",
  )
  reference_id: Optional[int] = Field(
      default=None, description="""The id of the reference image."""
  )
  reference_type: Optional[str] = Field(
      default=None,
      description="""The type of the reference image. Only set by the SDK.""",
  )

  @pydantic.model_validator(mode='before')
  @classmethod
  def _validate_reference_type(cls, values: Any) -> Any:
    """Rejects user-supplied reference_type and stamps the CONTENT type.

    ``reference_type`` is SDK-internal, so any user-supplied value is an
    error; the canonical value is injected before field validation runs.

    Fixes over the generated original: the classmethod's first parameter is
    named ``cls`` (was ``self``), and the validator no longer borrows the
    unrelated name ``_validate_mask_image_config``.
    """
    if 'reference_type' in values:
      raise ValueError('Cannot set internal reference_type field directly.')
    # NOTE: mutates the caller-provided mapping in place (pre-existing
    # behavior, preserved).
    values['reference_type'] = 'REFERENCE_TYPE_CONTENT'
    return values


class ContentReferenceImageDict(TypedDict, total=False):
  """A content reference image.

  A content reference image represents a subject to reference (ex. person,
  product, animal) provided by the user. It can optionally be provided in
  addition to a style reference image (ex. background, style reference).
  """

  # Keys mirror the fields of the ContentReferenceImage pydantic model.
  reference_image: Optional[ImageDict]
  """The reference image for the editing operation."""

  reference_id: Optional[int]
  """The id of the reference image."""

  reference_type: Optional[str]
  """The type of the reference image. Only set by the SDK."""


# Accepted anywhere a content reference image is expected: model or plain dict.
ContentReferenceImageOrDict = Union[
    ContentReferenceImage, ContentReferenceImageDict
]


class LiveServerSetupComplete(_common.BaseModel):
  """Sent in response to a `LiveGenerateContentSetup` message from the client."""

  # The single payload field; None when the server omits it.
  session_id: Optional[str] = Field(
      default=None, description="""The session id of the live session."""
  )


class LiveServerSetupCompleteDict(TypedDict, total=False):
  """Sent in response to a `LiveGenerateContentSetup` message from the client."""

  # Keys mirror the fields of the LiveServerSetupComplete pydantic model.
  session_id: Optional[str]
  """The session id of the live session."""


# Accepted anywhere a setup-complete message is expected: model or plain dict.
LiveServerSetupCompleteOrDict = Union[
    LiveServerSetupComplete, LiveServerSetupCompleteDict
]


class Transcription(_common.BaseModel):
  """Audio transcription in Server Content."""

  # Incremental transcription text plus an end-of-transcription marker.
  text: Optional[str] = Field(
      default=None,
      description="""Transcription text.
      """,
  )
  finished: Optional[bool] = Field(
      default=None,
      description="""The bool indicates the end of the transcription.
      """,
  )


class TranscriptionDict(TypedDict, total=False):
  """Audio transcription in Server Content."""

  # Keys mirror the fields of the Transcription pydantic model.
  text: Optional[str]
  """Transcription text.
      """

  finished: Optional[bool]
  """The bool indicates the end of the transcription.
      """


# Accepted anywhere a transcription is expected: model or plain dict.
TranscriptionOrDict = Union[Transcription, TranscriptionDict]


class LiveServerContent(_common.BaseModel):
  """Incremental server update generated by the model in response to client messages.

  Content is generated as quickly as possible, and not in real time. Clients
  may choose to buffer and play it out in real time.
  """

  # Every field is optional; an update carries only the signals that apply.
  model_turn: Optional[Content] = Field(
      default=None,
      description="""The content that the model has generated as part of the current conversation with the user.""",
  )
  turn_complete: Optional[bool] = Field(
      default=None,
      description="""If true, indicates that the model is done generating. Generation will only start in response to additional client messages. Can be set alongside `content`, indicating that the `content` is the last in the turn.""",
  )
  interrupted: Optional[bool] = Field(
      default=None,
      description="""If true, indicates that a client message has interrupted current model generation. If the client is playing out the content in realtime, this is a good signal to stop and empty the current queue.""",
  )
  grounding_metadata: Optional[GroundingMetadata] = Field(
      default=None,
      description="""Metadata returned to client when grounding is enabled.""",
  )
  generation_complete: Optional[bool] = Field(
      default=None,
      description="""If true, indicates that the model is done generating. When model is
      interrupted while generating there will be no generation_complete message
      in interrupted turn, it will go through interrupted > turn_complete.
      When model assumes realtime playback there will be delay between
      generation_complete and turn_complete that is caused by model
      waiting for playback to finish. If true, indicates that the model
      has finished generating all content. This is a signal to the client
      that it can stop sending messages.""",
  )
  input_transcription: Optional[Transcription] = Field(
      default=None,
      description="""Input transcription. The transcription is independent to the model
      turn which means it doesn’t imply any ordering between transcription and
      model turn.""",
  )
  output_transcription: Optional[Transcription] = Field(
      default=None,
      description="""Output transcription. The transcription is independent to the model
      turn which means it doesn’t imply any ordering between transcription and
      model turn.
      """,
  )
  url_context_metadata: Optional[UrlContextMetadata] = Field(
      default=None,
      description="""Metadata related to url context retrieval tool.""",
  )
  turn_complete_reason: Optional[TurnCompleteReason] = Field(
      default=None, description="""Reason for the turn is complete."""
  )
  waiting_for_input: Optional[bool] = Field(
      default=None,
      description="""If true, indicates that the model is not generating content because
      it is waiting for more input from the user, e.g. because it expects the
      user to continue talking.""",
  )


class LiveServerContentDict(TypedDict, total=False):
  """Incremental server update generated by the model in response to client messages.

  Content is generated as quickly as possible, and not in real time. Clients
  may choose to buffer and play it out in real time.
  """

  # Keys mirror the fields of the LiveServerContent pydantic model.
  model_turn: Optional[ContentDict]
  """The content that the model has generated as part of the current conversation with the user."""

  turn_complete: Optional[bool]
  """If true, indicates that the model is done generating. Generation will only start in response to additional client messages. Can be set alongside `content`, indicating that the `content` is the last in the turn."""

  interrupted: Optional[bool]
  """If true, indicates that a client message has interrupted current model generation. If the client is playing out the content in realtime, this is a good signal to stop and empty the current queue."""

  grounding_metadata: Optional[GroundingMetadataDict]
  """Metadata returned to client when grounding is enabled."""

  generation_complete: Optional[bool]
  """If true, indicates that the model is done generating. When model is
      interrupted while generating there will be no generation_complete message
      in interrupted turn, it will go through interrupted > turn_complete.
      When model assumes realtime playback there will be delay between
      generation_complete and turn_complete that is caused by model
      waiting for playback to finish. If true, indicates that the model
      has finished generating all content. This is a signal to the client
      that it can stop sending messages."""

  input_transcription: Optional[TranscriptionDict]
  """Input transcription. The transcription is independent to the model
      turn which means it doesn’t imply any ordering between transcription and
      model turn."""

  output_transcription: Optional[TranscriptionDict]
  """Output transcription. The transcription is independent to the model
      turn which means it doesn’t imply any ordering between transcription and
      model turn.
      """

  url_context_metadata: Optional[UrlContextMetadataDict]
  """Metadata related to url context retrieval tool."""

  turn_complete_reason: Optional[TurnCompleteReason]
  """Reason for the turn is complete."""

  waiting_for_input: Optional[bool]
  """If true, indicates that the model is not generating content because
      it is waiting for more input from the user, e.g. because it expects the
      user to continue talking."""


# Accepted anywhere live server content is expected: model or plain dict.
LiveServerContentOrDict = Union[LiveServerContent, LiveServerContentDict]


class LiveServerToolCall(_common.BaseModel):
  """Request for the client to execute the `function_calls` and return the responses with the matching `id`s."""

  # One message may carry several function calls to execute.
  function_calls: Optional[list[FunctionCall]] = Field(
      default=None, description="""The function call to be executed."""
  )


class LiveServerToolCallDict(TypedDict, total=False):
  """Request for the client to execute the `function_calls` and return the responses with the matching `id`s."""

  # Keys mirror the fields of the LiveServerToolCall pydantic model.
  function_calls: Optional[list[FunctionCallDict]]
  """The function call to be executed."""


# Accepted anywhere a live tool call is expected: model or plain dict.
LiveServerToolCallOrDict = Union[LiveServerToolCall, LiveServerToolCallDict]


class LiveServerToolCallCancellation(_common.BaseModel):
  """Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled.

  If there were side-effects to those tool calls, clients may attempt to undo
  the tool calls. This message occurs only in cases where the clients interrupt
  server turns.
  """

  # Ids reference function calls from earlier LiveServerToolCall messages.
  ids: Optional[list[str]] = Field(
      default=None, description="""The ids of the tool calls to be cancelled."""
  )


class LiveServerToolCallCancellationDict(TypedDict, total=False):
  """Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled.

  If there were side-effects to those tool calls, clients may attempt to undo
  the tool calls. This message occurs only in cases where the clients interrupt
  server turns.
  """

  # Keys mirror the fields of the LiveServerToolCallCancellation model.
  ids: Optional[list[str]]
  """The ids of the tool calls to be cancelled."""


# Accepted anywhere a cancellation message is expected: model or plain dict.
LiveServerToolCallCancellationOrDict = Union[
    LiveServerToolCallCancellation, LiveServerToolCallCancellationDict
]


class UsageMetadata(_common.BaseModel):
  """Usage metadata about response(s)."""

  # All counts are optional; None means the value was not populated.
  prompt_token_count: Optional[int] = Field(
      default=None,
      description="""Number of tokens in the prompt. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content.""",
  )
  cached_content_token_count: Optional[int] = Field(
      default=None,
      description="""Number of tokens in the cached part of the prompt (the cached content).""",
  )
  response_token_count: Optional[int] = Field(
      default=None,
      description="""Total number of tokens across all the generated response candidates.""",
  )
  tool_use_prompt_token_count: Optional[int] = Field(
      default=None,
      description="""Number of tokens present in tool-use prompt(s).""",
  )
  thoughts_token_count: Optional[int] = Field(
      default=None,
      description="""Number of tokens of thoughts for thinking models.""",
  )
  total_token_count: Optional[int] = Field(
      default=None,
      description="""Total token count for prompt, response candidates, and tool-use prompts(if present).""",
  )
  # Per-modality breakdowns of the counts above.
  prompt_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""List of modalities that were processed in the request input.""",
  )
  cache_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""List of modalities that were processed in the cache input.""",
  )
  response_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""List of modalities that were returned in the response.""",
  )
  tool_use_prompt_tokens_details: Optional[list[ModalityTokenCount]] = Field(
      default=None,
      description="""List of modalities that were processed in the tool-use prompt.""",
  )
  traffic_type: Optional[TrafficType] = Field(
      default=None,
      description="""Traffic type. This shows whether a request consumes Pay-As-You-Go
 or Provisioned Throughput quota.""",
  )


class UsageMetadataDict(TypedDict, total=False):
  """Usage metadata about response(s)."""

  # Keys mirror the fields of the UsageMetadata pydantic model.
  prompt_token_count: Optional[int]
  """Number of tokens in the prompt. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content."""

  cached_content_token_count: Optional[int]
  """Number of tokens in the cached part of the prompt (the cached content)."""

  response_token_count: Optional[int]
  """Total number of tokens across all the generated response candidates."""

  tool_use_prompt_token_count: Optional[int]
  """Number of tokens present in tool-use prompt(s)."""

  thoughts_token_count: Optional[int]
  """Number of tokens of thoughts for thinking models."""

  total_token_count: Optional[int]
  """Total token count for prompt, response candidates, and tool-use prompts(if present)."""

  prompt_tokens_details: Optional[list[ModalityTokenCountDict]]
  """List of modalities that were processed in the request input."""

  cache_tokens_details: Optional[list[ModalityTokenCountDict]]
  """List of modalities that were processed in the cache input."""

  response_tokens_details: Optional[list[ModalityTokenCountDict]]
  """List of modalities that were returned in the response."""

  tool_use_prompt_tokens_details: Optional[list[ModalityTokenCountDict]]
  """List of modalities that were processed in the tool-use prompt."""

  traffic_type: Optional[TrafficType]
  """Traffic type. This shows whether a request consumes Pay-As-You-Go
 or Provisioned Throughput quota."""


# Accepted anywhere usage metadata is expected: model or plain dict.
UsageMetadataOrDict = Union[UsageMetadata, UsageMetadataDict]


class LiveServerGoAway(_common.BaseModel):
  """Server will not be able to service client soon."""

  # Advance warning before the server terminates the connection.
  time_left: Optional[str] = Field(
      default=None,
      description="""The remaining time before the connection will be terminated as ABORTED. The minimal time returned here is specified differently together with the rate limits for a given model.""",
  )


class LiveServerGoAwayDict(TypedDict, total=False):
  """Server will not be able to service client soon."""

  time_left: Optional[str]
  """The remaining time before the connection will be terminated as ABORTED. The minimal time returned here is specified differently together with the rate limits for a given model."""


LiveServerGoAwayOrDict = Union[LiveServerGoAway, LiveServerGoAwayDict]


class LiveServerSessionResumptionUpdate(_common.BaseModel):
  """Update of the session resumption state.

  Only sent if `session_resumption` was set in the connection config.
  """

  new_handle: Optional[str] = Field(
      default=None,
      description="""New handle that represents state that can be resumed. Empty if `resumable`=false.""",
  )
  resumable: Optional[bool] = Field(
      default=None,
      description="""True if session can be resumed at this point. It might be not possible to resume session at some points. In that case we send update empty new_handle and resumable=false. Example of such case could be model executing function calls or just generating. Resuming session (using previous session token) in such state will result in some data loss.""",
  )
  # NOTE(review): the "SessionResmumptionTokenUpdate" and "arelikely" typos
  # below originate in the upstream API spec this file is generated from;
  # fix them in the generator, not here.
  last_consumed_client_message_index: Optional[int] = Field(
      default=None,
      description="""Index of last message sent by client that is included in state represented by this SessionResumptionToken. Only sent when `SessionResumptionConfig.transparent` is set.

Presence of this index allows users to transparently reconnect and avoid issue of losing some part of realtime audio input/video. If client wishes to temporarily disconnect (for example as result of receiving GoAway) they can do it without losing state by buffering messages sent since last `SessionResmumptionTokenUpdate`. This field will enable them to limit buffering (avoid keeping all requests in RAM).

Note: This should not be used for when resuming a session at some time later -- in those cases partial audio and video frames arelikely not needed.""",
  )


class LiveServerSessionResumptionUpdateDict(TypedDict, total=False):
  """Update of the session resumption state.

  Only sent if `session_resumption` was set in the connection config.
  """

  new_handle: Optional[str]
  """New handle that represents state that can be resumed. Empty if `resumable`=false."""

  resumable: Optional[bool]
  """True if session can be resumed at this point. It might be not possible to resume session at some points. In that case we send update empty new_handle and resumable=false. Example of such case could be model executing function calls or just generating. Resuming session (using previous session token) in such state will result in some data loss."""

  last_consumed_client_message_index: Optional[int]
  """Index of last message sent by client that is included in state represented by this SessionResumptionToken. Only sent when `SessionResumptionConfig.transparent` is set.

Presence of this index allows users to transparently reconnect and avoid issue of losing some part of realtime audio input/video. If client wishes to temporarily disconnect (for example as result of receiving GoAway) they can do it without losing state by buffering messages sent since last `SessionResmumptionTokenUpdate`. This field will enable them to limit buffering (avoid keeping all requests in RAM).

Note: This should not be used for when resuming a session at some time later -- in those cases partial audio and video frames arelikely not needed."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveServerSessionResumptionUpdateOrDict = Union[
    LiveServerSessionResumptionUpdate, LiveServerSessionResumptionUpdateDict
]


class LiveServerMessage(_common.BaseModel):
  """Response message for API call."""

  setup_complete: Optional[LiveServerSetupComplete] = Field(
      default=None,
      description="""Sent in response to a `LiveClientSetup` message from the client.""",
  )
  server_content: Optional[LiveServerContent] = Field(
      default=None,
      description="""Content generated by the model in response to client messages.""",
  )
  tool_call: Optional[LiveServerToolCall] = Field(
      default=None,
      description="""Request for the client to execute the `function_calls` and return the responses with the matching `id`s.""",
  )
  tool_call_cancellation: Optional[LiveServerToolCallCancellation] = Field(
      default=None,
      description="""Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled.""",
  )
  usage_metadata: Optional[UsageMetadata] = Field(
      default=None, description="""Usage metadata about model response(s)."""
  )
  go_away: Optional[LiveServerGoAway] = Field(
      default=None, description="""Server will disconnect soon."""
  )
  session_resumption_update: Optional[LiveServerSessionResumptionUpdate] = (
      Field(
          default=None,
          description="""Update of the session resumption state.""",
      )
  )

  @property
  def text(self) -> Optional[str]:
    """Returns the concatenation of all text parts in the response.

    If there are non-text parts in the response, only the concatenated text
    result from text parts will be returned.
    """
    # Fixed: the original duplicated the `not self.server_content` test.
    if (
        not self.server_content
        or not self.server_content.model_turn
        or not self.server_content.model_turn.parts
    ):
      return None
    text = ''
    non_text_parts = []
    for part in self.server_content.model_turn.parts:
      # Record the names of any populated non-text fields so the user can be
      # warned that concatenation dropped part of the response.
      for field_name, field_value in part.model_dump(
          exclude={'text', 'thought'}
      ).items():
        if field_value is not None:
          non_text_parts.append(field_name)
      if isinstance(part.text, str):
        # Parts flagged as thoughts are excluded from the concatenated text.
        if isinstance(part.thought, bool) and part.thought:
          continue
        text += part.text
    # Warn only once per process about dropped non-text parts.
    global _live_server_text_warning_logged
    if non_text_parts and not _live_server_text_warning_logged:
      logger.warning(
          'Warning: there are non-text parts in the response:'
          f' {non_text_parts}, returning concatenated text result from text'
          ' parts, check out the non text parts for full response from model.'
      )
      _live_server_text_warning_logged = True
    # Empty string is normalized to None.
    return text if text else None

  @property
  def data(self) -> Optional[bytes]:
    """Returns the concatenation of all inline data parts in the response.

    If there are non-data parts in the response, only the concatenated data
    result from the data parts will be returned.
    """
    # Fixed: the original duplicated the `not self.server_content` test.
    if (
        not self.server_content
        or not self.server_content.model_turn
        or not self.server_content.model_turn.parts
    ):
      return None
    concatenated_data = b''
    non_data_parts = []
    for part in self.server_content.model_turn.parts:
      # Record the names of any populated non-inline-data fields so the user
      # can be warned that concatenation dropped part of the response.
      for field_name, field_value in part.model_dump(
          exclude={'inline_data'}
      ).items():
        if field_value is not None:
          non_data_parts.append(field_name)
      if part.inline_data and isinstance(part.inline_data.data, bytes):
        concatenated_data += part.inline_data.data
    # Warn only once per process about dropped non-data parts.
    global _live_server_data_warning_logged
    if non_data_parts and not _live_server_data_warning_logged:
      logger.warning(
          'Warning: there are non-data parts in the response:'
          f' {non_data_parts}, returning concatenated data result from data'
          ' parts, check out the non data parts for full response from model.'
      )
      _live_server_data_warning_logged = True
    # Empty bytes is normalized to None.
    return concatenated_data if len(concatenated_data) > 0 else None


class LiveServerMessageDict(TypedDict, total=False):
  """Response message for API call."""

  setup_complete: Optional[LiveServerSetupCompleteDict]
  """Sent in response to a `LiveClientSetup` message from the client."""

  server_content: Optional[LiveServerContentDict]
  """Content generated by the model in response to client messages."""

  tool_call: Optional[LiveServerToolCallDict]
  """Request for the client to execute the `function_calls` and return the responses with the matching `id`s."""

  tool_call_cancellation: Optional[LiveServerToolCallCancellationDict]
  """Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled."""

  usage_metadata: Optional[UsageMetadataDict]
  """Usage metadata about model response(s)."""

  go_away: Optional[LiveServerGoAwayDict]
  """Server will disconnect soon."""

  session_resumption_update: Optional[LiveServerSessionResumptionUpdateDict]
  """Update of the session resumption state."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveServerMessageOrDict = Union[LiveServerMessage, LiveServerMessageDict]


class SessionResumptionConfig(_common.BaseModel):
  """Configuration of session resumption mechanism.

  Included in `LiveConnectConfig.session_resumption`. If included server
  will send `LiveServerSessionResumptionUpdate` messages.
  """

  handle: Optional[str] = Field(
      default=None,
      description="""Session resumption handle of previous session (session to restore).

If not present new session will be started.""",
  )
  transparent: Optional[bool] = Field(
      default=None,
      description="""If set the server will send `last_consumed_client_message_index` in the `session_resumption_update` messages to allow for transparent reconnections.""",
  )


class SessionResumptionConfigDict(TypedDict, total=False):
  """Configuration of session resumption mechanism.

  Included in `LiveConnectConfig.session_resumption`. If included server
  will send `LiveServerSessionResumptionUpdate` messages.
  """

  handle: Optional[str]
  """Session resumption handle of previous session (session to restore).

If not present new session will be started."""

  transparent: Optional[bool]
  """If set the server will send `last_consumed_client_message_index` in the `session_resumption_update` messages to allow for transparent reconnections."""


# Accepts either the Pydantic model or its TypedDict equivalent.
SessionResumptionConfigOrDict = Union[
    SessionResumptionConfig, SessionResumptionConfigDict
]


class SlidingWindow(_common.BaseModel):
  """Context window will be truncated by keeping only suffix of it.

  Context window will always be cut at start of USER role turn. System
  instructions and `BidiGenerateContentSetup.prefix_turns` will not be
  subject to the sliding window mechanism, they will always stay at the
  beginning of context window.
  """

  target_tokens: Optional[int] = Field(
      default=None,
      description="""Session reduction target -- how many tokens we should keep. Window shortening operation has some latency costs, so we should avoid running it on every turn. Should be < trigger_tokens. If not set, trigger_tokens/2 is assumed.""",
  )


class SlidingWindowDict(TypedDict, total=False):
  """Context window will be truncated by keeping only suffix of it.

  Context window will always be cut at start of USER role turn. System
  instructions and `BidiGenerateContentSetup.prefix_turns` will not be
  subject to the sliding window mechanism, they will always stay at the
  beginning of context window.
  """

  target_tokens: Optional[int]
  """Session reduction target -- how many tokens we should keep. Window shortening operation has some latency costs, so we should avoid running it on every turn. Should be < trigger_tokens. If not set, trigger_tokens/2 is assumed."""


# Accepts either the Pydantic model or its TypedDict equivalent.
SlidingWindowOrDict = Union[SlidingWindow, SlidingWindowDict]


class ContextWindowCompressionConfig(_common.BaseModel):
  """Enables context window compression -- mechanism managing model context window so it does not exceed given length."""

  trigger_tokens: Optional[int] = Field(
      default=None,
      description="""Number of tokens (before running turn) that triggers context window compression mechanism.""",
  )
  sliding_window: Optional[SlidingWindow] = Field(
      default=None, description="""Sliding window compression mechanism."""
  )


class ContextWindowCompressionConfigDict(TypedDict, total=False):
  """Enables context window compression -- mechanism managing model context window so it does not exceed given length."""

  trigger_tokens: Optional[int]
  """Number of tokens (before running turn) that triggers context window compression mechanism."""

  sliding_window: Optional[SlidingWindowDict]
  """Sliding window compression mechanism."""


# Accepts either the Pydantic model or its TypedDict equivalent.
ContextWindowCompressionConfigOrDict = Union[
    ContextWindowCompressionConfig, ContextWindowCompressionConfigDict
]


# Intentionally empty marker type: presence of the config (rather than any
# field value) is what enables transcription in the Setup message.
class AudioTranscriptionConfig(_common.BaseModel):
  """The audio transcription configuration in Setup."""

  pass


class AudioTranscriptionConfigDict(TypedDict, total=False):
  """The audio transcription configuration in Setup."""

  pass


# Accepts either the Pydantic model or its TypedDict equivalent.
AudioTranscriptionConfigOrDict = Union[
    AudioTranscriptionConfig, AudioTranscriptionConfigDict
]


class ProactivityConfig(_common.BaseModel):
  """Config for proactivity features."""

  proactive_audio: Optional[bool] = Field(
      default=None,
      description="""If enabled, the model can reject responding to the last prompt. For
        example, this allows the model to ignore out of context speech or to stay
        silent if the user did not make a request, yet.""",
  )


class ProactivityConfigDict(TypedDict, total=False):
  """Config for proactivity features."""

  proactive_audio: Optional[bool]
  """If enabled, the model can reject responding to the last prompt. For
        example, this allows the model to ignore out of context speech or to stay
        silent if the user did not make a request, yet."""


# Accepts either the Pydantic model or its TypedDict equivalent.
ProactivityConfigOrDict = Union[ProactivityConfig, ProactivityConfigDict]


class AutomaticActivityDetection(_common.BaseModel):
  """Configures automatic detection of activity."""

  disabled: Optional[bool] = Field(
      default=None,
      description="""If enabled, detected voice and text input count as activity. If disabled, the client must send activity signals.""",
  )
  start_of_speech_sensitivity: Optional[StartSensitivity] = Field(
      default=None,
      description="""Determines how likely speech is to be detected.""",
  )
  end_of_speech_sensitivity: Optional[EndSensitivity] = Field(
      default=None,
      description="""Determines how likely detected speech is ended.""",
  )
  prefix_padding_ms: Optional[int] = Field(
      default=None,
      description="""The required duration of detected speech before start-of-speech is committed. The lower this value the more sensitive the start-of-speech detection is and the shorter speech can be recognized. However, this also increases the probability of false positives.""",
  )
  silence_duration_ms: Optional[int] = Field(
      default=None,
      description="""The required duration of detected non-speech (e.g. silence) before end-of-speech is committed. The larger this value, the longer speech gaps can be without interrupting the user's activity but this will increase the model's latency.""",
  )


class AutomaticActivityDetectionDict(TypedDict, total=False):
  """Configures automatic detection of activity."""

  disabled: Optional[bool]
  """If enabled, detected voice and text input count as activity. If disabled, the client must send activity signals."""

  start_of_speech_sensitivity: Optional[StartSensitivity]
  """Determines how likely speech is to be detected."""

  end_of_speech_sensitivity: Optional[EndSensitivity]
  """Determines how likely detected speech is ended."""

  prefix_padding_ms: Optional[int]
  """The required duration of detected speech before start-of-speech is committed. The lower this value the more sensitive the start-of-speech detection is and the shorter speech can be recognized. However, this also increases the probability of false positives."""

  silence_duration_ms: Optional[int]
  """The required duration of detected non-speech (e.g. silence) before end-of-speech is committed. The larger this value, the longer speech gaps can be without interrupting the user's activity but this will increase the model's latency."""


# Accepts either the Pydantic model or its TypedDict equivalent.
AutomaticActivityDetectionOrDict = Union[
    AutomaticActivityDetection, AutomaticActivityDetectionDict
]


# NOTE(review): this class docstring ("Marks the end of user activity")
# appears to be copied from ActivityEnd by the generator; the fields below
# describe realtime-input behavior configuration — fix in the generator.
class RealtimeInputConfig(_common.BaseModel):
  """Marks the end of user activity.

  This can only be sent if automatic (i.e. server-side) activity detection is
  disabled.
  """

  automatic_activity_detection: Optional[AutomaticActivityDetection] = Field(
      default=None,
      description="""If not set, automatic activity detection is enabled by default. If automatic voice detection is disabled, the client must send activity signals.""",
  )
  activity_handling: Optional[ActivityHandling] = Field(
      default=None, description="""Defines what effect activity has."""
  )
  turn_coverage: Optional[TurnCoverage] = Field(
      default=None,
      description="""Defines which input is included in the user's turn.""",
  )


class RealtimeInputConfigDict(TypedDict, total=False):
  """Marks the end of user activity.

  This can only be sent if automatic (i.e. server-side) activity detection is
  disabled.
  """

  automatic_activity_detection: Optional[AutomaticActivityDetectionDict]
  """If not set, automatic activity detection is enabled by default. If automatic voice detection is disabled, the client must send activity signals."""

  activity_handling: Optional[ActivityHandling]
  """Defines what effect activity has."""

  turn_coverage: Optional[TurnCoverage]
  """Defines which input is included in the user's turn."""


# Accepts either the Pydantic model or its TypedDict equivalent.
RealtimeInputConfigOrDict = Union[RealtimeInputConfig, RealtimeInputConfigDict]


class LiveClientSetup(_common.BaseModel):
  """Message contains configuration that will apply for the duration of the streaming session."""

  model: Optional[str] = Field(
      default=None,
      description="""
      The fully qualified name of the publisher model or tuned model endpoint to
      use.
      """,
  )
  generation_config: Optional[GenerationConfig] = Field(
      default=None,
      description="""The generation configuration for the session.
      Note: only a subset of fields are supported.
      """,
  )
  system_instruction: Optional[ContentUnion] = Field(
      default=None,
      description="""The user provided system instructions for the model.
      Note: only text should be used in parts and content in each part will be
      in a separate paragraph.""",
  )
  tools: Optional[ToolListUnion] = Field(
      default=None,
      description=""" A list of `Tools` the model may use to generate the next response.

      A `Tool` is a piece of code that enables the system to interact with
      external systems to perform an action, or set of actions, outside of
      knowledge and scope of the model.""",
  )
  session_resumption: Optional[SessionResumptionConfig] = Field(
      default=None,
      description="""Configures session resumption mechanism.

          If included server will send SessionResumptionUpdate messages.""",
  )
  context_window_compression: Optional[ContextWindowCompressionConfig] = Field(
      default=None,
      description="""Configures context window compression mechanism.

      If included, server will compress context window to fit into given length.""",
  )
  input_audio_transcription: Optional[AudioTranscriptionConfig] = Field(
      default=None,
      description="""The transcription of the input aligns with the input audio language.
      """,
  )
  output_audio_transcription: Optional[AudioTranscriptionConfig] = Field(
      default=None,
      description="""The transcription of the output aligns with the language code
      specified for the output audio.
      """,
  )
  proactivity: Optional[ProactivityConfig] = Field(
      default=None,
      description="""Configures the proactivity of the model. This allows the model to respond proactively to
    the input and to ignore irrelevant input.""",
  )


class LiveClientSetupDict(TypedDict, total=False):
  """Message contains configuration that will apply for the duration of the streaming session."""

  model: Optional[str]
  """
      The fully qualified name of the publisher model or tuned model endpoint to
      use.
      """

  generation_config: Optional[GenerationConfigDict]
  """The generation configuration for the session.
      Note: only a subset of fields are supported.
      """

  system_instruction: Optional[ContentUnionDict]
  """The user provided system instructions for the model.
      Note: only text should be used in parts and content in each part will be
      in a separate paragraph."""

  tools: Optional[ToolListUnionDict]
  """ A list of `Tools` the model may use to generate the next response.

      A `Tool` is a piece of code that enables the system to interact with
      external systems to perform an action, or set of actions, outside of
      knowledge and scope of the model."""

  session_resumption: Optional[SessionResumptionConfigDict]
  """Configures session resumption mechanism.

          If included server will send SessionResumptionUpdate messages."""

  context_window_compression: Optional[ContextWindowCompressionConfigDict]
  """Configures context window compression mechanism.

      If included, server will compress context window to fit into given length."""

  input_audio_transcription: Optional[AudioTranscriptionConfigDict]
  """The transcription of the input aligns with the input audio language.
      """

  output_audio_transcription: Optional[AudioTranscriptionConfigDict]
  """The transcription of the output aligns with the language code
      specified for the output audio.
      """

  proactivity: Optional[ProactivityConfigDict]
  """Configures the proactivity of the model. This allows the model to respond proactively to
    the input and to ignore irrelevant input."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveClientSetupOrDict = Union[LiveClientSetup, LiveClientSetupDict]


class LiveClientContent(_common.BaseModel):
  """Incremental update of the current conversation delivered from the client.

  All the content here will unconditionally be appended to the conversation
  history and used as part of the prompt to the model to generate content.

  A message here will interrupt any current model generation.
  """

  turns: Optional[list[Content]] = Field(
      default=None,
      description="""The content appended to the current conversation with the model.

      For single-turn queries, this is a single instance. For multi-turn
      queries, this is a repeated field that contains conversation history and
      latest request.
      """,
  )
  turn_complete: Optional[bool] = Field(
      default=None,
      description="""If true, indicates that the server content generation should start with
  the currently accumulated prompt. Otherwise, the server will await
  additional messages before starting generation.""",
  )


class LiveClientContentDict(TypedDict, total=False):
  """Incremental update of the current conversation delivered from the client.

  All the content here will unconditionally be appended to the conversation
  history and used as part of the prompt to the model to generate content.

  A message here will interrupt any current model generation.
  """

  turns: Optional[list[ContentDict]]
  """The content appended to the current conversation with the model.

      For single-turn queries, this is a single instance. For multi-turn
      queries, this is a repeated field that contains conversation history and
      latest request.
      """

  turn_complete: Optional[bool]
  """If true, indicates that the server content generation should start with
  the currently accumulated prompt. Otherwise, the server will await
  additional messages before starting generation."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveClientContentOrDict = Union[LiveClientContent, LiveClientContentDict]


# ActivityStart/ActivityEnd are intentionally empty marker types: the presence
# of the message itself is the activity signal.
class ActivityStart(_common.BaseModel):
  """Marks the start of user activity.

  This can only be sent if automatic (i.e. server-side) activity detection is
  disabled.
  """

  pass


class ActivityStartDict(TypedDict, total=False):
  """Marks the start of user activity.

  This can only be sent if automatic (i.e. server-side) activity detection is
  disabled.
  """

  pass


# Accepts either the Pydantic model or its TypedDict equivalent.
ActivityStartOrDict = Union[ActivityStart, ActivityStartDict]


class ActivityEnd(_common.BaseModel):
  """Marks the end of user activity.

  This can only be sent if automatic (i.e. server-side) activity detection is
  disabled.
  """

  pass


class ActivityEndDict(TypedDict, total=False):
  """Marks the end of user activity.

  This can only be sent if automatic (i.e. server-side) activity detection is
  disabled.
  """

  pass


# Accepts either the Pydantic model or its TypedDict equivalent.
ActivityEndOrDict = Union[ActivityEnd, ActivityEndDict]


class LiveClientRealtimeInput(_common.BaseModel):
  """User input that is sent in real time.

  This is different from `LiveClientContent` in a few ways:

    - Can be sent continuously without interruption to model generation.
    - If there is a need to mix data interleaved across the
      `LiveClientContent` and the `LiveClientRealtimeInput`, server attempts to
      optimize for best response, but there are no guarantees.
    - End of turn is not explicitly specified, but is rather derived from user
      activity (for example, end of speech).
    - Even before the end of turn, the data is processed incrementally
      to optimize for a fast start of the response from the model.
    - Is always assumed to be the user's input (cannot be used to populate
      conversation history).
  """

  media_chunks: Optional[list[Blob]] = Field(
      default=None, description="""Inlined bytes data for media input."""
  )
  audio: Optional[Blob] = Field(
      default=None, description="""The realtime audio input stream."""
  )
  audio_stream_end: Optional[bool] = Field(
      default=None,
      description="""
Indicates that the audio stream has ended, e.g. because the microphone was
turned off.

This should only be sent when automatic activity detection is enabled
(which is the default).

The client can reopen the stream by sending an audio message.
""",
  )
  video: Optional[Blob] = Field(
      default=None, description="""The realtime video input stream."""
  )
  text: Optional[str] = Field(
      default=None, description="""The realtime text input stream."""
  )
  activity_start: Optional[ActivityStart] = Field(
      default=None, description="""Marks the start of user activity."""
  )
  activity_end: Optional[ActivityEnd] = Field(
      default=None, description="""Marks the end of user activity."""
  )


class LiveClientRealtimeInputDict(TypedDict, total=False):
  """User input that is sent in real time.

  This is different from `LiveClientContent` in a few ways:

    - Can be sent continuously without interruption to model generation.
    - If there is a need to mix data interleaved across the
      `LiveClientContent` and the `LiveClientRealtimeInput`, server attempts to
      optimize for best response, but there are no guarantees.
    - End of turn is not explicitly specified, but is rather derived from user
      activity (for example, end of speech).
    - Even before the end of turn, the data is processed incrementally
      to optimize for a fast start of the response from the model.
    - Is always assumed to be the user's input (cannot be used to populate
      conversation history).
  """

  media_chunks: Optional[list[BlobDict]]
  """Inlined bytes data for media input."""

  audio: Optional[BlobDict]
  """The realtime audio input stream."""

  audio_stream_end: Optional[bool]
  """
Indicates that the audio stream has ended, e.g. because the microphone was
turned off.

This should only be sent when automatic activity detection is enabled
(which is the default).

The client can reopen the stream by sending an audio message.
"""

  video: Optional[BlobDict]
  """The realtime video input stream."""

  text: Optional[str]
  """The realtime text input stream."""

  activity_start: Optional[ActivityStartDict]
  """Marks the start of user activity."""

  activity_end: Optional[ActivityEndDict]
  """Marks the end of user activity."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveClientRealtimeInputOrDict = Union[
    LiveClientRealtimeInput, LiveClientRealtimeInputDict
]


class LiveClientToolResponse(_common.BaseModel):
  """Client generated response to a `ToolCall` received from the server.

  Individual `FunctionResponse` objects are matched to the respective
  `FunctionCall` objects by the `id` field.

  Note that in the unary and server-streaming GenerateContent APIs function
  calling happens by exchanging the `Content` parts, while in the bidi
  GenerateContent APIs function calling happens over this dedicated set of
  messages.
  """

  function_responses: Optional[list[FunctionResponse]] = Field(
      default=None, description="""The response to the function calls."""
  )


class LiveClientToolResponseDict(TypedDict, total=False):
  """Client generated response to a `ToolCall` received from the server.

  Individual `FunctionResponse` objects are matched to the respective
  `FunctionCall` objects by the `id` field.

  Note that in the unary and server-streaming GenerateContent APIs function
  calling happens by exchanging the `Content` parts, while in the bidi
  GenerateContent APIs function calling happens over this dedicated set of
  messages.
  """

  function_responses: Optional[list[FunctionResponseDict]]
  """The response to the function calls."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveClientToolResponseOrDict = Union[
    LiveClientToolResponse, LiveClientToolResponseDict
]


# PIL is an optional dependency (see the guarded import at the top of the
# file): the media unions only admit PIL images when Pillow was importable.
if _is_pillow_image_imported:
  BlobImageUnion = Union[PIL_Image, Blob]
else:
  BlobImageUnion = Blob  # type: ignore[misc]


# Same as BlobImageUnion, but additionally accepts the BlobDict form.
if _is_pillow_image_imported:
  BlobImageUnionDict = Union[PIL_Image, Blob, BlobDict]
else:
  BlobImageUnionDict = Union[Blob, BlobDict]  # type: ignore[misc]


class LiveSendRealtimeInputParameters(_common.BaseModel):
  """Parameters for sending realtime input to the live API."""

  media: Optional[BlobImageUnion] = Field(
      default=None, description="""Realtime input to send to the session."""
  )
  audio: Optional[Blob] = Field(
      default=None, description="""The realtime audio input stream."""
  )
  audio_stream_end: Optional[bool] = Field(
      default=None,
      description="""
Indicates that the audio stream has ended, e.g. because the microphone was
turned off.

This should only be sent when automatic activity detection is enabled
(which is the default).

The client can reopen the stream by sending an audio message.
""",
  )
  video: Optional[BlobImageUnion] = Field(
      default=None, description="""The realtime video input stream."""
  )
  text: Optional[str] = Field(
      default=None, description="""The realtime text input stream."""
  )
  activity_start: Optional[ActivityStart] = Field(
      default=None, description="""Marks the start of user activity."""
  )
  activity_end: Optional[ActivityEnd] = Field(
      default=None, description="""Marks the end of user activity."""
  )


class LiveSendRealtimeInputParametersDict(TypedDict, total=False):
  """Parameters for sending realtime input to the live API."""

  media: Optional[BlobImageUnionDict]
  """Realtime input to send to the session."""

  audio: Optional[BlobDict]
  """The realtime audio input stream."""

  audio_stream_end: Optional[bool]
  """
Indicates that the audio stream has ended, e.g. because the microphone was
turned off.

This should only be sent when automatic activity detection is enabled
(which is the default).

The client can reopen the stream by sending an audio message.
"""

  video: Optional[BlobImageUnionDict]
  """The realtime video input stream."""

  text: Optional[str]
  """The realtime text input stream."""

  activity_start: Optional[ActivityStartDict]
  """Marks the start of user activity."""

  activity_end: Optional[ActivityEndDict]
  """Marks the end of user activity."""


# Accepts either the Pydantic model or its TypedDict equivalent.
LiveSendRealtimeInputParametersOrDict = Union[
    LiveSendRealtimeInputParameters, LiveSendRealtimeInputParametersDict
]


# Envelope model for client -> server messages on a live connection; all fields
# are optional and default to None.
class LiveClientMessage(_common.BaseModel):
  """Messages sent by the client in the API call."""

  setup: Optional[LiveClientSetup] = Field(
      default=None,
      description="""Message to be sent by the system when connecting to the API. SDK users should not send this message.""",
  )
  client_content: Optional[LiveClientContent] = Field(
      default=None,
      description="""Incremental update of the current conversation delivered from the client.""",
  )
  realtime_input: Optional[LiveClientRealtimeInput] = Field(
      default=None, description="""User input that is sent in real time."""
  )
  tool_response: Optional[LiveClientToolResponse] = Field(
      default=None,
      description="""Response to a `ToolCallMessage` received from the server.""",
  )


# TypedDict mirror of LiveClientMessage; either form is accepted wherever
# LiveClientMessageOrDict is used.
class LiveClientMessageDict(TypedDict, total=False):
  """Messages sent by the client in the API call."""

  setup: Optional[LiveClientSetupDict]
  """Message to be sent by the system when connecting to the API. SDK users should not send this message."""

  client_content: Optional[LiveClientContentDict]
  """Incremental update of the current conversation delivered from the client."""

  realtime_input: Optional[LiveClientRealtimeInputDict]
  """User input that is sent in real time."""

  tool_response: Optional[LiveClientToolResponseDict]
  """Response to a `ToolCallMessage` received from the server."""


LiveClientMessageOrDict = Union[LiveClientMessage, LiveClientMessageDict]


# Connection-time configuration for a live session. Every field is optional;
# None means "use the server-side default".
class LiveConnectConfig(_common.BaseModel):
  """Session config for the API connection."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  generation_config: Optional[GenerationConfig] = Field(
      default=None,
      description="""The generation configuration for the session.""",
  )
  response_modalities: Optional[list[Modality]] = Field(
      default=None,
      description="""The requested modalities of the response. Represents the set of
      modalities that the model can return. Defaults to AUDIO if not specified.
      """,
  )
  temperature: Optional[float] = Field(
      default=None,
      description="""Value that controls the degree of randomness in token selection.
      Lower temperatures are good for prompts that require a less open-ended or
      creative response, while higher temperatures can lead to more diverse or
      creative results.
      """,
  )
  top_p: Optional[float] = Field(
      default=None,
      description="""Tokens are selected from the most to least probable until the sum
      of their probabilities equals this value. Use a lower value for less
      random responses and a higher value for more random responses.
      """,
  )
  top_k: Optional[float] = Field(
      default=None,
      description="""For each token selection step, the ``top_k`` tokens with the
      highest probabilities are sampled. Then tokens are further filtered based
      on ``top_p`` with the final token selected using temperature sampling. Use
      a lower number for less random responses and a higher number for more
      random responses.
      """,
  )
  max_output_tokens: Optional[int] = Field(
      default=None,
      description="""Maximum number of tokens that can be generated in the response.
      """,
  )
  media_resolution: Optional[MediaResolution] = Field(
      default=None,
      description="""If specified, the media resolution specified will be used.
      """,
  )
  seed: Optional[int] = Field(
      default=None,
      description="""When ``seed`` is fixed to a specific number, the model makes a best
      effort to provide the same response for repeated requests. By default, a
      random number is used.
      """,
  )
  speech_config: Optional[SpeechConfig] = Field(
      default=None,
      description="""The speech generation configuration.
      """,
  )
  thinking_config: Optional[ThinkingConfig] = Field(
      default=None,
      description="""Config for thinking features.
      An error will be returned if this field is set for models that don't
      support thinking.
      """,
  )
  enable_affective_dialog: Optional[bool] = Field(
      default=None,
      description="""If enabled, the model will detect emotions and adapt its responses accordingly.""",
  )
  system_instruction: Optional[ContentUnion] = Field(
      default=None,
      description="""The user provided system instructions for the model.
      Note: only text should be used in parts and content in each part will be
      in a separate paragraph.""",
  )
  tools: Optional[ToolListUnion] = Field(
      default=None,
      description="""A list of `Tools` the model may use to generate the next response.

      A `Tool` is a piece of code that enables the system to interact with
      external systems to perform an action, or set of actions, outside of
      knowledge and scope of the model.""",
  )
  session_resumption: Optional[SessionResumptionConfig] = Field(
      default=None,
      description="""Configures session resumption mechanism.

If included the server will send SessionResumptionUpdate messages.""",
  )
  input_audio_transcription: Optional[AudioTranscriptionConfig] = Field(
      default=None,
      description="""The transcription of the input aligns with the input audio language.
      """,
  )
  output_audio_transcription: Optional[AudioTranscriptionConfig] = Field(
      default=None,
      description="""The transcription of the output aligns with the language code
      specified for the output audio.
      """,
  )
  realtime_input_config: Optional[RealtimeInputConfig] = Field(
      default=None,
      description="""Configures the realtime input behavior in BidiGenerateContent.""",
  )
  context_window_compression: Optional[ContextWindowCompressionConfig] = Field(
      default=None,
      description="""Configures context window compression mechanism.

      If included, server will compress context window to fit into given length.""",
  )
  proactivity: Optional[ProactivityConfig] = Field(
      default=None,
      description="""Configures the proactivity of the model. This allows the model to respond proactively to
    the input and to ignore irrelevant input.""",
  )


# TypedDict mirror of LiveConnectConfig; either form is accepted wherever
# LiveConnectConfigOrDict is used.
class LiveConnectConfigDict(TypedDict, total=False):
  """Session config for the API connection."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  generation_config: Optional[GenerationConfigDict]
  """The generation configuration for the session."""

  response_modalities: Optional[list[Modality]]
  """The requested modalities of the response. Represents the set of
      modalities that the model can return. Defaults to AUDIO if not specified.
      """

  temperature: Optional[float]
  """Value that controls the degree of randomness in token selection.
      Lower temperatures are good for prompts that require a less open-ended or
      creative response, while higher temperatures can lead to more diverse or
      creative results.
      """

  top_p: Optional[float]
  """Tokens are selected from the most to least probable until the sum
      of their probabilities equals this value. Use a lower value for less
      random responses and a higher value for more random responses.
      """

  top_k: Optional[float]
  """For each token selection step, the ``top_k`` tokens with the
      highest probabilities are sampled. Then tokens are further filtered based
      on ``top_p`` with the final token selected using temperature sampling. Use
      a lower number for less random responses and a higher number for more
      random responses.
      """

  max_output_tokens: Optional[int]
  """Maximum number of tokens that can be generated in the response.
      """

  media_resolution: Optional[MediaResolution]
  """If specified, the media resolution specified will be used.
      """

  seed: Optional[int]
  """When ``seed`` is fixed to a specific number, the model makes a best
      effort to provide the same response for repeated requests. By default, a
      random number is used.
      """

  speech_config: Optional[SpeechConfigDict]
  """The speech generation configuration.
      """

  thinking_config: Optional[ThinkingConfigDict]
  """Config for thinking features.
      An error will be returned if this field is set for models that don't
      support thinking.
      """

  enable_affective_dialog: Optional[bool]
  """If enabled, the model will detect emotions and adapt its responses accordingly."""

  system_instruction: Optional[ContentUnionDict]
  """The user provided system instructions for the model.
      Note: only text should be used in parts and content in each part will be
      in a separate paragraph."""

  tools: Optional[ToolListUnionDict]
  """A list of `Tools` the model may use to generate the next response.

      A `Tool` is a piece of code that enables the system to interact with
      external systems to perform an action, or set of actions, outside of
      knowledge and scope of the model."""

  session_resumption: Optional[SessionResumptionConfigDict]
  """Configures session resumption mechanism.

If included the server will send SessionResumptionUpdate messages."""

  input_audio_transcription: Optional[AudioTranscriptionConfigDict]
  """The transcription of the input aligns with the input audio language.
      """

  output_audio_transcription: Optional[AudioTranscriptionConfigDict]
  """The transcription of the output aligns with the language code
      specified for the output audio.
      """

  realtime_input_config: Optional[RealtimeInputConfigDict]
  """Configures the realtime input behavior in BidiGenerateContent."""

  context_window_compression: Optional[ContextWindowCompressionConfigDict]
  """Configures context window compression mechanism.

      If included, server will compress context window to fit into given length."""

  proactivity: Optional[ProactivityConfigDict]
  """Configures the proactivity of the model. This allows the model to respond proactively to
    the input and to ignore irrelevant input."""


LiveConnectConfigOrDict = Union[LiveConnectConfig, LiveConnectConfigDict]


# Request parameters for opening a live connection: the model ID plus an
# optional LiveConnectConfig.
class LiveConnectParameters(_common.BaseModel):
  """Parameters for connecting to the live API."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_.""",
  )
  config: Optional[LiveConnectConfig] = Field(
      default=None,
      description="""Optional configuration parameters for the request.
      """,
  )


# TypedDict mirror of LiveConnectParameters.
class LiveConnectParametersDict(TypedDict, total=False):
  """Parameters for connecting to the live API."""

  model: Optional[str]
  """ID of the model to use. For a list of models, see `Google models
    <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models>`_."""

  config: Optional[LiveConnectConfigDict]
  """Optional configuration parameters for the request.
      """


LiveConnectParametersOrDict = Union[
    LiveConnectParameters, LiveConnectParametersDict
]


# Initial setup message for a live-music connection; carries only the model
# resource name.
class LiveMusicClientSetup(_common.BaseModel):
  """Message to be sent by the system when connecting to the API."""

  model: Optional[str] = Field(
      default=None,
      description="""The model's resource name. Format: `models/{model}`.""",
  )


# TypedDict mirror of LiveMusicClientSetup.
class LiveMusicClientSetupDict(TypedDict, total=False):
  """Message to be sent by the system when connecting to the API."""

  model: Optional[str]
  """The model's resource name. Format: `models/{model}`."""


LiveMusicClientSetupOrDict = Union[
    LiveMusicClientSetup, LiveMusicClientSetupDict
]


# A (text, weight) pair used to steer music generation; weights are relative
# and normalized across the prompts of one LiveMusicClientContent message.
class WeightedPrompt(_common.BaseModel):
  """Maps a prompt to a relative weight to steer music generation."""

  text: Optional[str] = Field(default=None, description="""Text prompt.""")
  weight: Optional[float] = Field(
      default=None,
      description="""Weight of the prompt. The weight is used to control the relative
      importance of the prompt. Higher weights are more important than lower
      weights.

      Weight must not be 0. Weights of all weighted_prompts in this
      LiveMusicClientContent message will be normalized.""",
  )


# TypedDict mirror of WeightedPrompt.
class WeightedPromptDict(TypedDict, total=False):
  """Maps a prompt to a relative weight to steer music generation."""

  text: Optional[str]
  """Text prompt."""

  weight: Optional[float]
  """Weight of the prompt. The weight is used to control the relative
      importance of the prompt. Higher weights are more important than lower
      weights.

      Weight must not be 0. Weights of all weighted_prompts in this
      LiveMusicClientContent message will be normalized."""


WeightedPromptOrDict = Union[WeightedPrompt, WeightedPromptDict]


# Client content for live music: a list of weighted prompts driving the model.
class LiveMusicClientContent(_common.BaseModel):
  """User input to start or steer the music."""

  weighted_prompts: Optional[list[WeightedPrompt]] = Field(
      default=None, description="""Weighted prompts as the model input."""
  )


# TypedDict mirror of LiveMusicClientContent.
class LiveMusicClientContentDict(TypedDict, total=False):
  """User input to start or steer the music."""

  weighted_prompts: Optional[list[WeightedPromptDict]]
  """Weighted prompts as the model input."""


LiveMusicClientContentOrDict = Union[
    LiveMusicClientContent, LiveMusicClientContentDict
]


# Knobs for live music generation (sampling, musical attributes, and muting
# flags). All fields are optional; None defers to server-side defaults.
class LiveMusicGenerationConfig(_common.BaseModel):
  """Configuration for music generation."""

  temperature: Optional[float] = Field(
      default=None,
      description="""Controls the variance in audio generation. Higher values produce
      higher variance. Range is [0.0, 3.0].""",
  )
  top_k: Optional[int] = Field(
      default=None,
      description="""Controls how the model selects tokens for output. Samples the topK
      tokens with the highest probabilities. Range is [1, 1000].""",
  )
  seed: Optional[int] = Field(
      default=None,
      description="""Seeds audio generation. If not set, the request uses a randomly
      generated seed.""",
  )
  guidance: Optional[float] = Field(
      default=None,
      description="""Controls how closely the model follows prompts.
      Higher guidance follows more closely, but will make transitions more
      abrupt. Range is [0.0, 6.0].""",
  )
  bpm: Optional[int] = Field(
      default=None, description="""Beats per minute. Range is [60, 200]."""
  )
  density: Optional[float] = Field(
      default=None, description="""Density of sounds. Range is [0.0, 1.0]."""
  )
  brightness: Optional[float] = Field(
      default=None,
      description="""Brightness of the music. Range is [0.0, 1.0].""",
  )
  scale: Optional[Scale] = Field(
      default=None, description="""Scale of the generated music."""
  )
  mute_bass: Optional[bool] = Field(
      default=None,
      description="""Whether the audio output should contain bass.""",
  )
  mute_drums: Optional[bool] = Field(
      default=None,
      description="""Whether the audio output should contain drums.""",
  )
  only_bass_and_drums: Optional[bool] = Field(
      default=None,
      description="""Whether the audio output should contain only bass and drums.""",
  )
  music_generation_mode: Optional[MusicGenerationMode] = Field(
      default=None,
      description="""The mode of music generation. Default mode is QUALITY.""",
  )


# TypedDict mirror of LiveMusicGenerationConfig; either form is accepted
# wherever LiveMusicGenerationConfigOrDict is used.
class LiveMusicGenerationConfigDict(TypedDict, total=False):
  """Configuration for music generation."""

  temperature: Optional[float]
  """Controls the variance in audio generation. Higher values produce
      higher variance. Range is [0.0, 3.0]."""

  top_k: Optional[int]
  """Controls how the model selects tokens for output. Samples the topK
      tokens with the highest probabilities. Range is [1, 1000]."""

  seed: Optional[int]
  """Seeds audio generation. If not set, the request uses a randomly
      generated seed."""

  guidance: Optional[float]
  """Controls how closely the model follows prompts.
      Higher guidance follows more closely, but will make transitions more
      abrupt. Range is [0.0, 6.0]."""

  bpm: Optional[int]
  """Beats per minute. Range is [60, 200]."""

  density: Optional[float]
  """Density of sounds. Range is [0.0, 1.0]."""

  brightness: Optional[float]
  """Brightness of the music. Range is [0.0, 1.0]."""

  scale: Optional[Scale]
  """Scale of the generated music."""

  mute_bass: Optional[bool]
  """Whether the audio output should contain bass."""

  mute_drums: Optional[bool]
  """Whether the audio output should contain drums."""

  only_bass_and_drums: Optional[bool]
  """Whether the audio output should contain only bass and drums."""

  music_generation_mode: Optional[MusicGenerationMode]
  """The mode of music generation. Default mode is QUALITY."""


LiveMusicGenerationConfigOrDict = Union[
    LiveMusicGenerationConfig, LiveMusicGenerationConfigDict
]


# Envelope model for client -> server messages on a live-music connection.
class LiveMusicClientMessage(_common.BaseModel):
  """Messages sent by the client in the LiveMusicClientMessage call."""

  setup: Optional[LiveMusicClientSetup] = Field(
      default=None,
      description="""Message to be sent in the first (and only in the first) `LiveMusicClientMessage`.
      Clients should wait for a `LiveMusicSetupComplete` message before
      sending any additional messages.""",
  )
  client_content: Optional[LiveMusicClientContent] = Field(
      default=None, description="""User input to influence music generation."""
  )
  music_generation_config: Optional[LiveMusicGenerationConfig] = Field(
      default=None, description="""Configuration for music generation."""
  )
  playback_control: Optional[LiveMusicPlaybackControl] = Field(
      default=None,
      description="""Playback control signal for the music generation.""",
  )


# TypedDict mirror of LiveMusicClientMessage. Note: playback_control has no
# Dict variant here, so the same type is used in both forms.
class LiveMusicClientMessageDict(TypedDict, total=False):
  """Messages sent by the client in the LiveMusicClientMessage call."""

  setup: Optional[LiveMusicClientSetupDict]
  """Message to be sent in the first (and only in the first) `LiveMusicClientMessage`.
      Clients should wait for a `LiveMusicSetupComplete` message before
      sending any additional messages."""

  client_content: Optional[LiveMusicClientContentDict]
  """User input to influence music generation."""

  music_generation_config: Optional[LiveMusicGenerationConfigDict]
  """Configuration for music generation."""

  playback_control: Optional[LiveMusicPlaybackControl]
  """Playback control signal for the music generation."""


LiveMusicClientMessageOrDict = Union[
    LiveMusicClientMessage, LiveMusicClientMessageDict
]


# Empty marker message: its presence alone signals that setup finished.
class LiveMusicServerSetupComplete(_common.BaseModel):
  """Sent in response to a `LiveMusicClientSetup` message from the client."""

  pass


# TypedDict mirror of LiveMusicServerSetupComplete (also field-less).
class LiveMusicServerSetupCompleteDict(TypedDict, total=False):
  """Sent in response to a `LiveMusicClientSetup` message from the client."""

  pass


LiveMusicServerSetupCompleteOrDict = Union[
    LiveMusicServerSetupComplete, LiveMusicServerSetupCompleteDict
]


# Provenance for one generated audio chunk: the prompts and config that
# produced it.
class LiveMusicSourceMetadata(_common.BaseModel):
  """Prompts and config used for generating this audio chunk."""

  client_content: Optional[LiveMusicClientContent] = Field(
      default=None,
      description="""Weighted prompts for generating this audio chunk.""",
  )
  music_generation_config: Optional[LiveMusicGenerationConfig] = Field(
      default=None,
      description="""Music generation config for generating this audio chunk.""",
  )


# TypedDict mirror of LiveMusicSourceMetadata.
class LiveMusicSourceMetadataDict(TypedDict, total=False):
  """Prompts and config used for generating this audio chunk."""

  client_content: Optional[LiveMusicClientContentDict]
  """Weighted prompts for generating this audio chunk."""

  music_generation_config: Optional[LiveMusicGenerationConfigDict]
  """Music generation config for generating this audio chunk."""


LiveMusicSourceMetadataOrDict = Union[
    LiveMusicSourceMetadata, LiveMusicSourceMetadataDict
]


# One chunk of generated audio: raw bytes, MIME type, and the metadata that
# produced it.
class AudioChunk(_common.BaseModel):
  """Representation of an audio chunk."""

  data: Optional[bytes] = Field(
      default=None, description="""Raw bytes of audio data."""
  )
  mime_type: Optional[str] = Field(
      default=None, description="""MIME type of the audio chunk."""
  )
  source_metadata: Optional[LiveMusicSourceMetadata] = Field(
      default=None,
      description="""Prompts and config used for generating this audio chunk.""",
  )


# TypedDict mirror of AudioChunk.
class AudioChunkDict(TypedDict, total=False):
  """Representation of an audio chunk."""

  data: Optional[bytes]
  """Raw bytes of audio data."""

  mime_type: Optional[str]
  """MIME type of the audio chunk."""

  source_metadata: Optional[LiveMusicSourceMetadataDict]
  """Prompts and config used for generating this audio chunk."""


AudioChunkOrDict = Union[AudioChunk, AudioChunkDict]


# Server-generated audio payload; may arrive faster than real time, so clients
# buffer for playback.
class LiveMusicServerContent(_common.BaseModel):
  """Server update generated by the model in response to client messages.

  Content is generated as quickly as possible, and not in real time.
  Clients may choose to buffer and play it out in real time.
  """

  audio_chunks: Optional[list[AudioChunk]] = Field(
      default=None,
      description="""The audio chunks that the model has generated.""",
  )


# TypedDict mirror of LiveMusicServerContent.
class LiveMusicServerContentDict(TypedDict, total=False):
  """Server update generated by the model in response to client messages.

  Content is generated as quickly as possible, and not in real time.
  Clients may choose to buffer and play it out in real time.
  """

  audio_chunks: Optional[list[AudioChunkDict]]
  """The audio chunks that the model has generated."""


LiveMusicServerContentOrDict = Union[
    LiveMusicServerContent, LiveMusicServerContentDict
]


# Reports a prompt the server rejected, together with the rejection reason.
class LiveMusicFilteredPrompt(_common.BaseModel):
  """A prompt that was filtered with the reason."""

  text: Optional[str] = Field(
      default=None, description="""The text prompt that was filtered."""
  )
  filtered_reason: Optional[str] = Field(
      default=None, description="""The reason the prompt was filtered."""
  )


# TypedDict mirror of LiveMusicFilteredPrompt.
class LiveMusicFilteredPromptDict(TypedDict, total=False):
  """A prompt that was filtered with the reason."""

  text: Optional[str]
  """The text prompt that was filtered."""

  filtered_reason: Optional[str]
  """The reason the prompt was filtered."""


LiveMusicFilteredPromptOrDict = Union[
    LiveMusicFilteredPrompt, LiveMusicFilteredPromptDict
]


# Envelope model for server -> client messages on a live-music connection.
class LiveMusicServerMessage(_common.BaseModel):
  """Response message for the LiveMusicClientMessage call."""

  setup_complete: Optional[LiveMusicServerSetupComplete] = Field(
      default=None,
      description="""Message sent in response to a `LiveMusicClientSetup` message from the client.
      Clients should wait for this message before sending any additional messages.""",
  )
  server_content: Optional[LiveMusicServerContent] = Field(
      default=None,
      description="""Content generated by the model in response to client messages.""",
  )
  filtered_prompt: Optional[LiveMusicFilteredPrompt] = Field(
      default=None,
      description="""A prompt that was filtered with the reason.""",
  )


# TypedDict mirror of LiveMusicServerMessage.
class LiveMusicServerMessageDict(TypedDict, total=False):
  """Response message for the LiveMusicClientMessage call."""

  setup_complete: Optional[LiveMusicServerSetupCompleteDict]
  """Message sent in response to a `LiveMusicClientSetup` message from the client.
      Clients should wait for this message before sending any additional messages."""

  server_content: Optional[LiveMusicServerContentDict]
  """Content generated by the model in response to client messages."""

  filtered_prompt: Optional[LiveMusicFilteredPromptDict]
  """A prompt that was filtered with the reason."""


LiveMusicServerMessageOrDict = Union[
    LiveMusicServerMessage, LiveMusicServerMessageDict
]


# Request parameters for opening a live-music connection (model name only).
class LiveMusicConnectParameters(_common.BaseModel):
  """Parameters for connecting to the live API."""

  model: Optional[str] = Field(
      default=None, description="""The model's resource name."""
  )


# TypedDict mirror of LiveMusicConnectParameters.
class LiveMusicConnectParametersDict(TypedDict, total=False):
  """Parameters for connecting to the live API."""

  model: Optional[str]
  """The model's resource name."""


LiveMusicConnectParametersOrDict = Union[
    LiveMusicConnectParameters, LiveMusicConnectParametersDict
]


# Wrapper carrying a LiveMusicGenerationConfig for the set-config call.
class LiveMusicSetConfigParameters(_common.BaseModel):
  """Parameters for setting config for the live music API."""

  music_generation_config: Optional[LiveMusicGenerationConfig] = Field(
      default=None, description="""Configuration for music generation."""
  )


# TypedDict mirror of LiveMusicSetConfigParameters.
class LiveMusicSetConfigParametersDict(TypedDict, total=False):
  """Parameters for setting config for the live music API."""

  music_generation_config: Optional[LiveMusicGenerationConfigDict]
  """Configuration for music generation."""


LiveMusicSetConfigParametersOrDict = Union[
    LiveMusicSetConfigParameters, LiveMusicSetConfigParametersDict
]


# Wrapper carrying the weighted prompts for the set-weighted-prompts call.
class LiveMusicSetWeightedPromptsParameters(_common.BaseModel):
  """Parameters for setting weighted prompts for the live music API."""

  weighted_prompts: Optional[list[WeightedPrompt]] = Field(
      default=None,
      description="""A map of text prompts to weights to use for the generation request.""",
  )


# TypedDict mirror of LiveMusicSetWeightedPromptsParameters.
class LiveMusicSetWeightedPromptsParametersDict(TypedDict, total=False):
  """Parameters for setting weighted prompts for the live music API."""

  weighted_prompts: Optional[list[WeightedPromptDict]]
  """A map of text prompts to weights to use for the generation request."""


LiveMusicSetWeightedPromptsParametersOrDict = Union[
    LiveMusicSetWeightedPromptsParameters,
    LiveMusicSetWeightedPromptsParametersDict,
]


# Result type for auth_tokens.create: just the token's resource name.
class AuthToken(_common.BaseModel):
  """Config for auth_tokens.create parameters."""

  name: Optional[str] = Field(
      default=None, description="""The name of the auth token."""
  )


# TypedDict mirror of AuthToken.
class AuthTokenDict(TypedDict, total=False):
  """Config for auth_tokens.create parameters."""

  name: Optional[str]
  """The name of the auth token."""


AuthTokenOrDict = Union[AuthToken, AuthTokenDict]


# Constrains what an ephemeral auth token may be used for: a model ID and a
# LiveConnectConfig baked into the token.
class LiveConnectConstraints(_common.BaseModel):
  """Config for LiveConnectConstraints for Auth Token creation."""

  model: Optional[str] = Field(
      default=None,
      description="""ID of the model to configure in the ephemeral token for Live API.
      For a list of models, see `Gemini models
      <https://ai.google.dev/gemini-api/docs/models>`.""",
  )
  config: Optional[LiveConnectConfig] = Field(
      default=None,
      description="""Configuration specific to Live API connections created using this token.""",
  )


# TypedDict mirror of LiveConnectConstraints.
class LiveConnectConstraintsDict(TypedDict, total=False):
  """Config for LiveConnectConstraints for Auth Token creation."""

  model: Optional[str]
  """ID of the model to configure in the ephemeral token for Live API.
      For a list of models, see `Gemini models
      <https://ai.google.dev/gemini-api/docs/models>`."""

  config: Optional[LiveConnectConfigDict]
  """Configuration specific to Live API connections created using this token."""


LiveConnectConstraintsOrDict = Union[
    LiveConnectConstraints, LiveConnectConstraintsDict
]


# Options for minting an ephemeral auth token: expiry times, use count, and
# Live API constraints. Datetime fields carry their own documented defaults.
class CreateAuthTokenConfig(_common.BaseModel):
  """Optional parameters."""

  http_options: Optional[HttpOptions] = Field(
      default=None, description="""Used to override HTTP request options."""
  )
  expire_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""An optional time after which, when using the resulting token,
      messages in Live API sessions will be rejected. (Gemini may
      preemptively close the session after this time.)

      If not set then this defaults to 30 minutes in the future. If set, this
      value must be less than 20 hours in the future.""",
  )
  new_session_expire_time: Optional[datetime.datetime] = Field(
      default=None,
      description="""The time after which new Live API sessions using the token
      resulting from this request will be rejected.

      If not set this defaults to 60 seconds in the future. If set, this value
      must be less than 20 hours in the future.""",
  )
  uses: Optional[int] = Field(
      default=None,
      description="""The number of times the token can be used. If this value is zero
      then no limit is applied. Default is 1. Resuming a Live API session does
      not count as a use.""",
  )
  live_connect_constraints: Optional[LiveConnectConstraints] = Field(
      default=None,
      description="""Configuration specific to Live API connections created using this token.""",
  )
  lock_additional_fields: Optional[list[str]] = Field(
      default=None,
      description="""Additional fields to lock in the effective LiveConnectParameters.""",
  )


# TypedDict mirror of CreateAuthTokenConfig; either form is accepted wherever
# CreateAuthTokenConfigOrDict is used.
class CreateAuthTokenConfigDict(TypedDict, total=False):
  """Optional parameters."""

  http_options: Optional[HttpOptionsDict]
  """Used to override HTTP request options."""

  expire_time: Optional[datetime.datetime]
  """An optional time after which, when using the resulting token,
      messages in Live API sessions will be rejected. (Gemini may
      preemptively close the session after this time.)

      If not set then this defaults to 30 minutes in the future. If set, this
      value must be less than 20 hours in the future."""

  new_session_expire_time: Optional[datetime.datetime]
  """The time after which new Live API sessions using the token
      resulting from this request will be rejected.

      If not set this defaults to 60 seconds in the future. If set, this value
      must be less than 20 hours in the future."""

  uses: Optional[int]
  """The number of times the token can be used. If this value is zero
      then no limit is applied. Default is 1. Resuming a Live API session does
      not count as a use."""

  live_connect_constraints: Optional[LiveConnectConstraintsDict]
  """Configuration specific to Live API connections created using this token."""

  lock_additional_fields: Optional[list[str]]
  """Additional fields to lock in the effective LiveConnectParameters."""


CreateAuthTokenConfigOrDict = Union[
    CreateAuthTokenConfig, CreateAuthTokenConfigDict
]


# Request wrapper for auth_tokens.create; everything lives in the config.
class CreateAuthTokenParameters(_common.BaseModel):
  """Config for auth_tokens.create parameters."""

  config: Optional[CreateAuthTokenConfig] = Field(
      default=None, description="""Optional parameters for the request."""
  )


# TypedDict mirror of CreateAuthTokenParameters.
class CreateAuthTokenParametersDict(TypedDict, total=False):
  """Config for auth_tokens.create parameters."""

  config: Optional[CreateAuthTokenConfigDict]
  """Optional parameters for the request."""


CreateAuthTokenParametersOrDict = Union[
    CreateAuthTokenParameters, CreateAuthTokenParametersDict
]


# Result of a local-tokenizer count_tokens call.
class CountTokensResult(_common.BaseModel):
  """Local tokenizer count tokens result."""

  total_tokens: Optional[int] = Field(
      default=None, description="""The total number of tokens."""
  )


# TypedDict mirror of CountTokensResult.
class CountTokensResultDict(TypedDict, total=False):
  """Local tokenizer count tokens result."""

  total_tokens: Optional[int]
  """The total number of tokens."""


CountTokensResultOrDict = Union[CountTokensResult, CountTokensResultDict]


# Result of a local-tokenizer compute_tokens call: per-input token info.
class ComputeTokensResult(_common.BaseModel):
  """Local tokenizer compute tokens result."""

  tokens_info: Optional[list[TokensInfo]] = Field(
      default=None, description="""Lists of tokens info from the input."""
  )


# TypedDict mirror of ComputeTokensResult.
class ComputeTokensResultDict(TypedDict, total=False):
  """Local tokenizer compute tokens result."""

  tokens_info: Optional[list[TokensInfoDict]]
  """Lists of tokens info from the input."""


ComputeTokensResultOrDict = Union[ComputeTokensResult, ComputeTokensResultDict]


class CreateTuningJobParameters(_common.BaseModel):
  """Fine-tuning job creation parameters - optional fields."""

  base_model: Optional[str] = Field(
      default=None,
      description="""The base model that is being tuned, e.g., "gemini-2.5-flash".""",
  )
  training_dataset: Optional[TuningDataset] = Field(
      default=None,
      description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
  )
  config: Optional[CreateTuningJobConfig] = Field(
      default=None, description="""Configuration for the tuning job."""
  )


class CreateTuningJobParametersDict(TypedDict, total=False):
  """Fine-tuning job creation parameters - optional fields."""

  # total=False: every key may be omitted.
  base_model: Union[str, None]
  """The base model that is being tuned, e.g., "gemini-2.5-flash"."""

  training_dataset: Union[TuningDatasetDict, None]
  """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""

  config: Union[CreateTuningJobConfigDict, None]
  """Configuration for the tuning job."""


# Accepts either the Pydantic model or its TypedDict equivalent.
CreateTuningJobParametersOrDict = Union[
    CreateTuningJobParameters, CreateTuningJobParametersDict
]


class UserContent(Content):
  """UserContent facilitates the creation of a Content object with a user role.

  Example usages:


  - Create a user Content object with a string:
    user_content = UserContent("Why is the sky blue?")
  - Create a user Content object with a file data Part object:
    user_content = UserContent(Part.from_uri(file_uri="gs://bucket/file.txt",
    mime_type="text/plain"))
  - Create a user Content object with byte data Part object:
    user_content = UserContent(Part.from_bytes(data=b"Hello, World!",
    mime_type="text/plain"))

    You can create a user Content object using other classmethods in the Part
    class as well.
    You can also create a user Content using a list of Part objects or strings.
  """

  # role is fixed to 'user' and cannot be set or changed by callers.
  role: Literal['user'] = Field(default='user', init=False, frozen=True)
  parts: list[Part] = Field()

  def __init__(
      self, parts: Union['PartUnionDict', list['PartUnionDict'], list['Part']]
  ):
    """Converts *parts* via _transformers.t_parts and initializes the Content."""
    # Imported locally to avoid a circular import with _transformers.
    from . import _transformers as t

    super().__init__(parts=t.t_parts(parts=parts))


class ModelContent(Content):
  """ModelContent facilitates the creation of a Content object with a model role.

  Example usages:

  - Create a model Content object with a string:
    model_content = ModelContent("Why is the sky blue?")
  - Create a model Content object with a file data Part object:
    model_content = ModelContent(Part.from_uri(file_uri="gs://bucket/file.txt",
    mime_type="text/plain"))
  - Create a model Content object with byte data Part object:
    model_content = ModelContent(Part.from_bytes(data=b"Hello, World!",
    mime_type="text/plain"))

    You can create a model Content object using other classmethods in the Part
    class as well.
    You can also create a model Content using a list of Part objects or strings.
  """

  # role is fixed to 'model' and cannot be set or changed by callers.
  role: Literal['model'] = Field(default='model', init=False, frozen=True)
  parts: list[Part] = Field()

  def __init__(
      self, parts: Union['PartUnionDict', list['PartUnionDict'], list['Part']]
  ):
    """Converts *parts* via _transformers.t_parts and initializes the Content."""
    # Imported locally to avoid a circular import with _transformers.
    from . import _transformers as t

    super().__init__(parts=t.t_parts(parts=parts))


class CustomOutputFormatConfig(_common.BaseModel):
  """Config for custom output format."""

  # When True, metric output is the raw string rather than parsed fields.
  return_raw_output: Optional[bool] = Field(
      description="""Optional. Whether to return raw output.""",
      default=None,
  )


class CustomOutputFormatConfigDict(TypedDict, total=False):
  """Config for custom output format."""

  # total=False: every key may be omitted.
  return_raw_output: Union[bool, None]
  """Optional. Whether to return raw output."""


# Accepts either the Pydantic model or its TypedDict equivalent.
CustomOutputFormatConfigOrDict = Union[
    CustomOutputFormatConfig, CustomOutputFormatConfigDict
]


class BleuSpec(_common.BaseModel):
  """Spec for bleu metric."""

  # Toggles effective-order BLEU scoring.
  use_effective_order: Optional[bool] = Field(
      description="""Optional. Whether to use_effective_order to compute bleu score.""",
      default=None,
  )


class BleuSpecDict(TypedDict, total=False):
  """Spec for bleu metric."""

  # total=False: every key may be omitted.
  use_effective_order: Union[bool, None]
  """Optional. Whether to use_effective_order to compute bleu score."""


# Accepts either the Pydantic model or its TypedDict equivalent.
BleuSpecOrDict = Union[BleuSpec, BleuSpecDict]


class PairwiseMetricSpec(_common.BaseModel):
  """Spec for pairwise metric."""

  # Prompt template driving the pairwise comparison.
  metric_prompt_template: Optional[str] = Field(
      description="""Required. Metric prompt template for pairwise metric.""",
      default=None,
  )
  # Field name holding the baseline model's response.
  baseline_response_field_name: Optional[str] = Field(
      description="""Optional. The field name of the baseline response.""",
      default=None,
  )
  # Field name holding the candidate model's response.
  candidate_response_field_name: Optional[str] = Field(
      description="""Optional. The field name of the candidate response.""",
      default=None,
  )
  # Opting into custom output replaces pairwise_choice/explanation output.
  custom_output_format_config: Optional[CustomOutputFormatConfig] = Field(
      description="""Optional. CustomOutputFormatConfig allows customization of metric output. When this config is set, the default output is replaced with the raw output string. If a custom format is chosen, the `pairwise_choice` and `explanation` fields in the corresponding metric result will be empty.""",
      default=None,
  )
  # System instructions applied when evaluating the metric.
  system_instruction: Optional[str] = Field(
      description="""Optional. System instructions for pairwise metric.""",
      default=None,
  )


class PairwiseMetricSpecDict(TypedDict, total=False):
  """Spec for pairwise metric."""

  # total=False: every key may be omitted.
  metric_prompt_template: Union[str, None]
  """Required. Metric prompt template for pairwise metric."""

  baseline_response_field_name: Union[str, None]
  """Optional. The field name of the baseline response."""

  candidate_response_field_name: Union[str, None]
  """Optional. The field name of the candidate response."""

  custom_output_format_config: Union[CustomOutputFormatConfigDict, None]
  """Optional. CustomOutputFormatConfig allows customization of metric output. When this config is set, the default output is replaced with the raw output string. If a custom format is chosen, the `pairwise_choice` and `explanation` fields in the corresponding metric result will be empty."""

  system_instruction: Union[str, None]
  """Optional. System instructions for pairwise metric."""


# Accepts either the Pydantic model or its TypedDict equivalent.
PairwiseMetricSpecOrDict = Union[PairwiseMetricSpec, PairwiseMetricSpecDict]


class PointwiseMetricSpec(_common.BaseModel):
  """Spec for pointwise metric."""

  # Prompt template driving the pointwise evaluation.
  metric_prompt_template: Optional[str] = Field(
      description="""Required. Metric prompt template for pointwise metric.""",
      default=None,
  )
  # Opting into custom output replaces score/explanation output.
  custom_output_format_config: Optional[CustomOutputFormatConfig] = Field(
      description="""Optional. CustomOutputFormatConfig allows customization of metric output. By default, metrics return a score and explanation. When this config is set, the default output is replaced with either: - The raw output string. - A parsed output based on a user-defined schema. If a custom format is chosen, the `score` and `explanation` fields in the corresponding metric result will be empty.""",
      default=None,
  )
  # System instructions applied when evaluating the metric.
  system_instruction: Optional[str] = Field(
      description="""Optional. System instructions for pointwise metric.""",
      default=None,
  )


class PointwiseMetricSpecDict(TypedDict, total=False):
  """Spec for pointwise metric."""

  # total=False: every key may be omitted.
  metric_prompt_template: Union[str, None]
  """Required. Metric prompt template for pointwise metric."""

  custom_output_format_config: Union[CustomOutputFormatConfigDict, None]
  """Optional. CustomOutputFormatConfig allows customization of metric output. By default, metrics return a score and explanation. When this config is set, the default output is replaced with either: - The raw output string. - A parsed output based on a user-defined schema. If a custom format is chosen, the `score` and `explanation` fields in the corresponding metric result will be empty."""

  system_instruction: Union[str, None]
  """Optional. System instructions for pointwise metric."""


# Accepts either the Pydantic model or its TypedDict equivalent.
PointwiseMetricSpecOrDict = Union[PointwiseMetricSpec, PointwiseMetricSpecDict]


class RougeSpec(_common.BaseModel):
  """Spec for rouge metric."""

  # Which ROUGE variant to compute.
  rouge_type: Optional[str] = Field(
      description="""Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum.""",
      default=None,
  )
  # Only meaningful for the rougeLsum variant.
  split_summaries: Optional[bool] = Field(
      description="""Optional. Whether to split summaries while using rougeLsum.""",
      default=None,
  )
  # Toggles stemming before scoring.
  use_stemmer: Optional[bool] = Field(
      description="""Optional. Whether to use stemmer to compute rouge score.""",
      default=None,
  )


class RougeSpecDict(TypedDict, total=False):
  """Spec for rouge metric."""

  # total=False: every key may be omitted.
  rouge_type: Union[str, None]
  """Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum."""

  split_summaries: Union[bool, None]
  """Optional. Whether to split summaries while using rougeLsum."""

  use_stemmer: Union[bool, None]
  """Optional. Whether to use stemmer to compute rouge score."""


# Accepts either the Pydantic model or its TypedDict equivalent.
RougeSpecOrDict = Union[RougeSpec, RougeSpecDict]


class UploadToFileSearchStoreResponse(_common.BaseModel):
  """The response when long-running operation for uploading a file to a FileSearchStore complete."""

  # Raw HTTP response preserved for callers that need headers/status.
  sdk_http_response: Optional[HttpResponse] = Field(
      description="""Used to retain the full HTTP response.""",
      default=None,
  )
  # FileSearchStore that now contains the uploaded document.
  parent: Optional[str] = Field(
      description="""The name of the FileSearchStore containing Documents.""",
      default=None,
  )
  # Identifier of the newly imported document.
  document_name: Optional[str] = Field(
      description="""The identifier for the Document imported.""",
      default=None,
  )


class UploadToFileSearchStoreResponseDict(TypedDict, total=False):
  """The response when long-running operation for uploading a file to a FileSearchStore complete."""

  # total=False: every key may be omitted.
  sdk_http_response: Union[HttpResponseDict, None]
  """Used to retain the full HTTP response."""

  parent: Union[str, None]
  """The name of the FileSearchStore containing Documents."""

  document_name: Union[str, None]
  """The identifier for the Document imported."""


# Accepts either the Pydantic model or its TypedDict equivalent.
UploadToFileSearchStoreResponseOrDict = Union[
    UploadToFileSearchStoreResponse, UploadToFileSearchStoreResponseDict
]


class UploadToFileSearchStoreOperation(_common.BaseModel, Operation):
  """Long-running operation for uploading a file to a FileSearchStore."""

  # Populated only once the operation has completed.
  response: Optional[UploadToFileSearchStoreResponse] = Field(
      default=None,
      description="""The result of the UploadToFileSearchStore operation, available when the operation is done.""",
  )

  @classmethod
  def from_api_response(
      cls, api_response: Any, is_vertex_ai: bool = False
  ) -> Self:
    """Instantiates a UploadToFileSearchStoreOperation from an API response.

    Args:
      api_response: Raw operation payload returned by the API.
      is_vertex_ai: Accepted for signature parity but ignored here — the
        mldev converter is always used. NOTE(review): presumably this
        operation only exists on the Gemini Developer API surface (only the
        mldev converter is imported for it); confirm with the generator.

    Returns:
      The parsed operation instance.
    """

    # Always converts via the mldev mapping, regardless of is_vertex_ai.
    response_dict = _UploadToFileSearchStoreOperation_from_mldev(api_response)
    return cls._from_response(response=response_dict, kwargs={})