# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Code generated by the Google Gen AI SDK generator DO NOT EDIT.

import json
import logging
from typing import Any, Optional, Union
from urllib.parse import urlencode

from . import _api_module
from . import _common
from . import _transformers as t
from . import types
from ._common import get_value_by_path as getv
from ._common import set_value_by_path as setv
from .pagers import AsyncPager, Pager


logger = logging.getLogger('google_genai.tunings')


def _AutoraterConfig_from_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Vertex AI autorater config into the SDK's snake_case form."""
  result: dict[str, Any] = {}

  sampling_count = getv(from_object, ['samplingCount'])
  if sampling_count is not None:
    setv(result, ['sampling_count'], sampling_count)

  flip_enabled = getv(from_object, ['flipEnabled'])
  if flip_enabled is not None:
    setv(result, ['flip_enabled'], flip_enabled)

  autorater_model = getv(from_object, ['autoraterModel'])
  if autorater_model is not None:
    setv(result, ['autorater_model'], autorater_model)

  generation_config = getv(from_object, ['generationConfig'])
  if generation_config is not None:
    # Nested generation config is converted recursively.
    setv(
        result,
        ['generation_config'],
        _GenerationConfig_from_vertex(generation_config, result, root_object),
    )

  return result


def _AutoraterConfig_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts an SDK autorater config into the Vertex AI camelCase form."""
  result: dict[str, Any] = {}

  sampling_count = getv(from_object, ['sampling_count'])
  if sampling_count is not None:
    setv(result, ['samplingCount'], sampling_count)

  flip_enabled = getv(from_object, ['flip_enabled'])
  if flip_enabled is not None:
    setv(result, ['flipEnabled'], flip_enabled)

  autorater_model = getv(from_object, ['autorater_model'])
  if autorater_model is not None:
    setv(result, ['autoraterModel'], autorater_model)

  generation_config = getv(from_object, ['generation_config'])
  if generation_config is not None:
    # Nested generation config is converted recursively.
    setv(
        result,
        ['generationConfig'],
        _GenerationConfig_to_vertex(generation_config, result, root_object),
    )

  return result


def _CancelTuningJobParameters_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Builds the Gemini API request mapping for cancelling a tuning job."""
  result: dict[str, Any] = {}
  job_name = getv(from_object, ['name'])
  if job_name is not None:
    # The job name is routed into the request URL, not the body.
    setv(result, ['_url', 'name'], job_name)
  return result


def _CancelTuningJobParameters_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Builds the Vertex AI request mapping for cancelling a tuning job."""
  result: dict[str, Any] = {}
  job_name = getv(from_object, ['name'])
  if job_name is not None:
    # The job name is routed into the request URL, not the body.
    setv(result, ['_url', 'name'], job_name)
  return result


def _CancelTuningJobResponse_from_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Gemini API cancel-tuning-job response to SDK field names."""
  result: dict[str, Any] = {}
  http_response = getv(from_object, ['sdkHttpResponse'])
  if http_response is not None:
    setv(result, ['sdk_http_response'], http_response)
  return result


def _CancelTuningJobResponse_from_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Vertex AI cancel-tuning-job response to SDK field names."""
  result: dict[str, Any] = {}
  http_response = getv(from_object, ['sdkHttpResponse'])
  if http_response is not None:
    setv(result, ['sdk_http_response'], http_response)
  return result


def _CreateTuningJobConfig_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Maps a CreateTuningJobConfig onto a Gemini API tuning request.

  Supported fields are written into ``parent_object`` (the request body
  under construction — callers discard this function's return value).
  Fields with no Gemini API equivalent raise ``ValueError``.

  Raises:
    ValueError: if a Vertex-AI-only field is set (validation_dataset,
      description, export_last_checkpoint_only,
      pre_tuned_model_checkpoint_id, adapter_size, evaluation_config,
      labels, beta).
  """
  to_object: dict[str, Any] = {}

  if getv(from_object, ['validation_dataset']) is not None:
    raise ValueError(
        'validation_dataset parameter is not supported in Gemini API.'
    )

  if getv(from_object, ['tuned_model_display_name']) is not None:
    setv(
        parent_object,
        ['displayName'],
        getv(from_object, ['tuned_model_display_name']),
    )

  if getv(from_object, ['description']) is not None:
    raise ValueError('description parameter is not supported in Gemini API.')

  if getv(from_object, ['epoch_count']) is not None:
    setv(
        parent_object,
        ['tuningTask', 'hyperparameters', 'epochCount'],
        getv(from_object, ['epoch_count']),
    )

  if getv(from_object, ['learning_rate_multiplier']) is not None:
    # Written into parent_object like the other hyperparameters; writing
    # into to_object would silently drop the value because callers discard
    # this function's return value.
    setv(
        parent_object,
        ['tuningTask', 'hyperparameters', 'learningRateMultiplier'],
        getv(from_object, ['learning_rate_multiplier']),
    )

  if getv(from_object, ['export_last_checkpoint_only']) is not None:
    raise ValueError(
        'export_last_checkpoint_only parameter is not supported in Gemini API.'
    )

  if getv(from_object, ['pre_tuned_model_checkpoint_id']) is not None:
    raise ValueError(
        'pre_tuned_model_checkpoint_id parameter is not supported in Gemini'
        ' API.'
    )

  if getv(from_object, ['adapter_size']) is not None:
    raise ValueError('adapter_size parameter is not supported in Gemini API.')

  if getv(from_object, ['batch_size']) is not None:
    setv(
        parent_object,
        ['tuningTask', 'hyperparameters', 'batchSize'],
        getv(from_object, ['batch_size']),
    )

  if getv(from_object, ['learning_rate']) is not None:
    setv(
        parent_object,
        ['tuningTask', 'hyperparameters', 'learningRate'],
        getv(from_object, ['learning_rate']),
    )

  if getv(from_object, ['evaluation_config']) is not None:
    raise ValueError(
        'evaluation_config parameter is not supported in Gemini API.'
    )

  if getv(from_object, ['labels']) is not None:
    raise ValueError('labels parameter is not supported in Gemini API.')

  if getv(from_object, ['beta']) is not None:
    raise ValueError('beta parameter is not supported in Gemini API.')

  return to_object


def _CreateTuningJobConfig_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  to_object: dict[str, Any] = {}

  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['validation_dataset']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec'],
          _TuningValidationDataset_to_vertex(
              getv(from_object, ['validation_dataset']), to_object, root_object
          ),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['validation_dataset']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec'],
          _TuningValidationDataset_to_vertex(
              getv(from_object, ['validation_dataset']), to_object, root_object
          ),
      )

  if getv(from_object, ['tuned_model_display_name']) is not None:
    setv(
        parent_object,
        ['tunedModelDisplayName'],
        getv(from_object, ['tuned_model_display_name']),
    )

  if getv(from_object, ['description']) is not None:
    setv(parent_object, ['description'], getv(from_object, ['description']))

  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['epoch_count']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'hyperParameters', 'epochCount'],
          getv(from_object, ['epoch_count']),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['epoch_count']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec', 'hyperParameters', 'epochCount'],
          getv(from_object, ['epoch_count']),
      )

  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['learning_rate_multiplier']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'hyperParameters', 'learningRateMultiplier'],
          getv(from_object, ['learning_rate_multiplier']),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['learning_rate_multiplier']) is not None:
      setv(
          parent_object,
          [
              'preferenceOptimizationSpec',
              'hyperParameters',
              'learningRateMultiplier',
          ],
          getv(from_object, ['learning_rate_multiplier']),
      )

  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['export_last_checkpoint_only']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'exportLastCheckpointOnly'],
          getv(from_object, ['export_last_checkpoint_only']),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['export_last_checkpoint_only']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec', 'exportLastCheckpointOnly'],
          getv(from_object, ['export_last_checkpoint_only']),
      )

  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['adapter_size']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'hyperParameters', 'adapterSize'],
          getv(from_object, ['adapter_size']),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['adapter_size']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec', 'hyperParameters', 'adapterSize'],
          getv(from_object, ['adapter_size']),
      )

  if getv(from_object, ['batch_size']) is not None:
    raise ValueError('batch_size parameter is not supported in Vertex AI.')

  if getv(from_object, ['learning_rate']) is not None:
    raise ValueError('learning_rate parameter is not supported in Vertex AI.')

  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['evaluation_config']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'evaluationConfig'],
          _EvaluationConfig_to_vertex(
              getv(from_object, ['evaluation_config']), to_object, root_object
          ),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['evaluation_config']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec', 'evaluationConfig'],
          _EvaluationConfig_to_vertex(
              getv(from_object, ['evaluation_config']), to_object, root_object
          ),
      )

  if getv(from_object, ['labels']) is not None:
    setv(parent_object, ['labels'], getv(from_object, ['labels']))

  if getv(from_object, ['beta']) is not None:
    setv(
        parent_object,
        ['preferenceOptimizationSpec', 'hyperParameters', 'beta'],
        getv(from_object, ['beta']),
    )

  return to_object


def _CreateTuningJobParametersPrivate_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Assembles the Gemini API tuning-job creation request body."""
  result: dict[str, Any] = {}

  base_model = getv(from_object, ['base_model'])
  if base_model is not None:
    setv(result, ['baseModel'], base_model)

  pre_tuned_model = getv(from_object, ['pre_tuned_model'])
  if pre_tuned_model is not None:
    setv(result, ['preTunedModel'], pre_tuned_model)

  training_dataset = getv(from_object, ['training_dataset'])
  if training_dataset is not None:
    # The dataset converter writes into `result` via its parent argument.
    _TuningDataset_to_mldev(training_dataset, result, root_object)

  config = getv(from_object, ['config'])
  if config is not None:
    # The config converter also writes into `result` via its parent argument.
    _CreateTuningJobConfig_to_mldev(config, result, root_object)

  return result


def _CreateTuningJobParametersPrivate_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Assembles the Vertex AI tuning-job creation request body."""
  result: dict[str, Any] = {}

  base_model = getv(from_object, ['base_model'])
  if base_model is not None:
    setv(result, ['baseModel'], base_model)

  pre_tuned_model = getv(from_object, ['pre_tuned_model'])
  if pre_tuned_model is not None:
    setv(result, ['preTunedModel'], pre_tuned_model)

  training_dataset = getv(from_object, ['training_dataset'])
  if training_dataset is not None:
    # The dataset converter writes into `result` via its parent argument.
    _TuningDataset_to_vertex(training_dataset, result, root_object)

  config = getv(from_object, ['config'])
  if config is not None:
    # The config converter also writes into `result` via its parent argument.
    _CreateTuningJobConfig_to_vertex(config, result, root_object)

  return result


def _EvaluationConfig_from_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Vertex AI evaluation config to the SDK's snake_case form."""
  result: dict[str, Any] = {}

  metrics = getv(from_object, ['metrics'])
  if metrics is not None:
    setv(result, ['metrics'], t.t_metrics(metrics))

  output_config = getv(from_object, ['outputConfig'])
  if output_config is not None:
    setv(result, ['output_config'], output_config)

  autorater_config = getv(from_object, ['autoraterConfig'])
  if autorater_config is not None:
    setv(
        result,
        ['autorater_config'],
        _AutoraterConfig_from_vertex(autorater_config, result, root_object),
    )

  return result


def _EvaluationConfig_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts an SDK evaluation config to the Vertex AI camelCase form."""
  result: dict[str, Any] = {}

  metrics = getv(from_object, ['metrics'])
  if metrics is not None:
    setv(result, ['metrics'], t.t_metrics(metrics))

  output_config = getv(from_object, ['output_config'])
  if output_config is not None:
    setv(result, ['outputConfig'], output_config)

  autorater_config = getv(from_object, ['autorater_config'])
  if autorater_config is not None:
    setv(
        result,
        ['autoraterConfig'],
        _AutoraterConfig_to_vertex(autorater_config, result, root_object),
    )

  return result


def _GenerationConfig_from_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Vertex AI GenerationConfig to the SDK's snake_case form.

  Field-by-field camelCase-to-snake_case renames; nested values are copied
  through unchanged (no recursive conversion in this direction).
  """
  to_object: dict[str, Any] = {}
  # Vertex's 'modelConfig' surfaces as 'model_selection_config' in the SDK.
  if getv(from_object, ['modelConfig']) is not None:
    setv(
        to_object,
        ['model_selection_config'],
        getv(from_object, ['modelConfig']),
    )

  if getv(from_object, ['responseJsonSchema']) is not None:
    setv(
        to_object,
        ['response_json_schema'],
        getv(from_object, ['responseJsonSchema']),
    )

  if getv(from_object, ['audioTimestamp']) is not None:
    setv(to_object, ['audio_timestamp'], getv(from_object, ['audioTimestamp']))

  if getv(from_object, ['candidateCount']) is not None:
    setv(to_object, ['candidate_count'], getv(from_object, ['candidateCount']))

  if getv(from_object, ['enableAffectiveDialog']) is not None:
    setv(
        to_object,
        ['enable_affective_dialog'],
        getv(from_object, ['enableAffectiveDialog']),
    )

  if getv(from_object, ['frequencyPenalty']) is not None:
    setv(
        to_object,
        ['frequency_penalty'],
        getv(from_object, ['frequencyPenalty']),
    )

  if getv(from_object, ['logprobs']) is not None:
    setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))

  if getv(from_object, ['maxOutputTokens']) is not None:
    setv(
        to_object, ['max_output_tokens'], getv(from_object, ['maxOutputTokens'])
    )

  if getv(from_object, ['mediaResolution']) is not None:
    setv(
        to_object, ['media_resolution'], getv(from_object, ['mediaResolution'])
    )

  if getv(from_object, ['presencePenalty']) is not None:
    setv(
        to_object, ['presence_penalty'], getv(from_object, ['presencePenalty'])
    )

  if getv(from_object, ['responseLogprobs']) is not None:
    setv(
        to_object,
        ['response_logprobs'],
        getv(from_object, ['responseLogprobs']),
    )

  if getv(from_object, ['responseMimeType']) is not None:
    setv(
        to_object,
        ['response_mime_type'],
        getv(from_object, ['responseMimeType']),
    )

  if getv(from_object, ['responseModalities']) is not None:
    setv(
        to_object,
        ['response_modalities'],
        getv(from_object, ['responseModalities']),
    )

  if getv(from_object, ['responseSchema']) is not None:
    setv(to_object, ['response_schema'], getv(from_object, ['responseSchema']))

  if getv(from_object, ['routingConfig']) is not None:
    setv(to_object, ['routing_config'], getv(from_object, ['routingConfig']))

  if getv(from_object, ['seed']) is not None:
    setv(to_object, ['seed'], getv(from_object, ['seed']))

  if getv(from_object, ['speechConfig']) is not None:
    setv(to_object, ['speech_config'], getv(from_object, ['speechConfig']))

  if getv(from_object, ['stopSequences']) is not None:
    setv(to_object, ['stop_sequences'], getv(from_object, ['stopSequences']))

  if getv(from_object, ['temperature']) is not None:
    setv(to_object, ['temperature'], getv(from_object, ['temperature']))

  if getv(from_object, ['thinkingConfig']) is not None:
    setv(to_object, ['thinking_config'], getv(from_object, ['thinkingConfig']))

  if getv(from_object, ['topK']) is not None:
    setv(to_object, ['top_k'], getv(from_object, ['topK']))

  if getv(from_object, ['topP']) is not None:
    setv(to_object, ['top_p'], getv(from_object, ['topP']))

  return to_object


def _GenerationConfig_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts an SDK GenerationConfig to the Vertex AI camelCase form.

  Field-by-field snake_case-to-camelCase renames. ``speech_config`` is the
  only field converted recursively (via ``_SpeechConfig_to_vertex``).

  Raises:
    ValueError: if ``enable_enhanced_civic_answers`` is set (not supported
      in Vertex AI).
  """
  to_object: dict[str, Any] = {}
  # SDK 'model_selection_config' maps back to Vertex's 'modelConfig'.
  if getv(from_object, ['model_selection_config']) is not None:
    setv(
        to_object,
        ['modelConfig'],
        getv(from_object, ['model_selection_config']),
    )

  if getv(from_object, ['response_json_schema']) is not None:
    setv(
        to_object,
        ['responseJsonSchema'],
        getv(from_object, ['response_json_schema']),
    )

  if getv(from_object, ['audio_timestamp']) is not None:
    setv(to_object, ['audioTimestamp'], getv(from_object, ['audio_timestamp']))

  if getv(from_object, ['candidate_count']) is not None:
    setv(to_object, ['candidateCount'], getv(from_object, ['candidate_count']))

  if getv(from_object, ['enable_affective_dialog']) is not None:
    setv(
        to_object,
        ['enableAffectiveDialog'],
        getv(from_object, ['enable_affective_dialog']),
    )

  if getv(from_object, ['frequency_penalty']) is not None:
    setv(
        to_object,
        ['frequencyPenalty'],
        getv(from_object, ['frequency_penalty']),
    )

  if getv(from_object, ['logprobs']) is not None:
    setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))

  if getv(from_object, ['max_output_tokens']) is not None:
    setv(
        to_object, ['maxOutputTokens'], getv(from_object, ['max_output_tokens'])
    )

  if getv(from_object, ['media_resolution']) is not None:
    setv(
        to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
    )

  if getv(from_object, ['presence_penalty']) is not None:
    setv(
        to_object, ['presencePenalty'], getv(from_object, ['presence_penalty'])
    )

  if getv(from_object, ['response_logprobs']) is not None:
    setv(
        to_object,
        ['responseLogprobs'],
        getv(from_object, ['response_logprobs']),
    )

  if getv(from_object, ['response_mime_type']) is not None:
    setv(
        to_object,
        ['responseMimeType'],
        getv(from_object, ['response_mime_type']),
    )

  if getv(from_object, ['response_modalities']) is not None:
    setv(
        to_object,
        ['responseModalities'],
        getv(from_object, ['response_modalities']),
    )

  if getv(from_object, ['response_schema']) is not None:
    setv(to_object, ['responseSchema'], getv(from_object, ['response_schema']))

  if getv(from_object, ['routing_config']) is not None:
    setv(to_object, ['routingConfig'], getv(from_object, ['routing_config']))

  if getv(from_object, ['seed']) is not None:
    setv(to_object, ['seed'], getv(from_object, ['seed']))

  if getv(from_object, ['speech_config']) is not None:
    setv(
        to_object,
        ['speechConfig'],
        _SpeechConfig_to_vertex(
            getv(from_object, ['speech_config']), to_object, root_object
        ),
    )

  if getv(from_object, ['stop_sequences']) is not None:
    setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences']))

  if getv(from_object, ['temperature']) is not None:
    setv(to_object, ['temperature'], getv(from_object, ['temperature']))

  if getv(from_object, ['thinking_config']) is not None:
    setv(to_object, ['thinkingConfig'], getv(from_object, ['thinking_config']))

  if getv(from_object, ['top_k']) is not None:
    setv(to_object, ['topK'], getv(from_object, ['top_k']))

  if getv(from_object, ['top_p']) is not None:
    setv(to_object, ['topP'], getv(from_object, ['top_p']))

  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
    raise ValueError(
        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
    )

  return to_object


def _GetTuningJobParameters_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Builds the Gemini API request mapping for fetching a tuning job."""
  result: dict[str, Any] = {}
  job_name = getv(from_object, ['name'])
  if job_name is not None:
    # The job name is routed into the request URL, not the body.
    setv(result, ['_url', 'name'], job_name)
  return result


def _GetTuningJobParameters_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Builds the Vertex AI request mapping for fetching a tuning job."""
  result: dict[str, Any] = {}
  job_name = getv(from_object, ['name'])
  if job_name is not None:
    # The job name is routed into the request URL, not the body.
    setv(result, ['_url', 'name'], job_name)
  return result


def _ListTuningJobsConfig_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Copies list-request options into the parent request's query params."""
  result: dict[str, Any] = {}

  page_size = getv(from_object, ['page_size'])
  if page_size is not None:
    setv(parent_object, ['_query', 'pageSize'], page_size)

  page_token = getv(from_object, ['page_token'])
  if page_token is not None:
    setv(parent_object, ['_query', 'pageToken'], page_token)

  list_filter = getv(from_object, ['filter'])
  if list_filter is not None:
    setv(parent_object, ['_query', 'filter'], list_filter)

  return result


def _ListTuningJobsConfig_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Copies list-request options into the parent request's query params."""
  result: dict[str, Any] = {}

  page_size = getv(from_object, ['page_size'])
  if page_size is not None:
    setv(parent_object, ['_query', 'pageSize'], page_size)

  page_token = getv(from_object, ['page_token'])
  if page_token is not None:
    setv(parent_object, ['_query', 'pageToken'], page_token)

  list_filter = getv(from_object, ['filter'])
  if list_filter is not None:
    setv(parent_object, ['_query', 'filter'], list_filter)

  return result


def _ListTuningJobsParameters_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Builds the Gemini API request mapping for listing tuning jobs."""
  result: dict[str, Any] = {}
  config = getv(from_object, ['config'])
  if config is not None:
    # The config converter writes its query params into `result`.
    _ListTuningJobsConfig_to_mldev(config, result, root_object)
  return result


def _ListTuningJobsParameters_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Builds the Vertex AI request mapping for listing tuning jobs."""
  result: dict[str, Any] = {}
  config = getv(from_object, ['config'])
  if config is not None:
    # The config converter writes its query params into `result`.
    _ListTuningJobsConfig_to_vertex(config, result, root_object)
  return result


def _ListTuningJobsResponse_from_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Gemini API list-tuned-models response to SDK field names."""
  result: dict[str, Any] = {}

  http_response = getv(from_object, ['sdkHttpResponse'])
  if http_response is not None:
    setv(result, ['sdk_http_response'], http_response)

  next_page_token = getv(from_object, ['nextPageToken'])
  if next_page_token is not None:
    setv(result, ['next_page_token'], next_page_token)

  tuned_models = getv(from_object, ['tunedModels'])
  if tuned_models is not None:
    # Gemini API returns 'tunedModels'; each entry becomes an SDK tuning job.
    jobs = []
    for tuned_model in tuned_models:
      jobs.append(_TuningJob_from_mldev(tuned_model, result, root_object))
    setv(result, ['tuning_jobs'], jobs)

  return result


def _ListTuningJobsResponse_from_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Vertex AI list-tuning-jobs response to SDK field names."""
  result: dict[str, Any] = {}

  http_response = getv(from_object, ['sdkHttpResponse'])
  if http_response is not None:
    setv(result, ['sdk_http_response'], http_response)

  next_page_token = getv(from_object, ['nextPageToken'])
  if next_page_token is not None:
    setv(result, ['next_page_token'], next_page_token)

  tuning_jobs = getv(from_object, ['tuningJobs'])
  if tuning_jobs is not None:
    jobs = []
    for tuning_job in tuning_jobs:
      jobs.append(_TuningJob_from_vertex(tuning_job, result, root_object))
    setv(result, ['tuning_jobs'], jobs)

  return result


def _SpeechConfig_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts an SDK speech config to the Vertex AI camelCase form.

  Raises:
    ValueError: if multi_speaker_voice_config is set (not supported in
      Vertex AI).
  """
  result: dict[str, Any] = {}

  voice_config = getv(from_object, ['voice_config'])
  if voice_config is not None:
    setv(result, ['voiceConfig'], voice_config)

  language_code = getv(from_object, ['language_code'])
  if language_code is not None:
    setv(result, ['languageCode'], language_code)

  if getv(from_object, ['multi_speaker_voice_config']) is not None:
    raise ValueError(
        'multi_speaker_voice_config parameter is not supported in Vertex AI.'
    )

  return result


def _TunedModel_from_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Derives SDK tuned-model fields from a Gemini API tuned model."""
  result: dict[str, Any] = {}
  name = getv(from_object, ['name'])
  if name is not None:
    # The Gemini API exposes a single resource name; the SDK surfaces it
    # as both the model and the endpoint identifier.
    setv(result, ['model'], name)
    setv(result, ['endpoint'], name)
  return result


def _TuningDataset_to_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts an SDK tuning dataset to the Gemini API request shape.

  Raises:
    ValueError: if gcs_uri or vertex_dataset_resource is set (those are
      Vertex-AI-only fields).
  """
  result: dict[str, Any] = {}

  if getv(from_object, ['gcs_uri']) is not None:
    raise ValueError('gcs_uri parameter is not supported in Gemini API.')

  if getv(from_object, ['vertex_dataset_resource']) is not None:
    raise ValueError(
        'vertex_dataset_resource parameter is not supported in Gemini API.'
    )

  examples = getv(from_object, ['examples'])
  if examples is not None:
    # Copy the examples into the doubly-nested shape the API expects.
    setv(result, ['examples', 'examples'], list(examples))

  return result


def _TuningDataset_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Maps an SDK tuning dataset onto a Vertex AI tuning-job request.

  Dataset URIs are written into ``parent_object`` under the spec selected
  by the tuning-method discriminator read from
  ``root_object['config']['method']`` (default 'SUPERVISED_FINE_TUNING').
  Both ``gcs_uri`` and ``vertex_dataset_resource`` map to the same
  ``trainingDatasetUri`` path; if both are set, the
  ``vertex_dataset_resource`` value wins because it is written second.

  Raises:
    ValueError: if inline ``examples`` are provided (Gemini API only).
  """
  to_object: dict[str, Any] = {}

  # Route gcs_uri under the spec matching the tuning method.
  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['gcs_uri']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'trainingDatasetUri'],
          getv(from_object, ['gcs_uri']),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['gcs_uri']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec', 'trainingDatasetUri'],
          getv(from_object, ['gcs_uri']),
      )

  # Route vertex_dataset_resource under the spec matching the tuning method.
  discriminator = getv(root_object, ['config', 'method'])
  if discriminator is None:
    discriminator = 'SUPERVISED_FINE_TUNING'
  if discriminator == 'SUPERVISED_FINE_TUNING':
    if getv(from_object, ['vertex_dataset_resource']) is not None:
      setv(
          parent_object,
          ['supervisedTuningSpec', 'trainingDatasetUri'],
          getv(from_object, ['vertex_dataset_resource']),
      )

  elif discriminator == 'PREFERENCE_TUNING':
    if getv(from_object, ['vertex_dataset_resource']) is not None:
      setv(
          parent_object,
          ['preferenceOptimizationSpec', 'trainingDatasetUri'],
          getv(from_object, ['vertex_dataset_resource']),
      )

  if getv(from_object, ['examples']) is not None:
    raise ValueError('examples parameter is not supported in Vertex AI.')

  return to_object


def _TuningJob_from_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Gemini Developer API tuning-job response to SDK field names.

  Copies each present field from its camelCase (possibly nested) source path
  to the SDK's snake_case key, applying a transform where one is needed.
  """
  to_object: dict[str, Any] = {}

  # (source path, destination key, optional transform), in output order.
  field_specs = [
      (['sdkHttpResponse'], 'sdk_http_response', None),
      (['name'], 'name', None),
      (['state'], 'state', t.t_tuning_job_status),
      (['createTime'], 'create_time', None),
      (['tuningTask', 'startTime'], 'start_time', None),
      (['tuningTask', 'completeTime'], 'end_time', None),
      (['updateTime'], 'update_time', None),
      (['description'], 'description', None),
      (['baseModel'], 'base_model', None),
  ]
  for source_path, dest_key, transform in field_specs:
    value = getv(from_object, source_path)
    if value is not None:
      setv(to_object, [dest_key], transform(value) if transform else value)

  # The whole response doubles as the tuned-model message on this surface.
  if getv(from_object, ['_self']) is not None:
    setv(
        to_object,
        ['tuned_model'],
        _TunedModel_from_mldev(
            getv(from_object, ['_self']), to_object, root_object
        ),
    )

  return to_object


def _TuningJob_from_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a Vertex AI tuning-job response to SDK snake_case field names.

  Every present source field is copied to its SDK key; ``state`` goes through
  the job-status transformer and ``evaluationConfig`` through its own nested
  converter. All other fields are copied verbatim.
  """
  to_object: dict[str, Any] = {}

  def _convert_eval_config(value: Any) -> dict[str, Any]:
    # Nested message conversion; receives the partially-built output as its
    # parent, matching the generated converter's calling convention.
    return _EvaluationConfig_from_vertex(value, to_object, root_object)

  # (source key, destination key, optional transform), in output order.
  field_specs = [
      ('sdkHttpResponse', 'sdk_http_response', None),
      ('name', 'name', None),
      ('state', 'state', t.t_tuning_job_status),
      ('createTime', 'create_time', None),
      ('startTime', 'start_time', None),
      ('endTime', 'end_time', None),
      ('updateTime', 'update_time', None),
      ('error', 'error', None),
      ('description', 'description', None),
      ('baseModel', 'base_model', None),
      ('tunedModel', 'tuned_model', None),
      ('preTunedModel', 'pre_tuned_model', None),
      ('supervisedTuningSpec', 'supervised_tuning_spec', None),
      ('preferenceOptimizationSpec', 'preference_optimization_spec', None),
      ('tuningDataStats', 'tuning_data_stats', None),
      ('encryptionSpec', 'encryption_spec', None),
      ('partnerModelTuningSpec', 'partner_model_tuning_spec', None),
      ('evaluationConfig', 'evaluation_config', _convert_eval_config),
      ('customBaseModel', 'custom_base_model', None),
      ('experiment', 'experiment', None),
      ('labels', 'labels', None),
      ('outputUri', 'output_uri', None),
      ('pipelineJob', 'pipeline_job', None),
      ('serviceAccount', 'service_account', None),
      ('tunedModelDisplayName', 'tuned_model_display_name', None),
      ('veoTuningSpec', 'veo_tuning_spec', None),
  ]
  for source_key, dest_key, transform in field_specs:
    value = getv(from_object, [source_key])
    if value is not None:
      setv(to_object, [dest_key], transform(value) if transform else value)

  return to_object


def _TuningOperation_from_mldev(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Converts a long-running-operation response to SDK field names.

  Only renames keys; values (including ``metadata`` and ``error`` payloads)
  are copied through untouched.
  """
  to_object: dict[str, Any] = {}

  key_renames = (
      ('sdkHttpResponse', 'sdk_http_response'),
      ('name', 'name'),
      ('metadata', 'metadata'),
      ('done', 'done'),
      ('error', 'error'),
  )
  for source_key, dest_key in key_renames:
    value = getv(from_object, [source_key])
    if value is not None:
      setv(to_object, [dest_key], value)

  return to_object


def _TuningValidationDataset_to_vertex(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
    root_object: Optional[Union[dict[str, Any], object]] = None,
) -> dict[str, Any]:
  """Maps a validation dataset to the Vertex ``validationDatasetUri`` field.

  Both sources target the same destination key; ``vertex_dataset_resource``
  is applied second, so it overrides ``gcs_uri`` when both are provided.
  """
  to_object: dict[str, Any] = {}

  for source_field in ('gcs_uri', 'vertex_dataset_resource'):
    value = getv(from_object, [source_field])
    if value is not None:
      setv(to_object, ['validationDatasetUri'], value)

  return to_object


class Tunings(_api_module.BaseModule):
  """Synchronous interface for tuning jobs: get, list, cancel, and tune."""

  def _get(
      self,
      *,
      name: str,
      config: Optional[types.GetTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Gets a TuningJob.

    Args:
      name: The resource name of the tuning job.
      config: Optional per-request configuration (e.g. ``http_options``).

    Returns:
      A TuningJob object.
    """

    parameter_model = types._GetTuningJobParameters(
        name=name,
        config=config,
    )

    request_url_dict: Optional[dict[str, str]]

    # Vertex AI and the Gemini Developer API use different request encoders,
    # but both address the job by its full resource name.
    if self._api_client.vertexai:
      request_dict = _GetTuningJobParameters_to_vertex(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = '{name}'.format_map(request_url_dict)
      else:
        path = '{name}'
    else:
      request_dict = _GetTuningJobParameters_to_mldev(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = '{name}'.format_map(request_url_dict)
      else:
        path = '{name}'
    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if (
        parameter_model.config is not None
        and parameter_model.config.http_options is not None
    ):
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = self._api_client.request('get', path, request_dict, http_options)

    response_dict = {} if not response.body else json.loads(response.body)

    if self._api_client.vertexai:
      response_dict = _TuningJob_from_vertex(response_dict)

    if not self._api_client.vertexai:
      response_dict = _TuningJob_from_mldev(response_dict)

    return_value = types.TuningJob._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  def _list(
      self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
  ) -> types.ListTuningJobsResponse:
    """Makes one list request and returns a single page of tuning jobs.

    Args:
      config: Optional list configuration (page size, page token, filters).

    Returns:
      A ListTuningJobsResponse holding one page of results.
    """
    parameter_model = types._ListTuningJobsParameters(
        config=config,
    )

    request_url_dict: Optional[dict[str, str]]

    # The two API surfaces expose the collection under different paths:
    # 'tuningJobs' on Vertex AI, 'tunedModels' on the Gemini Developer API.
    if self._api_client.vertexai:
      request_dict = _ListTuningJobsParameters_to_vertex(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = 'tuningJobs'.format_map(request_url_dict)
      else:
        path = 'tuningJobs'
    else:
      request_dict = _ListTuningJobsParameters_to_mldev(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = 'tunedModels'.format_map(request_url_dict)
      else:
        path = 'tunedModels'
    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if (
        parameter_model.config is not None
        and parameter_model.config.http_options is not None
    ):
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = self._api_client.request('get', path, request_dict, http_options)

    response_dict = {} if not response.body else json.loads(response.body)

    if self._api_client.vertexai:
      response_dict = _ListTuningJobsResponse_from_vertex(response_dict)

    if not self._api_client.vertexai:
      response_dict = _ListTuningJobsResponse_from_mldev(response_dict)

    return_value = types.ListTuningJobsResponse._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  def cancel(
      self,
      *,
      name: str,
      config: Optional[types.CancelTuningJobConfigOrDict] = None,
  ) -> types.CancelTuningJobResponse:
    """Cancels a tuning job.

    Args:
      name (str): TuningJob resource name.
      config: Optional per-request configuration.

    Returns:
      A CancelTuningJobResponse object.
    """

    parameter_model = types._CancelTuningJobParameters(
        name=name,
        config=config,
    )

    request_url_dict: Optional[dict[str, str]]

    # Both surfaces use the same ':cancel' custom-verb URL shape.
    if self._api_client.vertexai:
      request_dict = _CancelTuningJobParameters_to_vertex(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = '{name}:cancel'.format_map(request_url_dict)
      else:
        path = '{name}:cancel'
    else:
      request_dict = _CancelTuningJobParameters_to_mldev(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = '{name}:cancel'.format_map(request_url_dict)
      else:
        path = '{name}:cancel'
    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if (
        parameter_model.config is not None
        and parameter_model.config.http_options is not None
    ):
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = self._api_client.request(
        'post', path, request_dict, http_options
    )

    response_dict = {} if not response.body else json.loads(response.body)

    if self._api_client.vertexai:
      response_dict = _CancelTuningJobResponse_from_vertex(response_dict)

    if not self._api_client.vertexai:
      response_dict = _CancelTuningJobResponse_from_mldev(response_dict)

    return_value = types.CancelTuningJobResponse._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  def _tune(
      self,
      *,
      base_model: Optional[str] = None,
      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
      training_dataset: types.TuningDatasetOrDict,
      config: Optional[types.CreateTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Creates a tuning job and returns the TuningJob object.

    Vertex AI only; raises for Gemini Developer API clients (see
    ``_tune_mldev`` for that surface).

    Args:
      base_model: The name of the model to tune.
      pre_tuned_model: A previously tuned model to continue tuning
        (alternative to ``base_model``).
      training_dataset: The training dataset to use.
      config: The configuration to use for the tuning job.

    Returns:
      A TuningJob object.

    Raises:
      ValueError: If called on a non-Vertex (Gemini Developer API) client.
    """

    parameter_model = types._CreateTuningJobParametersPrivate(
        base_model=base_model,
        pre_tuned_model=pre_tuned_model,
        training_dataset=training_dataset,
        config=config,
    )

    request_url_dict: Optional[dict[str, str]]
    if not self._api_client.vertexai:
      raise ValueError('This method is only supported in the Vertex AI client.')
    else:
      request_dict = _CreateTuningJobParametersPrivate_to_vertex(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = 'tuningJobs'.format_map(request_url_dict)
      else:
        path = 'tuningJobs'

    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if (
        parameter_model.config is not None
        and parameter_model.config.http_options is not None
    ):
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = self._api_client.request(
        'post', path, request_dict, http_options
    )

    response_dict = {} if not response.body else json.loads(response.body)

    # Only the Vertex branch is reachable here (see the guard above).
    if self._api_client.vertexai:
      response_dict = _TuningJob_from_vertex(response_dict)

    return_value = types.TuningJob._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  def _tune_mldev(
      self,
      *,
      base_model: Optional[str] = None,
      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
      training_dataset: types.TuningDatasetOrDict,
      config: Optional[types.CreateTuningJobConfigOrDict] = None,
  ) -> types.TuningOperation:
    """Creates a tuning job and returns the TuningJob object.

    Gemini Developer API only; raises for Vertex AI clients (see ``_tune``
    for that surface). Returns the long-running operation, not the job.

    Args:
      base_model: The name of the model to tune.
      pre_tuned_model: A previously tuned model to continue tuning
        (alternative to ``base_model``).
      training_dataset: The training dataset to use.
      config: The configuration to use for the tuning job.

    Returns:
      A TuningJob operation.

    Raises:
      ValueError: If called on a Vertex AI client.
    """

    parameter_model = types._CreateTuningJobParametersPrivate(
        base_model=base_model,
        pre_tuned_model=pre_tuned_model,
        training_dataset=training_dataset,
        config=config,
    )

    request_url_dict: Optional[dict[str, str]]
    if self._api_client.vertexai:
      raise ValueError(
          'This method is only supported in the Gemini Developer client.'
      )
    else:
      request_dict = _CreateTuningJobParametersPrivate_to_mldev(
          parameter_model, None, parameter_model
      )
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = 'tunedModels'.format_map(request_url_dict)
      else:
        path = 'tunedModels'

    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if (
        parameter_model.config is not None
        and parameter_model.config.http_options is not None
    ):
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = self._api_client.request(
        'post', path, request_dict, http_options
    )

    response_dict = {} if not response.body else json.loads(response.body)

    if not self._api_client.vertexai:
      response_dict = _TuningOperation_from_mldev(response_dict)

    return_value = types.TuningOperation._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  def get(
      self,
      *,
      name: str,
      config: Optional[types.GetTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Gets a TuningJob.

    On Vertex AI, when the job is linked to an experiment and the client has
    a project, an experiment button is also displayed via ``_IpythonUtils``
    (presumably a notebook-only affordance — no-op elsewhere; confirm in
    ``_IpythonUtils``).

    Args:
      name: The resource name of the tuning job.
      config: Optional per-request configuration.

    Returns:
      A TuningJob object.
    """
    job = self._get(name=name, config=config)
    if (
        job.experiment
        and self._api_client.vertexai
        and self._api_client.project is not None
    ):
      _IpythonUtils.display_experiment_button(
          experiment=job.experiment,
          project=self._api_client.project,
      )
    return job

  @_common.experimental_warning(
      "The SDK's tuning implementation is experimental, "
      'and may change in future versions.',
  )
  def tune(
      self,
      *,
      base_model: str,
      training_dataset: types.TuningDatasetOrDict,
      config: Optional[types.CreateTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Creates a tuning job.

    On Vertex AI, a ``base_model`` starting with ``projects/`` is treated as
    a pre-tuned model to continue tuning; any ``evaluation_config`` in
    ``config`` is validated first. On the Gemini Developer API, the create
    operation is wrapped into a queued TuningJob.

    Args:
      base_model: The name of the model to tune, or (Vertex AI) the resource
        name of a previously tuned model.
      training_dataset: The training dataset to use.
      config: The configuration to use for the tuning job.

    Returns:
      A TuningJob object.
    """
    if self._api_client.vertexai:
      if base_model.startswith('projects/'):  # Pre-tuned model
        checkpoint_id = None
        if config:
          # NOTE(review): getattr() returns None for plain-dict configs even
          # when the key is present — confirm dict configs are normalized
          # before this point.
          checkpoint_id = getattr(config, 'pre_tuned_model_checkpoint_id', None)
        pre_tuned_model = types.PreTunedModel(
            tuned_model_name=base_model, checkpoint_id=checkpoint_id
        )
        tuning_job = self._tune(
            pre_tuned_model=pre_tuned_model,
            training_dataset=training_dataset,
            config=config,
        )
      else:
        validated_evaluation_config: Optional[types.EvaluationConfig] = None
        if (
            config is not None
            and getattr(config, 'evaluation_config', None) is not None
        ):
          # Normalize dict-shaped evaluation configs and metrics into typed
          # models, then validate the minimum required fields up front.
          evaluation_config = getattr(config, 'evaluation_config')
          if isinstance(evaluation_config, dict):
            evaluation_config = types.EvaluationConfig(**evaluation_config)
          if (
              not evaluation_config.metrics
              or not evaluation_config.output_config
          ):
            raise ValueError(
                'Evaluation config must have at least one metric and an output'
                ' config.'
            )
          for i in range(len(evaluation_config.metrics)):
            if isinstance(evaluation_config.metrics[i], dict):
              evaluation_config.metrics[i] = types.Metric.model_validate(
                  evaluation_config.metrics[i]
              )
          if isinstance(config, dict):
            config['evaluation_config'] = evaluation_config
          else:
            config.evaluation_config = evaluation_config
          validated_evaluation_config = evaluation_config
        tuning_job = self._tune(
            base_model=base_model,
            training_dataset=training_dataset,
            config=config,
        )
        if (
            config is not None
            and getattr(config, 'evaluation_config', None) is not None
        ):
          # Surface the validated config on the returned job.
          tuning_job.evaluation_config = validated_evaluation_config
    else:
      operation = self._tune_mldev(
          base_model=base_model,
          training_dataset=training_dataset,
          config=config,
      )
      # Derive the tuned-model name from the operation metadata if present,
      # otherwise from the operation name (the part before '/operations/').
      if operation.metadata is not None and 'tunedModel' in operation.metadata:
        tuned_model_name = operation.metadata['tunedModel']
      else:
        if operation.name is None:
          raise ValueError('Operation name is required.')
        tuned_model_name = operation.name.partition('/operations/')[0]
      tuning_job = types.TuningJob(
          name=tuned_model_name,
          state=types.JobState.JOB_STATE_QUEUED,
      )
    if tuning_job.name and self._api_client.vertexai:
      _IpythonUtils.display_model_tuning_button(
          tuning_job_resource=tuning_job.name
      )
    return tuning_job

  def list(
      self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
  ) -> Pager[types.TuningJob]:
    """Lists `TuningJob` objects.

    Args:
      config: The configuration for the list request.

    Returns:
      A Pager object that contains one page of tuning jobs. When iterating over
      the pager, it automatically fetches the next page if there are more.

    Usage:

    .. code-block:: python
        for tuning_job in client.tunings.list():
            print(tuning_job.name)
    """

    list_request = self._list
    return Pager(
        'tuning_jobs',
        list_request,
        self._list(config=config),
        config,
    )


class AsyncTunings(_api_module.BaseModule):

  async def _get(
      self,
      *,
      name: str,
      config: Optional[types.GetTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Gets a TuningJob.

    Args:
      name: The resource name of the tuning job.
      config: Optional per-request configuration (e.g. ``http_options``).

    Returns:
      A TuningJob object.
    """

    parameter_model = types._GetTuningJobParameters(name=name, config=config)

    # Encode the request for whichever API surface this client targets; both
    # address the job by its full resource name.
    use_vertex = self._api_client.vertexai
    if use_vertex:
      request_dict = _GetTuningJobParameters_to_vertex(
          parameter_model, None, parameter_model
      )
    else:
      request_dict = _GetTuningJobParameters_to_mldev(
          parameter_model, None, parameter_model
      )

    request_url_dict: Optional[dict[str, str]] = request_dict.get('_url')
    path = (
        '{name}'.format_map(request_url_dict) if request_url_dict else '{name}'
    )

    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if parameter_model.config is not None:
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = await self._api_client.async_request(
        'get', path, request_dict, http_options
    )

    response_dict = json.loads(response.body) if response.body else {}

    if use_vertex:
      response_dict = _TuningJob_from_vertex(response_dict)
    else:
      response_dict = _TuningJob_from_mldev(response_dict)

    return_value = types.TuningJob._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  async def _list(
      self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
  ) -> types.ListTuningJobsResponse:
    """Makes one list request and returns a single page of tuning jobs."""
    parameter_model = types._ListTuningJobsParameters(config=config)

    # The collection lives under different paths on the two surfaces:
    # 'tuningJobs' on Vertex AI, 'tunedModels' on the Gemini Developer API.
    use_vertex = self._api_client.vertexai
    if use_vertex:
      request_dict = _ListTuningJobsParameters_to_vertex(
          parameter_model, None, parameter_model
      )
      path_template = 'tuningJobs'
    else:
      request_dict = _ListTuningJobsParameters_to_mldev(
          parameter_model, None, parameter_model
      )
      path_template = 'tunedModels'

    request_url_dict: Optional[dict[str, str]] = request_dict.get('_url')
    path = (
        path_template.format_map(request_url_dict)
        if request_url_dict
        else path_template
    )

    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if parameter_model.config is not None:
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = await self._api_client.async_request(
        'get', path, request_dict, http_options
    )

    response_dict = json.loads(response.body) if response.body else {}

    if use_vertex:
      response_dict = _ListTuningJobsResponse_from_vertex(response_dict)
    else:
      response_dict = _ListTuningJobsResponse_from_mldev(response_dict)

    return_value = types.ListTuningJobsResponse._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  async def cancel(
      self,
      *,
      name: str,
      config: Optional[types.CancelTuningJobConfigOrDict] = None,
  ) -> types.CancelTuningJobResponse:
    """Cancels a tuning job asynchronously.

    Args:
      name (str): A TuningJob resource name.
      config: Optional per-request configuration.

    Returns:
      A CancelTuningJobResponse object.
    """

    parameter_model = types._CancelTuningJobParameters(name=name, config=config)

    use_vertex = self._api_client.vertexai
    if use_vertex:
      request_dict = _CancelTuningJobParameters_to_vertex(
          parameter_model, None, parameter_model
      )
    else:
      request_dict = _CancelTuningJobParameters_to_mldev(
          parameter_model, None, parameter_model
      )

    # Both surfaces share the same ':cancel' custom-verb URL shape.
    request_url_dict: Optional[dict[str, str]] = request_dict.get('_url')
    path = (
        '{name}:cancel'.format_map(request_url_dict)
        if request_url_dict
        else '{name}:cancel'
    )

    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if parameter_model.config is not None:
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = await self._api_client.async_request(
        'post', path, request_dict, http_options
    )

    response_dict = json.loads(response.body) if response.body else {}

    if use_vertex:
      response_dict = _CancelTuningJobResponse_from_vertex(response_dict)
    else:
      response_dict = _CancelTuningJobResponse_from_mldev(response_dict)

    return_value = types.CancelTuningJobResponse._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  async def _tune(
      self,
      *,
      base_model: Optional[str] = None,
      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
      training_dataset: types.TuningDatasetOrDict,
      config: Optional[types.CreateTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Creates a tuning job and returns the TuningJob object.

    Vertex AI only; raises for Gemini Developer API clients.

    Args:
      base_model: The name of the model to tune.
      pre_tuned_model: A previously tuned model to continue tuning
        (alternative to ``base_model``).
      training_dataset: The training dataset to use.
      config: The configuration to use for the tuning job.

    Returns:
      A TuningJob object.

    Raises:
      ValueError: If called on a non-Vertex (Gemini Developer API) client.
    """

    parameter_model = types._CreateTuningJobParametersPrivate(
        base_model=base_model,
        pre_tuned_model=pre_tuned_model,
        training_dataset=training_dataset,
        config=config,
    )

    # This creation path exists only on the Vertex AI surface.
    if not self._api_client.vertexai:
      raise ValueError('This method is only supported in the Vertex AI client.')

    request_dict = _CreateTuningJobParametersPrivate_to_vertex(
        parameter_model, None, parameter_model
    )
    request_url_dict: Optional[dict[str, str]] = request_dict.get('_url')
    path = (
        'tuningJobs'.format_map(request_url_dict)
        if request_url_dict
        else 'tuningJobs'
    )

    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    http_options: Optional[types.HttpOptions] = None
    if parameter_model.config is not None:
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = await self._api_client.async_request(
        'post', path, request_dict, http_options
    )

    response_dict = json.loads(response.body) if response.body else {}

    # Only reachable on Vertex AI (see the guard above).
    if self._api_client.vertexai:
      response_dict = _TuningJob_from_vertex(response_dict)

    return_value = types.TuningJob._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  async def _tune_mldev(
      self,
      *,
      base_model: Optional[str] = None,
      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
      training_dataset: types.TuningDatasetOrDict,
      config: Optional[types.CreateTuningJobConfigOrDict] = None,
  ) -> types.TuningOperation:
    """Creates a tuning job on the Gemini Developer API backend.

    Args:
      base_model: The name of the model to tune.
      pre_tuned_model: A previously tuned model to continue tuning from.
      training_dataset: The training dataset to use.
      config: The configuration to use for the tuning job.

    Returns:
      A TuningOperation object representing the long-running tuning
      operation.

    Raises:
      ValueError: If called on a Vertex AI client; this method only
        supports the Gemini Developer API.
    """

    parameter_model = types._CreateTuningJobParametersPrivate(
        base_model=base_model,
        pre_tuned_model=pre_tuned_model,
        training_dataset=training_dataset,
        config=config,
    )

    request_url_dict: Optional[dict[str, str]]
    if self._api_client.vertexai:
      raise ValueError(
          'This method is only supported in the Gemini Developer client.'
      )
    else:
      # Convert the parameter model into the wire-format dict expected by
      # the Gemini Developer API ("mldev") backend.
      request_dict = _CreateTuningJobParametersPrivate_to_mldev(
          parameter_model, None, parameter_model
      )
      # The converter stashes path-template substitutions under '_url'.
      request_url_dict = request_dict.get('_url')
      if request_url_dict:
        path = 'tunedModels'.format_map(request_url_dict)
      else:
        path = 'tunedModels'

    # Query-string parameters, if any, are stashed under '_query'.
    query_params = request_dict.get('_query')
    if query_params:
      path = f'{path}?{urlencode(query_params)}'
    # TODO: remove the hack that pops config.
    request_dict.pop('config', None)

    # Per-request HTTP options (when supplied) override client defaults.
    http_options: Optional[types.HttpOptions] = None
    if (
        parameter_model.config is not None
        and parameter_model.config.http_options is not None
    ):
      http_options = parameter_model.config.http_options

    request_dict = _common.convert_to_dict(request_dict)
    request_dict = _common.encode_unserializable_types(request_dict)

    response = await self._api_client.async_request(
        'post', path, request_dict, http_options
    )

    # An empty response body maps to an empty response dict.
    response_dict = {} if not response.body else json.loads(response.body)

    if not self._api_client.vertexai:
      response_dict = _TuningOperation_from_mldev(response_dict)

    return_value = types.TuningOperation._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
    )
    return_value.sdk_http_response = types.HttpResponse(
        headers=response.headers
    )
    self._api_client._verify_response(return_value)
    return return_value

  async def get(
      self,
      *,
      name: str,
      config: Optional[types.GetTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Fetches a tuning job by resource name.

    Args:
      name: The resource name of the tuning job.
      config: The configuration for the get request.

    Returns:
      The TuningJob object.
    """
    tuning_job = await self._get(name=name, config=config)
    client = self._api_client
    # On Vertex AI, render an experiment link in notebook environments when
    # the job is associated with an experiment and a project is known.
    if tuning_job.experiment and client.vertexai and client.project is not None:
      _IpythonUtils.display_experiment_button(
          experiment=tuning_job.experiment,
          project=client.project,
      )
    return tuning_job

  @_common.experimental_warning(
      "The SDK's tuning implementation is experimental, "
      'and may change in future versions.'
  )
  async def tune(
      self,
      *,
      base_model: str,
      training_dataset: types.TuningDatasetOrDict,
      config: Optional[types.CreateTuningJobConfigOrDict] = None,
  ) -> types.TuningJob:
    """Creates a tuning job and returns a TuningJob object.

    Args:
      base_model: The model to tune. On Vertex AI this may also be the
        resource name of a previously tuned model (a name starting with
        ``projects/``) to continue tuning from.
      training_dataset: The training dataset to use.
      config: The configuration (typed object or plain dict) for the
        tuning job.

    Returns:
      A TuningJob object. On the Gemini Developer API backend the job is
      synthesized from the returned long-running operation.

    Raises:
      ValueError: If the evaluation config lacks metrics or an output
        config, or if a Gemini Developer API operation has no name.
    """

    def _config_value(key: str) -> Any:
      # `config` may be a typed config object *or* a plain dict (see
      # CreateTuningJobConfigOrDict). A bare getattr() on a dict always
      # returns the default, which silently skipped checkpoint extraction
      # and evaluation-config validation for dict-based configs.
      if isinstance(config, dict):
        return config.get(key)
      return getattr(config, key, None)

    if self._api_client.vertexai:
      if base_model.startswith('projects/'):  # Pre-tuned model
        checkpoint_id = None
        if config:
          checkpoint_id = _config_value('pre_tuned_model_checkpoint_id')
        pre_tuned_model = types.PreTunedModel(
            tuned_model_name=base_model, checkpoint_id=checkpoint_id
        )

        tuning_job = await self._tune(
            pre_tuned_model=pre_tuned_model,
            training_dataset=training_dataset,
            config=config,
        )
      else:
        evaluation_config = (
            _config_value('evaluation_config') if config is not None else None
        )
        if evaluation_config is not None:
          if isinstance(evaluation_config, dict):
            evaluation_config = types.EvaluationConfig(**evaluation_config)
          if (
              not evaluation_config.metrics
              or not evaluation_config.output_config
          ):
            raise ValueError(
                'Evaluation config must have at least one metric and an output'
                ' config.'
            )
          # Normalize dict metrics into types.Metric instances in place.
          for i, metric in enumerate(evaluation_config.metrics):
            if isinstance(metric, dict):
              evaluation_config.metrics[i] = types.Metric.model_validate(metric)
          # Write the normalized evaluation config back onto `config`.
          if isinstance(config, dict):
            config['evaluation_config'] = evaluation_config
          else:
            config.evaluation_config = evaluation_config
        tuning_job = await self._tune(
            base_model=base_model,
            training_dataset=training_dataset,
            config=config,
        )
    else:
      operation = await self._tune_mldev(
          base_model=base_model,
          training_dataset=training_dataset,
          config=config,
      )
      # Operation metadata usually carries the tuned model name; otherwise
      # derive it from the operation resource name.
      if operation.metadata is not None and 'tunedModel' in operation.metadata:
        tuned_model_name = operation.metadata['tunedModel']
      else:
        if operation.name is None:
          raise ValueError('Operation name is required.')
        tuned_model_name = operation.name.partition('/operations/')[0]
      tuning_job = types.TuningJob(
          name=tuned_model_name,
          state=types.JobState.JOB_STATE_QUEUED,
      )
    if tuning_job.name and self._api_client.vertexai:
      _IpythonUtils.display_model_tuning_button(
          tuning_job_resource=tuning_job.name
      )
    return tuning_job

  async def list(
      self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
  ) -> AsyncPager[types.TuningJob]:
    """Lists `TuningJob` objects asynchronously.

    Args:
      config: The configuration for the list request.

    Returns:
      An AsyncPager holding the first page of tuning jobs; iterating the
      pager transparently fetches subsequent pages as needed.

    Usage:

    .. code-block:: python
        async for tuning_job in await client.aio.tunings.list():
            print(tuning_job.name)
    """
    first_page = await self._list(config=config)
    return AsyncPager(
        'tuning_jobs',
        self._list,
        first_page,
        config,
    )


class _IpythonUtils:
  """Temporary class to hold the IPython related functions."""

  displayed_experiments: set[str] = set()

  @staticmethod
  def _get_ipython_shell_name() -> Union[str, Any]:
    import sys

    if 'IPython' in sys.modules:
      from IPython import get_ipython

      return get_ipython().__class__.__name__
    return ''

  @staticmethod
  def is_ipython_available() -> bool:
    return bool(_IpythonUtils._get_ipython_shell_name())

  @staticmethod
  def _get_styles() -> str:
    """Returns the HTML style markup to support custom buttons."""
    return """
    <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
    <style>
      .view-vertex-resource,
      .view-vertex-resource:hover,
      .view-vertex-resource:visited {
        position: relative;
        display: inline-flex;
        flex-direction: row;
        height: 32px;
        padding: 0 12px;
          margin: 4px 18px;
        gap: 4px;
        border-radius: 4px;

        align-items: center;
        justify-content: center;
        background-color: rgb(255, 255, 255);
        color: rgb(51, 103, 214);

        font-family: Roboto,"Helvetica Neue",sans-serif;
        font-size: 13px;
        font-weight: 500;
        text-transform: uppercase;
        text-decoration: none !important;

        transition: box-shadow 280ms cubic-bezier(0.4, 0, 0.2, 1) 0s;
        box-shadow: 0px 3px 1px -2px rgba(0,0,0,0.2), 0px 2px 2px 0px rgba(0,0,0,0.14), 0px 1px 5px 0px rgba(0,0,0,0.12);
      }
      .view-vertex-resource:active {
        box-shadow: 0px 5px 5px -3px rgba(0,0,0,0.2),0px 8px 10px 1px rgba(0,0,0,0.14),0px 3px 14px 2px rgba(0,0,0,0.12);
      }
      .view-vertex-resource:active .view-vertex-ripple::before {
        position: absolute;
        top: 0;
        bottom: 0;
        left: 0;
        right: 0;
        border-radius: 4px;
        pointer-events: none;

        content: '';
        background-color: rgb(51, 103, 214);
        opacity: 0.12;
      }
      .view-vertex-icon {
        font-size: 18px;
      }
    </style>
  """

  @staticmethod
  def _parse_resource_name(marker: str, resource_parts: list[str]) -> str:
    """Returns the part after the marker text part."""
    for i in range(len(resource_parts)):
      if resource_parts[i] == marker and i + 1 < len(resource_parts):
        return resource_parts[i + 1]
    return ''

  @staticmethod
  def _display_link(
      text: str, url: str, icon: Optional[str] = 'open_in_new'
  ) -> None:
    """Creates and displays the link to open the Vertex resource.

    Args:
      text: The text displayed on the clickable button.
      url: The url that the button will lead to. Only cloud console URIs are
        allowed.
      icon: The icon name on the button (from material-icons library)
    """
    CLOUD_UI_URL = 'https://console.cloud.google.com'  # pylint: disable=invalid-name
    if not url.startswith(CLOUD_UI_URL):
      raise ValueError(f'Only urls starting with {CLOUD_UI_URL} are allowed.')

    import uuid

    button_id = f'view-vertex-resource-{str(uuid.uuid4())}'

    # Add the markup for the CSS and link component
    html = f"""
        {_IpythonUtils._get_styles()}
        <a class="view-vertex-resource" id="{button_id}" href="#view-{button_id}">
          <span class="material-icons view-vertex-icon">{icon}</span>
          <span>{text}</span>
        </a>
        """

    # Add the click handler for the link
    html += f"""
        <script>
          (function () {{
            const link = document.getElementById('{button_id}');
            link.addEventListener('click', (e) => {{
              if (window.google?.colab?.openUrl) {{
                window.google.colab.openUrl('{url}');
              }} else {{
                window.open('{url}', '_blank');
              }}
              e.stopPropagation();
              e.preventDefault();
            }});
          }})();
        </script>
    """

    from IPython.display import display
    from IPython.display import HTML

    display(HTML(html))

  @staticmethod
  def display_experiment_button(experiment: str, project: str) -> None:
    """Function to generate a link bound to the Vertex experiment.

    Args:
      experiment: The Vertex experiment name. Example format:
        projects/{project_id}/locations/{location}/metadataStores/default/contexts/{experiment_name}
      project: The project (alphanumeric) name.
    """
    if (
        not _IpythonUtils.is_ipython_available()
        or experiment in _IpythonUtils.displayed_experiments
    ):
      return
    # Experiment gives the numeric id, but we need the alphanumeric project
    # name. So we get the project from the api client object as an argument.
    resource_parts = experiment.split('/')
    location = resource_parts[3]
    experiment_name = resource_parts[-1]

    uri = (
        'https://console.cloud.google.com/vertex-ai/experiments/locations/'
        + f'{location}/experiments/{experiment_name}/'
        + f'runs?project={project}'
    )
    _IpythonUtils._display_link('View Experiment', uri, 'science')

    # Avoid repeatedly showing the button
    _IpythonUtils.displayed_experiments.add(experiment)

  @staticmethod
  def display_model_tuning_button(tuning_job_resource: str) -> None:
    """Function to generate a link bound to the Vertex model tuning job.

    Args:
      tuning_job_resource: The Vertex tuning job name. Example format:
        projects/{project_id}/locations/{location}/tuningJobs/{tuning_job_id}
    """
    if not _IpythonUtils.is_ipython_available():
      return

    resource_parts = tuning_job_resource.split('/')
    project = resource_parts[1]
    location = resource_parts[3]
    tuning_job_id = resource_parts[-1]

    uri = (
        'https://console.cloud.google.com/vertex-ai/generative/language/'
        + f'locations/{location}/tuning/tuningJob/{tuning_job_id}'
        + f'?project={project}'
    )
    _IpythonUtils._display_link('View Tuning Job', uri, 'tune')
