// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by sidekick. DO NOT EDIT.

#![allow(rustdoc::redundant_explicit_links)]
#![allow(rustdoc::broken_intra_doc_links)]
#![no_implicit_prelude]
extern crate async_trait;
extern crate bytes;
extern crate gax;
extern crate gaxi;
extern crate lazy_static;
extern crate longrunning;
extern crate lro;
extern crate reqwest;
extern crate rpc;
extern crate serde;
extern crate serde_json;
extern crate serde_with;
extern crate std;
extern crate tracing;
extern crate wkt;

mod debug;
mod deserialize;
mod serialize;

/// Video annotation request.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct AnnotateVideoRequest {
    /// Input video location. Currently, only
    /// [Cloud Storage](https://cloud.google.com/storage/) URIs are
    /// supported. URIs must be specified in the following format:
    /// `gs://bucket-id/object-id` (other URI formats return
    /// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
    /// more information, see [Request
    /// URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
    /// multiple videos, a video URI may include wildcards in the `object-id`.
    /// Supported wildcards: '*' to match 0 or more characters;
    /// '?' to match 1 character. If unset, the input video should be embedded
    /// in the request as `input_content`. If set, `input_content` must be unset.
    pub input_uri: std::string::String,

    /// The video data bytes.
    /// If unset, the input video(s) should be specified via the `input_uri`.
    /// If set, `input_uri` must be unset.
    pub input_content: ::bytes::Bytes,

    /// Required. Requested video annotation features.
    pub features: std::vec::Vec<crate::model::Feature>,

    /// Additional video context and/or feature-specific parameters.
    pub video_context: std::option::Option<crate::model::VideoContext>,

    /// Optional. Location where the output (in JSON format) should be stored.
    /// Currently, only [Cloud Storage](https://cloud.google.com/storage/)
    /// URIs are supported. These must be specified in the following format:
    /// `gs://bucket-id/object-id` (other URI formats return
    /// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
    /// more information, see [Request
    /// URIs](https://cloud.google.com/storage/docs/request-endpoints).
    pub output_uri: std::string::String,

    /// Optional. Cloud region where annotation should take place. Supported cloud
    /// regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
    /// region is specified, the region will be determined based on video file
    /// location.
    pub location_id: std::string::String,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl AnnotateVideoRequest {
    /// Creates a request with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [input_uri][crate::model::AnnotateVideoRequest::input_uri] with the given value.
    pub fn set_input_uri<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.input_uri = value.into();
        self
    }

    /// Replaces [input_content][crate::model::AnnotateVideoRequest::input_content] with the given value.
    pub fn set_input_content<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<::bytes::Bytes>,
    {
        self.input_content = value.into();
        self
    }

    /// Replaces [features][crate::model::AnnotateVideoRequest::features] with the given values.
    pub fn set_features<T, V>(mut self, values: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::Feature>,
    {
        let mut converted = std::vec::Vec::new();
        for item in values {
            converted.push(item.into());
        }
        self.features = converted;
        self
    }

    /// Replaces [video_context][crate::model::AnnotateVideoRequest::video_context] with the given value.
    pub fn set_video_context<T: std::convert::Into<crate::model::VideoContext>>(
        mut self,
        value: T,
    ) -> Self {
        self.video_context = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [video_context][crate::model::AnnotateVideoRequest::video_context];
    /// passing `None` clears the field.
    pub fn set_or_clear_video_context<T: std::convert::Into<crate::model::VideoContext>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.video_context = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [output_uri][crate::model::AnnotateVideoRequest::output_uri] with the given value.
    pub fn set_output_uri<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.output_uri = value.into();
        self
    }

    /// Replaces [location_id][crate::model::AnnotateVideoRequest::location_id] with the given value.
    pub fn set_location_id<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.location_id = value.into();
        self
    }
}

impl wkt::message::Message for AnnotateVideoRequest {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.AnnotateVideoRequest"
    }
}

/// Video context and/or feature-specific parameters.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoContext {
    /// Video segments to annotate. The segments may overlap and are not required
    /// to be contiguous or span the whole video. If unspecified, each video is
    /// treated as a single segment.
    pub segments: std::vec::Vec<crate::model::VideoSegment>,

    /// Config for LABEL_DETECTION.
    pub label_detection_config: std::option::Option<crate::model::LabelDetectionConfig>,

    /// Config for SHOT_CHANGE_DETECTION.
    pub shot_change_detection_config: std::option::Option<crate::model::ShotChangeDetectionConfig>,

    /// Config for EXPLICIT_CONTENT_DETECTION.
    pub explicit_content_detection_config:
        std::option::Option<crate::model::ExplicitContentDetectionConfig>,

    /// Config for FACE_DETECTION.
    pub face_detection_config: std::option::Option<crate::model::FaceDetectionConfig>,

    /// Config for SPEECH_TRANSCRIPTION.
    pub speech_transcription_config: std::option::Option<crate::model::SpeechTranscriptionConfig>,

    /// Config for TEXT_DETECTION.
    pub text_detection_config: std::option::Option<crate::model::TextDetectionConfig>,

    /// Config for PERSON_DETECTION.
    pub person_detection_config: std::option::Option<crate::model::PersonDetectionConfig>,

    /// Config for OBJECT_TRACKING.
    pub object_tracking_config: std::option::Option<crate::model::ObjectTrackingConfig>,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoContext {
    /// Creates a context with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [segments][crate::model::VideoContext::segments] with the given values.
    pub fn set_segments<T, V>(mut self, values: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::VideoSegment>,
    {
        let mut converted = std::vec::Vec::new();
        for item in values {
            converted.push(item.into());
        }
        self.segments = converted;
        self
    }

    /// Replaces [label_detection_config][crate::model::VideoContext::label_detection_config] with the given value.
    pub fn set_label_detection_config<T: std::convert::Into<crate::model::LabelDetectionConfig>>(
        mut self,
        value: T,
    ) -> Self {
        self.label_detection_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [label_detection_config][crate::model::VideoContext::label_detection_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_label_detection_config<
        T: std::convert::Into<crate::model::LabelDetectionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.label_detection_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [shot_change_detection_config][crate::model::VideoContext::shot_change_detection_config] with the given value.
    pub fn set_shot_change_detection_config<
        T: std::convert::Into<crate::model::ShotChangeDetectionConfig>,
    >(
        mut self,
        value: T,
    ) -> Self {
        self.shot_change_detection_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [shot_change_detection_config][crate::model::VideoContext::shot_change_detection_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_shot_change_detection_config<
        T: std::convert::Into<crate::model::ShotChangeDetectionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.shot_change_detection_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [explicit_content_detection_config][crate::model::VideoContext::explicit_content_detection_config] with the given value.
    pub fn set_explicit_content_detection_config<
        T: std::convert::Into<crate::model::ExplicitContentDetectionConfig>,
    >(
        mut self,
        value: T,
    ) -> Self {
        self.explicit_content_detection_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [explicit_content_detection_config][crate::model::VideoContext::explicit_content_detection_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_explicit_content_detection_config<
        T: std::convert::Into<crate::model::ExplicitContentDetectionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.explicit_content_detection_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [face_detection_config][crate::model::VideoContext::face_detection_config] with the given value.
    pub fn set_face_detection_config<T: std::convert::Into<crate::model::FaceDetectionConfig>>(
        mut self,
        value: T,
    ) -> Self {
        self.face_detection_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [face_detection_config][crate::model::VideoContext::face_detection_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_face_detection_config<
        T: std::convert::Into<crate::model::FaceDetectionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.face_detection_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [speech_transcription_config][crate::model::VideoContext::speech_transcription_config] with the given value.
    pub fn set_speech_transcription_config<
        T: std::convert::Into<crate::model::SpeechTranscriptionConfig>,
    >(
        mut self,
        value: T,
    ) -> Self {
        self.speech_transcription_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [speech_transcription_config][crate::model::VideoContext::speech_transcription_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_speech_transcription_config<
        T: std::convert::Into<crate::model::SpeechTranscriptionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.speech_transcription_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [text_detection_config][crate::model::VideoContext::text_detection_config] with the given value.
    pub fn set_text_detection_config<T: std::convert::Into<crate::model::TextDetectionConfig>>(
        mut self,
        value: T,
    ) -> Self {
        self.text_detection_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [text_detection_config][crate::model::VideoContext::text_detection_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_text_detection_config<
        T: std::convert::Into<crate::model::TextDetectionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.text_detection_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [person_detection_config][crate::model::VideoContext::person_detection_config] with the given value.
    pub fn set_person_detection_config<
        T: std::convert::Into<crate::model::PersonDetectionConfig>,
    >(
        mut self,
        value: T,
    ) -> Self {
        self.person_detection_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [person_detection_config][crate::model::VideoContext::person_detection_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_person_detection_config<
        T: std::convert::Into<crate::model::PersonDetectionConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.person_detection_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [object_tracking_config][crate::model::VideoContext::object_tracking_config] with the given value.
    pub fn set_object_tracking_config<T: std::convert::Into<crate::model::ObjectTrackingConfig>>(
        mut self,
        value: T,
    ) -> Self {
        self.object_tracking_config = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [object_tracking_config][crate::model::VideoContext::object_tracking_config];
    /// passing `None` clears the field.
    pub fn set_or_clear_object_tracking_config<
        T: std::convert::Into<crate::model::ObjectTrackingConfig>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.object_tracking_config = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }
}

impl wkt::message::Message for VideoContext {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.VideoContext"
    }
}

/// Config for LABEL_DETECTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct LabelDetectionConfig {
    /// What labels should be detected with LABEL_DETECTION, in addition to
    /// video-level labels or segment-level labels.
    /// If unspecified, defaults to `SHOT_MODE`.
    pub label_detection_mode: crate::model::LabelDetectionMode,

    /// Whether the video has been shot from a stationary (i.e., non-moving)
    /// camera. When set to true, might improve detection accuracy for moving
    /// objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
    pub stationary_camera: bool,

    /// Model to use for label detection.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    pub model: std::string::String,

    /// The confidence threshold we perform filtering on the labels from
    /// frame-level detection. If not set, it is set to 0.4 by default. The valid
    /// range for this threshold is [0.1, 0.9]. Any value set outside of this
    /// range will be clipped.
    /// Note: For best results, follow the default threshold. We will update
    /// the default threshold everytime when we release a new model.
    pub frame_confidence_threshold: f32,

    /// The confidence threshold we perform filtering on the labels from
    /// video-level and shot-level detections. If not set, it's set to 0.3 by
    /// default. The valid range for this threshold is [0.1, 0.9]. Any value set
    /// outside of this range will be clipped.
    /// Note: For best results, follow the default threshold. We will update
    /// the default threshold everytime when we release a new model.
    pub video_confidence_threshold: f32,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl LabelDetectionConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [label_detection_mode][crate::model::LabelDetectionConfig::label_detection_mode] with the given value.
    pub fn set_label_detection_mode<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<crate::model::LabelDetectionMode>,
    {
        self.label_detection_mode = value.into();
        self
    }

    /// Replaces [stationary_camera][crate::model::LabelDetectionConfig::stationary_camera] with the given value.
    pub fn set_stationary_camera<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.stationary_camera = value.into();
        self
    }

    /// Replaces [model][crate::model::LabelDetectionConfig::model] with the given value.
    pub fn set_model<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.model = value.into();
        self
    }

    /// Replaces [frame_confidence_threshold][crate::model::LabelDetectionConfig::frame_confidence_threshold] with the given value.
    pub fn set_frame_confidence_threshold<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.frame_confidence_threshold = value.into();
        self
    }

    /// Replaces [video_confidence_threshold][crate::model::LabelDetectionConfig::video_confidence_threshold] with the given value.
    pub fn set_video_confidence_threshold<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.video_confidence_threshold = value.into();
        self
    }
}

impl wkt::message::Message for LabelDetectionConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.LabelDetectionConfig"
    }
}

/// Config for SHOT_CHANGE_DETECTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ShotChangeDetectionConfig {
    /// Model to use for shot change detection.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    pub model: std::string::String,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ShotChangeDetectionConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [model][crate::model::ShotChangeDetectionConfig::model] with the given value.
    pub fn set_model<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.model = value.into();
        self
    }
}

impl wkt::message::Message for ShotChangeDetectionConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ShotChangeDetectionConfig"
    }
}

/// Config for OBJECT_TRACKING.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ObjectTrackingConfig {
    /// Model to use for object tracking.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    pub model: std::string::String,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ObjectTrackingConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [model][crate::model::ObjectTrackingConfig::model] with the given value.
    pub fn set_model<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.model = value.into();
        self
    }
}

impl wkt::message::Message for ObjectTrackingConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ObjectTrackingConfig"
    }
}

/// Config for FACE_DETECTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct FaceDetectionConfig {
    /// Model to use for face detection.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    pub model: std::string::String,

    /// Whether bounding boxes are included in the face annotation output.
    pub include_bounding_boxes: bool,

    /// Whether to enable face attributes detection, such as glasses, dark_glasses,
    /// mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
    pub include_attributes: bool,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl FaceDetectionConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [model][crate::model::FaceDetectionConfig::model] with the given value.
    pub fn set_model<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.model = value.into();
        self
    }

    /// Replaces [include_bounding_boxes][crate::model::FaceDetectionConfig::include_bounding_boxes] with the given value.
    pub fn set_include_bounding_boxes<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.include_bounding_boxes = value.into();
        self
    }

    /// Replaces [include_attributes][crate::model::FaceDetectionConfig::include_attributes] with the given value.
    pub fn set_include_attributes<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.include_attributes = value.into();
        self
    }
}

impl wkt::message::Message for FaceDetectionConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.FaceDetectionConfig"
    }
}

/// Config for PERSON_DETECTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct PersonDetectionConfig {
    /// Whether bounding boxes are included in the person detection annotation
    /// output.
    pub include_bounding_boxes: bool,

    /// Whether to enable pose landmarks detection. Ignored if
    /// 'include_bounding_boxes' is set to false.
    pub include_pose_landmarks: bool,

    /// Whether to enable person attributes detection, such as cloth color (black,
    /// blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
    /// etc.
    /// Ignored if 'include_bounding_boxes' is set to false.
    pub include_attributes: bool,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl PersonDetectionConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [include_bounding_boxes][crate::model::PersonDetectionConfig::include_bounding_boxes] with the given value.
    pub fn set_include_bounding_boxes<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.include_bounding_boxes = value.into();
        self
    }

    /// Replaces [include_pose_landmarks][crate::model::PersonDetectionConfig::include_pose_landmarks] with the given value.
    pub fn set_include_pose_landmarks<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.include_pose_landmarks = value.into();
        self
    }

    /// Replaces [include_attributes][crate::model::PersonDetectionConfig::include_attributes] with the given value.
    pub fn set_include_attributes<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.include_attributes = value.into();
        self
    }
}

impl wkt::message::Message for PersonDetectionConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.PersonDetectionConfig"
    }
}

/// Config for EXPLICIT_CONTENT_DETECTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ExplicitContentDetectionConfig {
    /// Model to use for explicit content detection.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    pub model: std::string::String,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ExplicitContentDetectionConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [model][crate::model::ExplicitContentDetectionConfig::model] with the given value.
    pub fn set_model<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.model = value.into();
        self
    }
}

impl wkt::message::Message for ExplicitContentDetectionConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ExplicitContentDetectionConfig"
    }
}

/// Config for TEXT_DETECTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TextDetectionConfig {
    /// Language hint can be specified if the language to be detected is known a
    /// priori. It can increase the accuracy of the detection. Language hint must
    /// be language code in BCP-47 format.
    ///
    /// Automatic language detection is performed if no hint is provided.
    pub language_hints: std::vec::Vec<std::string::String>,

    /// Model to use for text detection.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    pub model: std::string::String,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TextDetectionConfig {
    /// Creates a config with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [language_hints][crate::model::TextDetectionConfig::language_hints] with the given values.
    pub fn set_language_hints<T, V>(mut self, values: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<std::string::String>,
    {
        let mut converted = std::vec::Vec::new();
        for item in values {
            converted.push(item.into());
        }
        self.language_hints = converted;
        self
    }

    /// Replaces [model][crate::model::TextDetectionConfig::model] with the given value.
    pub fn set_model<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.model = value.into();
        self
    }
}

impl wkt::message::Message for TextDetectionConfig {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.TextDetectionConfig"
    }
}

/// Video segment.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoSegment {
    /// Time-offset, relative to the beginning of the video,
    /// corresponding to the start of the segment (inclusive).
    pub start_time_offset: std::option::Option<wkt::Duration>,

    /// Time-offset, relative to the beginning of the video,
    /// corresponding to the end of the segment (inclusive).
    pub end_time_offset: std::option::Option<wkt::Duration>,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoSegment {
    /// Creates a segment with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [start_time_offset][crate::model::VideoSegment::start_time_offset] with the given value.
    pub fn set_start_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: T,
    ) -> Self {
        self.start_time_offset = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [start_time_offset][crate::model::VideoSegment::start_time_offset];
    /// passing `None` clears the field.
    pub fn set_or_clear_start_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.start_time_offset = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [end_time_offset][crate::model::VideoSegment::end_time_offset] with the given value.
    pub fn set_end_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, value: T) -> Self {
        self.end_time_offset = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [end_time_offset][crate::model::VideoSegment::end_time_offset];
    /// passing `None` clears the field.
    pub fn set_or_clear_end_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.end_time_offset = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }
}

impl wkt::message::Message for VideoSegment {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.VideoSegment"
    }
}

/// Video segment level annotation results for label detection.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct LabelSegment {
    /// Video segment where a label was detected.
    pub segment: std::option::Option<crate::model::VideoSegment>,

    /// Confidence that the label is accurate. Range: [0, 1].
    pub confidence: f32,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl LabelSegment {
    /// Creates a segment annotation with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [segment][crate::model::LabelSegment::segment] with the given value.
    pub fn set_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        value: T,
    ) -> Self {
        self.segment = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [segment][crate::model::LabelSegment::segment];
    /// passing `None` clears the field.
    pub fn set_or_clear_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.segment = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [confidence][crate::model::LabelSegment::confidence] with the given value.
    pub fn set_confidence<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = value.into();
        self
    }
}

impl wkt::message::Message for LabelSegment {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.LabelSegment"
    }
}

/// Video frame level annotation results for label detection.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct LabelFrame {
    /// Time-offset, relative to the beginning of the video, corresponding to the
    /// video frame for this location.
    pub time_offset: std::option::Option<wkt::Duration>,

    /// Confidence that the label is accurate. Range: [0, 1].
    pub confidence: f32,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl LabelFrame {
    /// Creates a frame annotation with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [time_offset][crate::model::LabelFrame::time_offset] with the given value.
    pub fn set_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, value: T) -> Self {
        self.time_offset = std::option::Option::Some(value.into());
        self
    }

    /// Replaces [time_offset][crate::model::LabelFrame::time_offset];
    /// passing `None` clears the field.
    pub fn set_or_clear_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.time_offset = match value {
            std::option::Option::Some(inner) => std::option::Option::Some(inner.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [confidence][crate::model::LabelFrame::confidence] with the given value.
    pub fn set_confidence<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = value.into();
        self
    }
}

impl wkt::message::Message for LabelFrame {
    /// Returns the type URL that identifies this message on the wire.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.LabelFrame"
    }
}

/// Detected entity from video analysis.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct Entity {
    /// Opaque entity ID. Some IDs may be available in
    /// [Google Knowledge Graph Search
    /// API](https://developers.google.com/knowledge-graph/).
    pub entity_id: std::string::String,

    /// Textual description, e.g., `Fixed-gear bicycle`.
    pub description: std::string::String,

    /// Language code for `description` in BCP-47 format.
    pub language_code: std::string::String,

    // Wire fields not modeled above — presumably retained here so the generated
    // serialize/deserialize code can round-trip them; verify against mod serialize.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl Entity {
    /// Creates an entity with every field at its default value.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [entity_id][crate::model::Entity::entity_id] with the given value.
    pub fn set_entity_id<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.entity_id = value.into();
        self
    }

    /// Replaces [description][crate::model::Entity::description] with the given value.
    pub fn set_description<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.description = value.into();
        self
    }

    /// Replaces [language_code][crate::model::Entity::language_code] with the given value.
    pub fn set_language_code<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.language_code = value.into();
        self
    }
}

impl wkt::message::Message for Entity {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.Entity"
    }
}

/// Label annotation.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct LabelAnnotation {
    /// Detected entity.
    pub entity: std::option::Option<crate::model::Entity>,

    /// Common categories for the detected entity.
    /// For example, when the label is `Terrier`, the category is likely `dog`. And
    /// in some cases there might be more than one category, e.g., `Terrier` could
    /// also be a `pet`.
    pub category_entities: std::vec::Vec<crate::model::Entity>,

    /// All video segments where a label was detected.
    pub segments: std::vec::Vec<crate::model::LabelSegment>,

    /// All video frames where a label was detected.
    pub frames: std::vec::Vec<crate::model::LabelFrame>,

    /// Feature version.
    pub version: std::string::String,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl LabelAnnotation {
    /// Returns a default-initialized `LabelAnnotation`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Stores `value` in [entity][crate::model::LabelAnnotation::entity], wrapped in `Some`.
    pub fn set_entity<T: std::convert::Into<crate::model::Entity>>(mut self, value: T) -> Self {
        self.entity = std::option::Option::Some(value.into());
        self
    }

    /// Sets [entity][crate::model::LabelAnnotation::entity] from an optional value; `None` clears it.
    pub fn set_or_clear_entity<T: std::convert::Into<crate::model::Entity>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.entity = match value {
            std::option::Option::Some(e) => std::option::Option::Some(e.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [category_entities][crate::model::LabelAnnotation::category_entities] with the converted contents of `items`.
    pub fn set_category_entities<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::Entity>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.category_entities = converted;
        self
    }

    /// Replaces [segments][crate::model::LabelAnnotation::segments] with the converted contents of `items`.
    pub fn set_segments<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelSegment>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.segments = converted;
        self
    }

    /// Replaces [frames][crate::model::LabelAnnotation::frames] with the converted contents of `items`.
    pub fn set_frames<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelFrame>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.frames = converted;
        self
    }

    /// Assigns `value` to [version][crate::model::LabelAnnotation::version] and returns `self` for chaining.
    pub fn set_version<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.version = value.into();
        self
    }
}

impl wkt::message::Message for LabelAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.LabelAnnotation"
    }
}

/// Video frame level annotation results for explicit content.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ExplicitContentFrame {
    /// Time-offset, relative to the beginning of the video, corresponding to the
    /// video frame for this location.
    pub time_offset: std::option::Option<wkt::Duration>,

    /// Likelihood of the pornography content.
    pub pornography_likelihood: crate::model::Likelihood,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ExplicitContentFrame {
    /// Returns a default-initialized `ExplicitContentFrame`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Stores `value` in [time_offset][crate::model::ExplicitContentFrame::time_offset], wrapped in `Some`.
    pub fn set_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, value: T) -> Self {
        self.time_offset = std::option::Option::Some(value.into());
        self
    }

    /// Sets [time_offset][crate::model::ExplicitContentFrame::time_offset] from an optional value; `None` clears it.
    pub fn set_or_clear_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.time_offset = match value {
            std::option::Option::Some(d) => std::option::Option::Some(d.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Assigns `value` to [pornography_likelihood][crate::model::ExplicitContentFrame::pornography_likelihood] and returns `self`.
    pub fn set_pornography_likelihood<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<crate::model::Likelihood>,
    {
        self.pornography_likelihood = value.into();
        self
    }
}

impl wkt::message::Message for ExplicitContentFrame {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ExplicitContentFrame"
    }
}

/// Explicit content annotation (based on per-frame visual signals only).
/// If no explicit content has been detected in a frame, no annotations are
/// present for that frame.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ExplicitContentAnnotation {
    /// All video frames where explicit content was detected.
    pub frames: std::vec::Vec<crate::model::ExplicitContentFrame>,

    /// Feature version.
    pub version: std::string::String,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ExplicitContentAnnotation {
    /// Returns a default-initialized `ExplicitContentAnnotation`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [frames][crate::model::ExplicitContentAnnotation::frames] with the converted contents of `items`.
    pub fn set_frames<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::ExplicitContentFrame>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.frames = converted;
        self
    }

    /// Assigns `value` to [version][crate::model::ExplicitContentAnnotation::version] and returns `self` for chaining.
    pub fn set_version<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.version = value.into();
        self
    }
}

impl wkt::message::Message for ExplicitContentAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ExplicitContentAnnotation"
    }
}

/// Normalized bounding box.
/// The normalized vertex coordinates are relative to the original image.
/// Range: [0, 1].
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct NormalizedBoundingBox {
    /// Left X coordinate.
    pub left: f32,

    /// Top Y coordinate.
    pub top: f32,

    /// Right X coordinate.
    pub right: f32,

    /// Bottom Y coordinate.
    pub bottom: f32,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl NormalizedBoundingBox {
    /// Returns a default-initialized `NormalizedBoundingBox`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Assigns `value` to [left][crate::model::NormalizedBoundingBox::left] and returns `self` for chaining.
    pub fn set_left<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.left = value.into();
        self
    }

    /// Assigns `value` to [top][crate::model::NormalizedBoundingBox::top] and returns `self` for chaining.
    pub fn set_top<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.top = value.into();
        self
    }

    /// Assigns `value` to [right][crate::model::NormalizedBoundingBox::right] and returns `self` for chaining.
    pub fn set_right<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.right = value.into();
        self
    }

    /// Assigns `value` to [bottom][crate::model::NormalizedBoundingBox::bottom] and returns `self` for chaining.
    pub fn set_bottom<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.bottom = value.into();
        self
    }
}

impl wkt::message::Message for NormalizedBoundingBox {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.NormalizedBoundingBox"
    }
}

/// Face detection annotation.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct FaceDetectionAnnotation {
    /// The face tracks with attributes.
    pub tracks: std::vec::Vec<crate::model::Track>,

    /// The thumbnail of a person's face.
    pub thumbnail: ::bytes::Bytes,

    /// Feature version.
    pub version: std::string::String,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl FaceDetectionAnnotation {
    /// Returns a default-initialized `FaceDetectionAnnotation`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [tracks][crate::model::FaceDetectionAnnotation::tracks] with the converted contents of `items`.
    pub fn set_tracks<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::Track>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.tracks = converted;
        self
    }

    /// Assigns `value` to [thumbnail][crate::model::FaceDetectionAnnotation::thumbnail] and returns `self` for chaining.
    pub fn set_thumbnail<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<::bytes::Bytes>,
    {
        self.thumbnail = value.into();
        self
    }

    /// Assigns `value` to [version][crate::model::FaceDetectionAnnotation::version] and returns `self` for chaining.
    pub fn set_version<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.version = value.into();
        self
    }
}

impl wkt::message::Message for FaceDetectionAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.FaceDetectionAnnotation"
    }
}

/// Person detection annotation per video.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct PersonDetectionAnnotation {
    /// The detected tracks of a person.
    pub tracks: std::vec::Vec<crate::model::Track>,

    /// Feature version.
    pub version: std::string::String,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl PersonDetectionAnnotation {
    /// Returns a default-initialized `PersonDetectionAnnotation`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [tracks][crate::model::PersonDetectionAnnotation::tracks] with the converted contents of `items`.
    pub fn set_tracks<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::Track>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.tracks = converted;
        self
    }

    /// Assigns `value` to [version][crate::model::PersonDetectionAnnotation::version] and returns `self` for chaining.
    pub fn set_version<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.version = value.into();
        self
    }
}

impl wkt::message::Message for PersonDetectionAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.PersonDetectionAnnotation"
    }
}

/// Video segment level annotation results for face detection.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct FaceSegment {
    /// Video segment where a face was detected.
    pub segment: std::option::Option<crate::model::VideoSegment>,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl FaceSegment {
    /// Returns a default-initialized `FaceSegment`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Stores `value` in [segment][crate::model::FaceSegment::segment], wrapped in `Some`.
    pub fn set_segment<T: std::convert::Into<crate::model::VideoSegment>>(mut self, value: T) -> Self {
        self.segment = std::option::Option::Some(value.into());
        self
    }

    /// Sets [segment][crate::model::FaceSegment::segment] from an optional value; `None` clears it.
    pub fn set_or_clear_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.segment = match value {
            std::option::Option::Some(s) => std::option::Option::Some(s.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }
}

impl wkt::message::Message for FaceSegment {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.FaceSegment"
    }
}

/// Deprecated. No effect.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
#[deprecated]
pub struct FaceFrame {
    /// Normalized Bounding boxes in a frame.
    /// There can be more than one box if the same face is detected in multiple
    /// locations within the current frame.
    pub normalized_bounding_boxes: std::vec::Vec<crate::model::NormalizedBoundingBox>,

    /// Time-offset, relative to the beginning of the video,
    /// corresponding to the video frame for this location.
    pub time_offset: std::option::Option<wkt::Duration>,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl FaceFrame {
    /// Returns a default-initialized `FaceFrame`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [normalized_bounding_boxes][crate::model::FaceFrame::normalized_bounding_boxes] with the converted contents of `items`.
    pub fn set_normalized_bounding_boxes<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::NormalizedBoundingBox>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.normalized_bounding_boxes = converted;
        self
    }

    /// Stores `value` in [time_offset][crate::model::FaceFrame::time_offset], wrapped in `Some`.
    pub fn set_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, value: T) -> Self {
        self.time_offset = std::option::Option::Some(value.into());
        self
    }

    /// Sets [time_offset][crate::model::FaceFrame::time_offset] from an optional value; `None` clears it.
    pub fn set_or_clear_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.time_offset = match value {
            std::option::Option::Some(d) => std::option::Option::Some(d.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }
}

impl wkt::message::Message for FaceFrame {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.FaceFrame"
    }
}

/// Deprecated. No effect.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
#[deprecated]
pub struct FaceAnnotation {
    /// Thumbnail of a representative face view (in JPEG format).
    pub thumbnail: ::bytes::Bytes,

    /// All video segments where a face was detected.
    pub segments: std::vec::Vec<crate::model::FaceSegment>,

    /// All video frames where a face was detected.
    pub frames: std::vec::Vec<crate::model::FaceFrame>,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl FaceAnnotation {
    /// Returns a default-initialized `FaceAnnotation`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Assigns `value` to [thumbnail][crate::model::FaceAnnotation::thumbnail] and returns `self` for chaining.
    pub fn set_thumbnail<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<::bytes::Bytes>,
    {
        self.thumbnail = value.into();
        self
    }

    /// Replaces [segments][crate::model::FaceAnnotation::segments] with the converted contents of `items`.
    pub fn set_segments<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::FaceSegment>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.segments = converted;
        self
    }

    /// Replaces [frames][crate::model::FaceAnnotation::frames] with the converted contents of `items`.
    pub fn set_frames<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::FaceFrame>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.frames = converted;
        self
    }
}

impl wkt::message::Message for FaceAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.FaceAnnotation"
    }
}

/// For tracking related features.
/// An object at time_offset with attributes, and located with
/// normalized_bounding_box.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TimestampedObject {
    /// Normalized Bounding box in a frame, where the object is located.
    pub normalized_bounding_box: std::option::Option<crate::model::NormalizedBoundingBox>,

    /// Time-offset, relative to the beginning of the video,
    /// corresponding to the video frame for this object.
    pub time_offset: std::option::Option<wkt::Duration>,

    /// Optional. The attributes of the object in the bounding box.
    pub attributes: std::vec::Vec<crate::model::DetectedAttribute>,

    /// Optional. The detected landmarks.
    pub landmarks: std::vec::Vec<crate::model::DetectedLandmark>,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TimestampedObject {
    /// Returns a default-initialized `TimestampedObject`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Stores `value` in [normalized_bounding_box][crate::model::TimestampedObject::normalized_bounding_box], wrapped in `Some`.
    pub fn set_normalized_bounding_box<T: std::convert::Into<crate::model::NormalizedBoundingBox>>(
        mut self,
        value: T,
    ) -> Self {
        self.normalized_bounding_box = std::option::Option::Some(value.into());
        self
    }

    /// Sets [normalized_bounding_box][crate::model::TimestampedObject::normalized_bounding_box] from an optional value; `None` clears it.
    pub fn set_or_clear_normalized_bounding_box<
        T: std::convert::Into<crate::model::NormalizedBoundingBox>,
    >(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.normalized_bounding_box = match value {
            std::option::Option::Some(b) => std::option::Option::Some(b.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores `value` in [time_offset][crate::model::TimestampedObject::time_offset], wrapped in `Some`.
    pub fn set_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, value: T) -> Self {
        self.time_offset = std::option::Option::Some(value.into());
        self
    }

    /// Sets [time_offset][crate::model::TimestampedObject::time_offset] from an optional value; `None` clears it.
    pub fn set_or_clear_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.time_offset = match value {
            std::option::Option::Some(d) => std::option::Option::Some(d.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [attributes][crate::model::TimestampedObject::attributes] with the converted contents of `items`.
    pub fn set_attributes<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::DetectedAttribute>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.attributes = converted;
        self
    }

    /// Replaces [landmarks][crate::model::TimestampedObject::landmarks] with the converted contents of `items`.
    pub fn set_landmarks<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::DetectedLandmark>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.landmarks = converted;
        self
    }
}

impl wkt::message::Message for TimestampedObject {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.TimestampedObject"
    }
}

/// A track of an object instance.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct Track {
    /// Video segment of a track.
    pub segment: std::option::Option<crate::model::VideoSegment>,

    /// The object with timestamp and attributes per frame in the track.
    pub timestamped_objects: std::vec::Vec<crate::model::TimestampedObject>,

    /// Optional. Attributes in the track level.
    pub attributes: std::vec::Vec<crate::model::DetectedAttribute>,

    /// Optional. The confidence score of the tracked object.
    pub confidence: f32,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl Track {
    /// Returns a default-initialized `Track`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Stores `value` in [segment][crate::model::Track::segment], wrapped in `Some`.
    pub fn set_segment<T: std::convert::Into<crate::model::VideoSegment>>(mut self, value: T) -> Self {
        self.segment = std::option::Option::Some(value.into());
        self
    }

    /// Sets [segment][crate::model::Track::segment] from an optional value; `None` clears it.
    pub fn set_or_clear_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.segment = match value {
            std::option::Option::Some(s) => std::option::Option::Some(s.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [timestamped_objects][crate::model::Track::timestamped_objects] with the converted contents of `items`.
    pub fn set_timestamped_objects<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::TimestampedObject>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.timestamped_objects = converted;
        self
    }

    /// Replaces [attributes][crate::model::Track::attributes] with the converted contents of `items`.
    pub fn set_attributes<T, V>(mut self, items: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::DetectedAttribute>,
    {
        let mut converted = std::vec::Vec::new();
        for item in items {
            converted.push(item.into());
        }
        self.attributes = converted;
        self
    }

    /// Assigns `value` to [confidence][crate::model::Track::confidence] and returns `self` for chaining.
    pub fn set_confidence<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = value.into();
        self
    }
}

impl wkt::message::Message for Track {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.Track"
    }
}

/// A generic detected attribute represented by name in string format.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct DetectedAttribute {
    /// The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    /// A full list of supported type names will be provided in the document.
    pub name: std::string::String,

    /// Detected attribute confidence. Range [0, 1].
    pub confidence: f32,

    /// Text value of the detection result. For example, the value for "HairColor"
    /// can be "black", "blonde", etc.
    pub value: std::string::String,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl DetectedAttribute {
    /// Returns a default-initialized `DetectedAttribute`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Assigns `value` to [name][crate::model::DetectedAttribute::name] and returns `self` for chaining.
    pub fn set_name<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.name = value.into();
        self
    }

    /// Assigns `value` to [confidence][crate::model::DetectedAttribute::confidence] and returns `self` for chaining.
    pub fn set_confidence<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = value.into();
        self
    }

    /// Assigns `value` to [value][crate::model::DetectedAttribute::value] and returns `self` for chaining.
    pub fn set_value<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.value = value.into();
        self
    }
}

impl wkt::message::Message for DetectedAttribute {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.DetectedAttribute"
    }
}

/// A generic detected landmark represented by name in string format and a 2D
/// location.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct DetectedLandmark {
    /// The name of this landmark, for example, left_hand, right_shoulder.
    pub name: std::string::String,

    /// The 2D point of the detected landmark using the normalized image
    /// coordinate system. The normalized coordinates have the range from 0 to 1.
    pub point: std::option::Option<crate::model::NormalizedVertex>,

    /// The confidence score of the detected landmark. Range [0, 1].
    pub confidence: f32,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl DetectedLandmark {
    /// Returns a default-initialized `DetectedLandmark`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Assigns `value` to [name][crate::model::DetectedLandmark::name] and returns `self` for chaining.
    pub fn set_name<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.name = value.into();
        self
    }

    /// Stores `value` in [point][crate::model::DetectedLandmark::point], wrapped in `Some`.
    pub fn set_point<T: std::convert::Into<crate::model::NormalizedVertex>>(mut self, value: T) -> Self {
        self.point = std::option::Option::Some(value.into());
        self
    }

    /// Sets [point][crate::model::DetectedLandmark::point] from an optional value; `None` clears it.
    pub fn set_or_clear_point<T: std::convert::Into<crate::model::NormalizedVertex>>(
        mut self,
        value: std::option::Option<T>,
    ) -> Self {
        self.point = match value {
            std::option::Option::Some(p) => std::option::Option::Some(p.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Assigns `value` to [confidence][crate::model::DetectedLandmark::confidence] and returns `self` for chaining.
    pub fn set_confidence<T>(mut self, value: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = value.into();
        self
    }
}

impl wkt::message::Message for DetectedLandmark {
    /// Returns the fully-qualified Protobuf type URL for this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.DetectedLandmark"
    }
}

/// Annotation results for a single video.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoAnnotationResults {
    /// Video file location in
    /// [Cloud Storage](https://cloud.google.com/storage/).
    pub input_uri: std::string::String,

    /// Video segment on which the annotation is run.
    pub segment: std::option::Option<crate::model::VideoSegment>,

    /// Topical label annotations on video level or user-specified segment level.
    /// There is exactly one element for each unique label.
    pub segment_label_annotations: std::vec::Vec<crate::model::LabelAnnotation>,

    /// Presence label annotations on video level or user-specified segment level.
    /// There is exactly one element for each unique label. Compared to the
    /// existing topical `segment_label_annotations`, this field presents more
    /// fine-grained, segment-level labels detected in video content and is made
    /// available only when the client sets `LabelDetectionConfig.model` to
    /// "builtin/latest" in the request.
    pub segment_presence_label_annotations: std::vec::Vec<crate::model::LabelAnnotation>,

    /// Topical label annotations on shot level.
    /// There is exactly one element for each unique label.
    pub shot_label_annotations: std::vec::Vec<crate::model::LabelAnnotation>,

    /// Presence label annotations on shot level. There is exactly one element for
    /// each unique label. Compared to the existing topical
    /// `shot_label_annotations`, this field presents more fine-grained, shot-level
    /// labels detected in video content and is made available only when the client
    /// sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
    pub shot_presence_label_annotations: std::vec::Vec<crate::model::LabelAnnotation>,

    /// Label annotations on frame level.
    /// There is exactly one element for each unique label.
    pub frame_label_annotations: std::vec::Vec<crate::model::LabelAnnotation>,

    /// Deprecated. Please use `face_detection_annotations` instead.
    #[deprecated]
    pub face_annotations: std::vec::Vec<crate::model::FaceAnnotation>,

    /// Face detection annotations.
    pub face_detection_annotations: std::vec::Vec<crate::model::FaceDetectionAnnotation>,

    /// Shot annotations. Each shot is represented as a video segment.
    pub shot_annotations: std::vec::Vec<crate::model::VideoSegment>,

    /// Explicit content annotation.
    pub explicit_annotation: std::option::Option<crate::model::ExplicitContentAnnotation>,

    /// Speech transcription.
    pub speech_transcriptions: std::vec::Vec<crate::model::SpeechTranscription>,

    /// OCR text detection and tracking.
    /// Annotations for list of detected text snippets. Each will have list of
    /// frame information associated with it.
    pub text_annotations: std::vec::Vec<crate::model::TextAnnotation>,

    /// Annotations for list of objects detected and tracked in video.
    pub object_annotations: std::vec::Vec<crate::model::ObjectTrackingAnnotation>,

    /// Annotations for list of logos detected, tracked and recognized in video.
    pub logo_recognition_annotations: std::vec::Vec<crate::model::LogoRecognitionAnnotation>,

    /// Person detection annotations.
    pub person_detection_annotations: std::vec::Vec<crate::model::PersonDetectionAnnotation>,

    /// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
    /// some videos may succeed and some may fail.
    pub error: std::option::Option<rpc::model::Status>,

    // NOTE(review): appears to preserve unrecognized wire fields across a
    // deserialize/serialize round-trip — confirm in `mod deserialize`/`mod serialize`.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoAnnotationResults {
    /// Creates a new, fully-defaulted `VideoAnnotationResults`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces the value of [input_uri][crate::model::VideoAnnotationResults::input_uri].
    pub fn set_input_uri<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.input_uri = std::convert::Into::into(v);
        self
    }

    /// Sets [segment][crate::model::VideoAnnotationResults::segment] to `Some(v.into())`.
    pub fn set_segment<T: std::convert::Into<crate::model::VideoSegment>>(mut self, v: T) -> Self {
        self.segment = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [segment][crate::model::VideoAnnotationResults::segment] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.segment = std::option::Option::map(v, std::convert::Into::into);
        self
    }

    /// Replaces [segment_label_annotations][crate::model::VideoAnnotationResults::segment_label_annotations] with the converted elements of `v`.
    pub fn set_segment_label_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelAnnotation>,
    {
        self.segment_label_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [segment_presence_label_annotations][crate::model::VideoAnnotationResults::segment_presence_label_annotations] with the converted elements of `v`.
    pub fn set_segment_presence_label_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelAnnotation>,
    {
        self.segment_presence_label_annotations =
            std::iter::Iterator::collect(std::iter::Iterator::map(
                std::iter::IntoIterator::into_iter(v),
                std::convert::Into::into,
            ));
        self
    }

    /// Replaces [shot_label_annotations][crate::model::VideoAnnotationResults::shot_label_annotations] with the converted elements of `v`.
    pub fn set_shot_label_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelAnnotation>,
    {
        self.shot_label_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [shot_presence_label_annotations][crate::model::VideoAnnotationResults::shot_presence_label_annotations] with the converted elements of `v`.
    pub fn set_shot_presence_label_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelAnnotation>,
    {
        self.shot_presence_label_annotations =
            std::iter::Iterator::collect(std::iter::Iterator::map(
                std::iter::IntoIterator::into_iter(v),
                std::convert::Into::into,
            ));
        self
    }

    /// Replaces [frame_label_annotations][crate::model::VideoAnnotationResults::frame_label_annotations] with the converted elements of `v`.
    pub fn set_frame_label_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LabelAnnotation>,
    {
        self.frame_label_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [face_annotations][crate::model::VideoAnnotationResults::face_annotations] with the converted elements of `v`.
    #[deprecated]
    pub fn set_face_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::FaceAnnotation>,
    {
        self.face_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [face_detection_annotations][crate::model::VideoAnnotationResults::face_detection_annotations] with the converted elements of `v`.
    pub fn set_face_detection_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::FaceDetectionAnnotation>,
    {
        self.face_detection_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [shot_annotations][crate::model::VideoAnnotationResults::shot_annotations] with the converted elements of `v`.
    pub fn set_shot_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::VideoSegment>,
    {
        self.shot_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Sets [explicit_annotation][crate::model::VideoAnnotationResults::explicit_annotation] to `Some(v.into())`.
    pub fn set_explicit_annotation<T: std::convert::Into<crate::model::ExplicitContentAnnotation>>(
        mut self,
        v: T,
    ) -> Self {
        self.explicit_annotation = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [explicit_annotation][crate::model::VideoAnnotationResults::explicit_annotation] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_explicit_annotation<
        T: std::convert::Into<crate::model::ExplicitContentAnnotation>,
    >(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.explicit_annotation = std::option::Option::map(v, std::convert::Into::into);
        self
    }

    /// Replaces [speech_transcriptions][crate::model::VideoAnnotationResults::speech_transcriptions] with the converted elements of `v`.
    pub fn set_speech_transcriptions<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::SpeechTranscription>,
    {
        self.speech_transcriptions = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [text_annotations][crate::model::VideoAnnotationResults::text_annotations] with the converted elements of `v`.
    pub fn set_text_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::TextAnnotation>,
    {
        self.text_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [object_annotations][crate::model::VideoAnnotationResults::object_annotations] with the converted elements of `v`.
    pub fn set_object_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::ObjectTrackingAnnotation>,
    {
        self.object_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [logo_recognition_annotations][crate::model::VideoAnnotationResults::logo_recognition_annotations] with the converted elements of `v`.
    pub fn set_logo_recognition_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::LogoRecognitionAnnotation>,
    {
        self.logo_recognition_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces [person_detection_annotations][crate::model::VideoAnnotationResults::person_detection_annotations] with the converted elements of `v`.
    pub fn set_person_detection_annotations<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::PersonDetectionAnnotation>,
    {
        self.person_detection_annotations = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Sets [error][crate::model::VideoAnnotationResults::error] to `Some(v.into())`.
    pub fn set_error<T: std::convert::Into<rpc::model::Status>>(mut self, v: T) -> Self {
        self.error = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [error][crate::model::VideoAnnotationResults::error] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_error<T: std::convert::Into<rpc::model::Status>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.error = std::option::Option::map(v, std::convert::Into::into);
        self
    }
}

impl wkt::message::Message for VideoAnnotationResults {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.VideoAnnotationResults";
        TYPE_URL
    }
}

/// Video annotation response. Included in the `response`
/// field of the `Operation` returned by the `GetOperation`
/// call of the `google::longrunning::Operations` service.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct AnnotateVideoResponse {
    /// Annotation results for all videos specified in `AnnotateVideoRequest`.
    pub annotation_results: std::vec::Vec<crate::model::VideoAnnotationResults>,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl AnnotateVideoResponse {
    /// Creates a new, fully-defaulted `AnnotateVideoResponse`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [annotation_results][crate::model::AnnotateVideoResponse::annotation_results] with the converted elements of `v`.
    pub fn set_annotation_results<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::VideoAnnotationResults>,
    {
        self.annotation_results = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }
}

impl wkt::message::Message for AnnotateVideoResponse {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.AnnotateVideoResponse";
        TYPE_URL
    }
}

/// Annotation progress for a single video.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoAnnotationProgress {
    /// Video file location in
    /// [Cloud Storage](https://cloud.google.com/storage/).
    pub input_uri: std::string::String,

    /// Approximate percentage processed thus far. Guaranteed to be
    /// 100 when fully processed.
    pub progress_percent: i32,

    /// Time when the request was received.
    pub start_time: std::option::Option<wkt::Timestamp>,

    /// Time of the most recent update.
    pub update_time: std::option::Option<wkt::Timestamp>,

    /// Specifies which feature is being tracked if the request contains more than
    /// one feature.
    pub feature: crate::model::Feature,

    /// Specifies which segment is being tracked if the request contains more than
    /// one segment.
    pub segment: std::option::Option<crate::model::VideoSegment>,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoAnnotationProgress {
    /// Creates a new, fully-defaulted `VideoAnnotationProgress`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces the value of [input_uri][crate::model::VideoAnnotationProgress::input_uri].
    pub fn set_input_uri<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.input_uri = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [progress_percent][crate::model::VideoAnnotationProgress::progress_percent].
    pub fn set_progress_percent<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<i32>,
    {
        self.progress_percent = std::convert::Into::into(v);
        self
    }

    /// Sets [start_time][crate::model::VideoAnnotationProgress::start_time] to `Some(v.into())`.
    pub fn set_start_time<T: std::convert::Into<wkt::Timestamp>>(mut self, v: T) -> Self {
        self.start_time = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [start_time][crate::model::VideoAnnotationProgress::start_time] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_start_time<T: std::convert::Into<wkt::Timestamp>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.start_time = std::option::Option::map(v, std::convert::Into::into);
        self
    }

    /// Sets [update_time][crate::model::VideoAnnotationProgress::update_time] to `Some(v.into())`.
    pub fn set_update_time<T: std::convert::Into<wkt::Timestamp>>(mut self, v: T) -> Self {
        self.update_time = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [update_time][crate::model::VideoAnnotationProgress::update_time] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_update_time<T: std::convert::Into<wkt::Timestamp>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.update_time = std::option::Option::map(v, std::convert::Into::into);
        self
    }

    /// Replaces the value of [feature][crate::model::VideoAnnotationProgress::feature].
    pub fn set_feature<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<crate::model::Feature>,
    {
        self.feature = std::convert::Into::into(v);
        self
    }

    /// Sets [segment][crate::model::VideoAnnotationProgress::segment] to `Some(v.into())`.
    pub fn set_segment<T: std::convert::Into<crate::model::VideoSegment>>(mut self, v: T) -> Self {
        self.segment = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [segment][crate::model::VideoAnnotationProgress::segment] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.segment = std::option::Option::map(v, std::convert::Into::into);
        self
    }
}

impl wkt::message::Message for VideoAnnotationProgress {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.VideoAnnotationProgress";
        TYPE_URL
    }
}

/// Video annotation progress. Included in the `metadata`
/// field of the `Operation` returned by the `GetOperation`
/// call of the `google::longrunning::Operations` service.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct AnnotateVideoProgress {
    /// Progress metadata for all videos specified in `AnnotateVideoRequest`.
    pub annotation_progress: std::vec::Vec<crate::model::VideoAnnotationProgress>,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl AnnotateVideoProgress {
    /// Creates a new, fully-defaulted `AnnotateVideoProgress`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [annotation_progress][crate::model::AnnotateVideoProgress::annotation_progress] with the converted elements of `v`.
    pub fn set_annotation_progress<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::VideoAnnotationProgress>,
    {
        self.annotation_progress = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }
}

impl wkt::message::Message for AnnotateVideoProgress {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.AnnotateVideoProgress";
        TYPE_URL
    }
}

/// Config for SPEECH_TRANSCRIPTION.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct SpeechTranscriptionConfig {
    /// Required. *Required* The language of the supplied audio as a
    /// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    /// Example: "en-US".
    /// See [Language Support](https://cloud.google.com/speech/docs/languages)
    /// for a list of the currently supported language codes.
    pub language_code: std::string::String,

    /// Optional. Maximum number of recognition hypotheses to be returned.
    /// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    /// within each `SpeechTranscription`. The server may return fewer than
    /// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
    /// return a maximum of one. If omitted, will return a maximum of one.
    pub max_alternatives: i32,

    /// Optional. If set to `true`, the server will attempt to filter out
    /// profanities, replacing all but the initial character in each filtered word
    /// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    /// won't be filtered out.
    pub filter_profanity: bool,

    /// Optional. A means to provide context to assist the speech recognition.
    pub speech_contexts: std::vec::Vec<crate::model::SpeechContext>,

    /// Optional. If 'true', adds punctuation to recognition result hypotheses.
    /// This feature is only available in select languages. Setting this for
    /// requests in other languages has no effect at all. The default 'false' value
    /// does not add punctuation to result hypotheses. NOTE: "This is currently
    /// offered as an experimental service, complimentary to all users. In the
    /// future this may be exclusively available as a premium feature."
    pub enable_automatic_punctuation: bool,

    /// Optional. For file formats, such as MXF or MKV, supporting multiple audio
    /// tracks, specify up to two tracks. Default: track 0.
    pub audio_tracks: std::vec::Vec<i32>,

    /// Optional. If 'true', enables speaker detection for each recognized word in
    /// the top alternative of the recognition result using a speaker_tag provided
    /// in the WordInfo.
    /// Note: When this is true, we send all the words from the beginning of the
    /// audio for the top alternative in every consecutive response.
    /// This is done in order to improve our speaker tags as our models learn to
    /// identify the speakers in the conversation over time.
    pub enable_speaker_diarization: bool,

    /// Optional. If set, specifies the estimated number of speakers in the
    /// conversation. If not set, defaults to '2'. Ignored unless
    /// enable_speaker_diarization is set to true.
    pub diarization_speaker_count: i32,

    /// Optional. If `true`, the top result includes a list of words and the
    /// confidence for those words. If `false`, no word-level confidence
    /// information is returned. The default is `false`.
    pub enable_word_confidence: bool,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl SpeechTranscriptionConfig {
    /// Creates a new, fully-defaulted `SpeechTranscriptionConfig`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces the value of [language_code][crate::model::SpeechTranscriptionConfig::language_code].
    pub fn set_language_code<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.language_code = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [max_alternatives][crate::model::SpeechTranscriptionConfig::max_alternatives].
    pub fn set_max_alternatives<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<i32>,
    {
        self.max_alternatives = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [filter_profanity][crate::model::SpeechTranscriptionConfig::filter_profanity].
    pub fn set_filter_profanity<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.filter_profanity = std::convert::Into::into(v);
        self
    }

    /// Replaces [speech_contexts][crate::model::SpeechTranscriptionConfig::speech_contexts] with the converted elements of `v`.
    pub fn set_speech_contexts<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::SpeechContext>,
    {
        self.speech_contexts = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces the value of [enable_automatic_punctuation][crate::model::SpeechTranscriptionConfig::enable_automatic_punctuation].
    pub fn set_enable_automatic_punctuation<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.enable_automatic_punctuation = std::convert::Into::into(v);
        self
    }

    /// Replaces [audio_tracks][crate::model::SpeechTranscriptionConfig::audio_tracks] with the converted elements of `v`.
    pub fn set_audio_tracks<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<i32>,
    {
        self.audio_tracks = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces the value of [enable_speaker_diarization][crate::model::SpeechTranscriptionConfig::enable_speaker_diarization].
    pub fn set_enable_speaker_diarization<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.enable_speaker_diarization = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [diarization_speaker_count][crate::model::SpeechTranscriptionConfig::diarization_speaker_count].
    pub fn set_diarization_speaker_count<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<i32>,
    {
        self.diarization_speaker_count = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [enable_word_confidence][crate::model::SpeechTranscriptionConfig::enable_word_confidence].
    pub fn set_enable_word_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<bool>,
    {
        self.enable_word_confidence = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for SpeechTranscriptionConfig {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.SpeechTranscriptionConfig";
        TYPE_URL
    }
}

/// Provides "hints" to the speech recognizer to favor specific words and phrases
/// in the results.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct SpeechContext {
    /// Optional. A list of strings containing words and phrases "hints" so that
    /// the speech recognition is more likely to recognize them. This can be used
    /// to improve the accuracy for specific words and phrases, for example, if
    /// specific commands are typically spoken by the user. This can also be used
    /// to add additional words to the vocabulary of the recognizer. See
    /// [usage limits](https://cloud.google.com/speech/limits#content).
    pub phrases: std::vec::Vec<std::string::String>,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl SpeechContext {
    /// Creates a new, fully-defaulted `SpeechContext`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [phrases][crate::model::SpeechContext::phrases] with the converted elements of `v`.
    pub fn set_phrases<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<std::string::String>,
    {
        self.phrases = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }
}

impl wkt::message::Message for SpeechContext {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.SpeechContext";
        TYPE_URL
    }
}

/// A speech recognition result corresponding to a portion of the audio.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct SpeechTranscription {
    /// May contain one or more recognition hypotheses (up to the maximum specified
    /// in `max_alternatives`).  These alternatives are ordered in terms of
    /// accuracy, with the top (first) alternative being the most probable, as
    /// ranked by the recognizer.
    pub alternatives: std::vec::Vec<crate::model::SpeechRecognitionAlternative>,

    /// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
    /// language tag of the language in this result. This language code was
    /// detected to have the most likelihood of being spoken in the audio.
    pub language_code: std::string::String,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl SpeechTranscription {
    /// Creates a new, fully-defaulted `SpeechTranscription`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [alternatives][crate::model::SpeechTranscription::alternatives] with the converted elements of `v`.
    pub fn set_alternatives<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::SpeechRecognitionAlternative>,
    {
        self.alternatives = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }

    /// Replaces the value of [language_code][crate::model::SpeechTranscription::language_code].
    pub fn set_language_code<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.language_code = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for SpeechTranscription {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.SpeechTranscription";
        TYPE_URL
    }
}

/// Alternative hypotheses (a.k.a. n-best list).
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct SpeechRecognitionAlternative {
    /// Transcript text representing the words that the user spoke.
    pub transcript: std::string::String,

    /// Output only. The confidence estimate between 0.0 and 1.0. A higher number
    /// indicates an estimated greater likelihood that the recognized words are
    /// correct. This field is set only for the top alternative.
    /// This field is not guaranteed to be accurate and users should not rely on it
    /// to be always provided.
    /// The default of 0.0 is a sentinel value indicating `confidence` was not set.
    pub confidence: f32,

    /// Output only. A list of word-specific information for each recognized word.
    /// Note: When `enable_speaker_diarization` is set to true, you will see all
    /// the words from the beginning of the audio.
    pub words: std::vec::Vec<crate::model::WordInfo>,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl SpeechRecognitionAlternative {
    /// Creates a new, fully-defaulted `SpeechRecognitionAlternative`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces the value of [transcript][crate::model::SpeechRecognitionAlternative::transcript].
    pub fn set_transcript<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.transcript = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [confidence][crate::model::SpeechRecognitionAlternative::confidence].
    pub fn set_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = std::convert::Into::into(v);
        self
    }

    /// Replaces [words][crate::model::SpeechRecognitionAlternative::words] with the converted elements of `v`.
    pub fn set_words<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::WordInfo>,
    {
        self.words = std::iter::Iterator::collect(std::iter::Iterator::map(
            std::iter::IntoIterator::into_iter(v),
            std::convert::Into::into,
        ));
        self
    }
}

impl wkt::message::Message for SpeechRecognitionAlternative {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.SpeechRecognitionAlternative";
        TYPE_URL
    }
}

/// Word-specific information for recognized words. Word information is only
/// included in the response when certain request parameters are set, such
/// as `enable_word_time_offsets`.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct WordInfo {
    /// Time offset relative to the beginning of the audio, and
    /// corresponding to the start of the spoken word. This field is only set if
    /// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
    /// experimental feature and the accuracy of the time offset can vary.
    pub start_time: std::option::Option<wkt::Duration>,

    /// Time offset relative to the beginning of the audio, and
    /// corresponding to the end of the spoken word. This field is only set if
    /// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
    /// experimental feature and the accuracy of the time offset can vary.
    pub end_time: std::option::Option<wkt::Duration>,

    /// The word corresponding to this set of information.
    pub word: std::string::String,

    /// Output only. The confidence estimate between 0.0 and 1.0. A higher number
    /// indicates an estimated greater likelihood that the recognized words are
    /// correct. This field is set only for the top alternative.
    /// This field is not guaranteed to be accurate and users should not rely on it
    /// to be always provided.
    /// The default of 0.0 is a sentinel value indicating `confidence` was not set.
    pub confidence: f32,

    /// Output only. A distinct integer value is assigned for every speaker within
    /// the audio. This field specifies which one of those speakers was detected to
    /// have spoken this word. Value ranges from 1 up to diarization_speaker_count,
    /// and is only set if speaker diarization is enabled.
    pub speaker_tag: i32,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl WordInfo {
    /// Creates a new, fully-defaulted `WordInfo`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Sets [start_time][crate::model::WordInfo::start_time] to `Some(v.into())`.
    pub fn set_start_time<T: std::convert::Into<wkt::Duration>>(mut self, v: T) -> Self {
        self.start_time = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [start_time][crate::model::WordInfo::start_time] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_start_time<T: std::convert::Into<wkt::Duration>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.start_time = std::option::Option::map(v, std::convert::Into::into);
        self
    }

    /// Sets [end_time][crate::model::WordInfo::end_time] to `Some(v.into())`.
    pub fn set_end_time<T: std::convert::Into<wkt::Duration>>(mut self, v: T) -> Self {
        self.end_time = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets [end_time][crate::model::WordInfo::end_time] from an `Option`, clearing it on `None`.
    pub fn set_or_clear_end_time<T: std::convert::Into<wkt::Duration>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.end_time = std::option::Option::map(v, std::convert::Into::into);
        self
    }

    /// Replaces the value of [word][crate::model::WordInfo::word].
    pub fn set_word<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.word = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [confidence][crate::model::WordInfo::confidence].
    pub fn set_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [speaker_tag][crate::model::WordInfo::speaker_tag].
    pub fn set_speaker_tag<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<i32>,
    {
        self.speaker_tag = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for WordInfo {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.WordInfo";
        TYPE_URL
    }
}

/// A vertex represents a 2D point in the image.
/// NOTE: the normalized vertex coordinates are relative to the original image
/// and range from 0 to 1.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct NormalizedVertex {
    /// X coordinate.
    pub x: f32,

    /// Y coordinate.
    pub y: f32,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl NormalizedVertex {
    /// Creates a new, fully-defaulted `NormalizedVertex`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces the value of [x][crate::model::NormalizedVertex::x].
    pub fn set_x<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.x = std::convert::Into::into(v);
        self
    }

    /// Replaces the value of [y][crate::model::NormalizedVertex::y].
    pub fn set_y<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.y = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for NormalizedVertex {
    /// Fully-qualified protobuf type URL for this message.
    fn typename() -> &'static str {
        const TYPE_URL: &str =
            "type.googleapis.com/google.cloud.videointelligence.v1.NormalizedVertex";
        TYPE_URL
    }
}

/// Normalized bounding polygon for text (that might not be aligned with axis).
/// Contains list of the corner points in clockwise order starting from
/// top-left corner. For example, for a rectangular bounding box:
/// When the text is horizontal it might look like:
/// 0----1
/// |    |
/// 3----2
///
/// When it's clockwise rotated 180 degrees around the top-left corner it
/// becomes:
/// 2----3
/// |    |
/// 1----0
///
/// and the vertex order will still be (0, 1, 2, 3). Note that values can be less
/// than 0, or greater than 1 due to trigonometric calculations for location of
/// the box.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct NormalizedBoundingPoly {
    /// Normalized vertices of the bounding polygon.
    pub vertices: std::vec::Vec<crate::model::NormalizedVertex>,

    // Catch-all for JSON properties that do not map to a modeled field.
    // NOTE(review): presumably preserved for round-tripping by the
    // `serialize`/`deserialize` modules declared at the top of the file — confirm there.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl NormalizedBoundingPoly {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [vertices][crate::model::NormalizedBoundingPoly::vertices].
    pub fn set_vertices<
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::NormalizedVertex>,
    >(
        mut self,
        v: T,
    ) -> Self {
        use std::iter::Iterator;
        self.vertices = v.into_iter().map(std::convert::Into::into).collect();
        self
    }
}

impl wkt::message::Message for NormalizedBoundingPoly {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.NormalizedBoundingPoly"
    }
}

/// Video segment level annotation results for text detection.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TextSegment {
    /// Video segment where a text snippet was detected.
    pub segment: std::option::Option<crate::model::VideoSegment>,

    /// Confidence for the track of detected text. It is calculated as the highest
    /// over all frames where OCR detected text appears.
    pub confidence: f32,

    /// Information related to the frames where OCR detected text appears.
    pub frames: std::vec::Vec<crate::model::TextFrame>,

    // Fields received on the wire that this client version does not model;
    // presumably kept so they survive a deserialize/serialize round trip —
    // confirm against the hand-written `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TextSegment {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [segment][crate::model::TextSegment::segment].
    pub fn set_segment<T: std::convert::Into<crate::model::VideoSegment>>(mut self, v: T) -> Self {
        self.segment = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [segment][crate::model::TextSegment::segment].
    pub fn set_or_clear_segment<T: std::convert::Into<crate::model::VideoSegment>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.segment = v.map(std::convert::Into::into);
        self
    }

    /// Sets the value of [confidence][crate::model::TextSegment::confidence].
    pub fn set_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<f32>,
    {
        self.confidence = v.into();
        self
    }

    /// Sets the value of [frames][crate::model::TextSegment::frames].
    pub fn set_frames<T: std::iter::IntoIterator<Item = V>, V>(mut self, v: T) -> Self
    where
        V: std::convert::Into<crate::model::TextFrame>,
    {
        use std::iter::Iterator;
        self.frames = v.into_iter().map(std::convert::Into::into).collect();
        self
    }
}

impl wkt::message::Message for TextSegment {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.TextSegment"
    }
}

/// Video frame level annotation results for text annotation (OCR).
/// Contains information regarding timestamp and bounding box locations for the
/// frames containing detected OCR text snippets.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TextFrame {
    /// Bounding polygon of the detected text for this frame.
    pub rotated_bounding_box: std::option::Option<crate::model::NormalizedBoundingPoly>,

    /// Timestamp of this frame.
    pub time_offset: std::option::Option<wkt::Duration>,

    // Fields received on the wire that this client version does not model;
    // presumably kept so they survive a deserialize/serialize round trip —
    // confirm against the hand-written `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TextFrame {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [rotated_bounding_box][crate::model::TextFrame::rotated_bounding_box].
    pub fn set_rotated_bounding_box<T: std::convert::Into<crate::model::NormalizedBoundingPoly>>(
        mut self,
        v: T,
    ) -> Self {
        self.rotated_bounding_box = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [rotated_bounding_box][crate::model::TextFrame::rotated_bounding_box].
    pub fn set_or_clear_rotated_bounding_box<
        T: std::convert::Into<crate::model::NormalizedBoundingPoly>,
    >(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.rotated_bounding_box = v.map(std::convert::Into::into);
        self
    }

    /// Sets the value of [time_offset][crate::model::TextFrame::time_offset].
    pub fn set_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, v: T) -> Self {
        self.time_offset = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [time_offset][crate::model::TextFrame::time_offset].
    pub fn set_or_clear_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.time_offset = v.map(std::convert::Into::into);
        self
    }
}

impl wkt::message::Message for TextFrame {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.TextFrame"
    }
}

/// Annotations related to one detected OCR text snippet. This will contain the
/// corresponding text, confidence value, and frame level information for each
/// detection.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TextAnnotation {
    /// The detected text.
    pub text: std::string::String,

    /// All video segments where OCR detected text appears.
    pub segments: std::vec::Vec<crate::model::TextSegment>,

    /// Feature version.
    pub version: std::string::String,

    // Fields received on the wire that this client version does not model;
    // presumably kept so they survive a deserialize/serialize round trip —
    // confirm against the hand-written `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TextAnnotation {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [text][crate::model::TextAnnotation::text].
    pub fn set_text<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.text = v.into();
        self
    }

    /// Sets the value of [segments][crate::model::TextAnnotation::segments].
    pub fn set_segments<T: std::iter::IntoIterator<Item = V>, V>(mut self, v: T) -> Self
    where
        V: std::convert::Into<crate::model::TextSegment>,
    {
        use std::iter::Iterator;
        self.segments = v.into_iter().map(std::convert::Into::into).collect();
        self
    }

    /// Sets the value of [version][crate::model::TextAnnotation::version].
    pub fn set_version<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<std::string::String>,
    {
        self.version = v.into();
        self
    }
}

impl wkt::message::Message for TextAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.TextAnnotation"
    }
}

/// Video frame level annotations for object detection and tracking. This field
/// stores per frame location, time offset, and confidence.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ObjectTrackingFrame {
    /// The normalized bounding box location of this object track for the frame.
    pub normalized_bounding_box: std::option::Option<crate::model::NormalizedBoundingBox>,

    /// The timestamp of the frame in microseconds.
    pub time_offset: std::option::Option<wkt::Duration>,

    // Fields received on the wire that this client version does not model;
    // presumably kept so they survive a deserialize/serialize round trip —
    // confirm against the hand-written `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ObjectTrackingFrame {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [normalized_bounding_box][crate::model::ObjectTrackingFrame::normalized_bounding_box].
    pub fn set_normalized_bounding_box<T: std::convert::Into<crate::model::NormalizedBoundingBox>>(
        mut self,
        v: T,
    ) -> Self {
        self.normalized_bounding_box = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [normalized_bounding_box][crate::model::ObjectTrackingFrame::normalized_bounding_box].
    pub fn set_or_clear_normalized_bounding_box<
        T: std::convert::Into<crate::model::NormalizedBoundingBox>,
    >(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.normalized_bounding_box = v.map(std::convert::Into::into);
        self
    }

    /// Sets the value of [time_offset][crate::model::ObjectTrackingFrame::time_offset].
    pub fn set_time_offset<T: std::convert::Into<wkt::Duration>>(mut self, v: T) -> Self {
        self.time_offset = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [time_offset][crate::model::ObjectTrackingFrame::time_offset].
    pub fn set_or_clear_time_offset<T: std::convert::Into<wkt::Duration>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.time_offset = v.map(std::convert::Into::into);
        self
    }
}

impl wkt::message::Message for ObjectTrackingFrame {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ObjectTrackingFrame"
    }
}

/// Annotations corresponding to one tracked object.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ObjectTrackingAnnotation {
    /// Entity to specify the object category that this track is labeled as.
    pub entity: std::option::Option<crate::model::Entity>,

    /// Object category's labeling confidence of this track.
    pub confidence: f32,

    /// Information corresponding to all frames where this object track appears.
    /// Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
    /// messages in frames.
    /// Streaming mode: it can only be one ObjectTrackingFrame message in frames.
    pub frames: std::vec::Vec<crate::model::ObjectTrackingFrame>,

    /// Feature version.
    pub version: std::string::String,

    /// Different representation of tracking info in non-streaming batch
    /// and streaming modes.
    // This models a protobuf `oneof`: at most one `TrackInfo` branch is set.
    pub track_info: std::option::Option<crate::model::object_tracking_annotation::TrackInfo>,

    // Fields received on the wire that this client version does not model;
    // presumably kept so they survive a deserialize/serialize round trip —
    // confirm against the hand-written `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ObjectTrackingAnnotation {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [entity][crate::model::ObjectTrackingAnnotation::entity].
    pub fn set_entity<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<crate::model::Entity>,
    {
        self.entity = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [entity][crate::model::ObjectTrackingAnnotation::entity].
    pub fn set_or_clear_entity<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<crate::model::Entity>,
    {
        self.entity = v.map(|x| x.into());
        self
    }

    /// Sets the value of [confidence][crate::model::ObjectTrackingAnnotation::confidence].
    pub fn set_confidence<T: std::convert::Into<f32>>(mut self, v: T) -> Self {
        self.confidence = v.into();
        self
    }

    /// Sets the value of [frames][crate::model::ObjectTrackingAnnotation::frames].
    pub fn set_frames<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::ObjectTrackingFrame>,
    {
        use std::iter::Iterator;
        self.frames = v.into_iter().map(|i| i.into()).collect();
        self
    }

    /// Sets the value of [version][crate::model::ObjectTrackingAnnotation::version].
    pub fn set_version<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.version = v.into();
        self
    }

    /// Sets the value of [track_info][crate::model::ObjectTrackingAnnotation::track_info].
    ///
    /// Note that all the setters affecting `track_info` are mutually
    /// exclusive.
    pub fn set_track_info<
        T: std::convert::Into<
                std::option::Option<crate::model::object_tracking_annotation::TrackInfo>,
            >,
    >(
        mut self,
        v: T,
    ) -> Self {
        self.track_info = v.into();
        self
    }

    /// The value of [track_info][crate::model::ObjectTrackingAnnotation::track_info]
    /// if it holds a `Segment`, `None` if the field is not set or
    /// holds a different branch.
    pub fn segment(&self) -> std::option::Option<&std::boxed::Box<crate::model::VideoSegment>> {
        // The catch-all arm is unreachable while `TrackInfo` has only two
        // variants; it keeps this match compiling if new variants are added
        // to the `#[non_exhaustive]` enum.
        #[allow(unreachable_patterns)]
        self.track_info.as_ref().and_then(|v| match v {
            crate::model::object_tracking_annotation::TrackInfo::Segment(v) => {
                std::option::Option::Some(v)
            }
            _ => std::option::Option::None,
        })
    }

    /// Sets the value of [track_info][crate::model::ObjectTrackingAnnotation::track_info]
    /// to hold a `Segment`.
    ///
    /// Note that all the setters affecting `track_info` are
    /// mutually exclusive.
    // NB: replaces any previously set branch (e.g. a `TrackId`).
    pub fn set_segment<T: std::convert::Into<std::boxed::Box<crate::model::VideoSegment>>>(
        mut self,
        v: T,
    ) -> Self {
        self.track_info = std::option::Option::Some(
            crate::model::object_tracking_annotation::TrackInfo::Segment(v.into()),
        );
        self
    }

    /// The value of [track_info][crate::model::ObjectTrackingAnnotation::track_info]
    /// if it holds a `TrackId`, `None` if the field is not set or
    /// holds a different branch.
    pub fn track_id(&self) -> std::option::Option<&i64> {
        // See `segment()` above for why the catch-all arm is allowed.
        #[allow(unreachable_patterns)]
        self.track_info.as_ref().and_then(|v| match v {
            crate::model::object_tracking_annotation::TrackInfo::TrackId(v) => {
                std::option::Option::Some(v)
            }
            _ => std::option::Option::None,
        })
    }

    /// Sets the value of [track_info][crate::model::ObjectTrackingAnnotation::track_info]
    /// to hold a `TrackId`.
    ///
    /// Note that all the setters affecting `track_info` are
    /// mutually exclusive.
    // NB: replaces any previously set branch (e.g. a `Segment`).
    pub fn set_track_id<T: std::convert::Into<i64>>(mut self, v: T) -> Self {
        self.track_info = std::option::Option::Some(
            crate::model::object_tracking_annotation::TrackInfo::TrackId(v.into()),
        );
        self
    }
}

impl wkt::message::Message for ObjectTrackingAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.ObjectTrackingAnnotation"
    }
}

/// Defines additional types related to [ObjectTrackingAnnotation].
pub mod object_tracking_annotation {
    #[allow(unused_imports)]
    use super::*;

    /// Different representation of tracking info in non-streaming batch
    /// and streaming modes.
    // Models the `track_info` protobuf `oneof`; exactly one branch is held.
    #[derive(Clone, Debug, PartialEq)]
    #[non_exhaustive]
    pub enum TrackInfo {
        /// Non-streaming batch mode ONLY.
        /// Each object track corresponds to one video segment where it appears.
        // The segment is boxed, presumably to keep this enum (and the message
        // embedding it) small — confirm against the generator's layout rules.
        Segment(std::boxed::Box<crate::model::VideoSegment>),
        /// Streaming mode ONLY.
        /// In streaming mode, we do not know the end time of a tracked object
        /// before it is completed. Hence, there is no VideoSegment info returned.
        /// Instead, we provide a unique identifiable integer track_id so that
        /// the customers can correlate the results of the ongoing
        /// ObjectTrackAnnotation of the same track_id over time.
        TrackId(i64),
    }
}

/// Annotation corresponding to one detected, tracked and recognized logo class.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct LogoRecognitionAnnotation {
    /// Entity category information to specify the logo class that all the logo
    /// tracks within this LogoRecognitionAnnotation are recognized as.
    pub entity: std::option::Option<crate::model::Entity>,

    /// All logo tracks where the recognized logo appears. Each track corresponds
    /// to one logo instance appearing in consecutive frames.
    pub tracks: std::vec::Vec<crate::model::Track>,

    /// All video segments where the recognized logo appears. There might be
    /// multiple instances of the same logo class appearing in one VideoSegment.
    pub segments: std::vec::Vec<crate::model::VideoSegment>,

    // Fields received on the wire that this client version does not model;
    // presumably kept so they survive a deserialize/serialize round trip —
    // confirm against the hand-written `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl LogoRecognitionAnnotation {
    /// Creates a new message with every field set to its default value.
    pub fn new() -> Self {
        std::default::Default::default()
    }

    /// Sets the value of [entity][crate::model::LogoRecognitionAnnotation::entity].
    pub fn set_entity<T: std::convert::Into<crate::model::Entity>>(mut self, v: T) -> Self {
        self.entity = std::option::Option::Some(v.into());
        self
    }

    /// Sets or clears the value of [entity][crate::model::LogoRecognitionAnnotation::entity].
    pub fn set_or_clear_entity<T: std::convert::Into<crate::model::Entity>>(
        mut self,
        v: std::option::Option<T>,
    ) -> Self {
        self.entity = v.map(std::convert::Into::into);
        self
    }

    /// Sets the value of [tracks][crate::model::LogoRecognitionAnnotation::tracks].
    pub fn set_tracks<T: std::iter::IntoIterator<Item = V>, V>(mut self, v: T) -> Self
    where
        V: std::convert::Into<crate::model::Track>,
    {
        use std::iter::Iterator;
        self.tracks = v.into_iter().map(std::convert::Into::into).collect();
        self
    }

    /// Sets the value of [segments][crate::model::LogoRecognitionAnnotation::segments].
    pub fn set_segments<T: std::iter::IntoIterator<Item = V>, V>(mut self, v: T) -> Self
    where
        V: std::convert::Into<crate::model::VideoSegment>,
    {
        use std::iter::Iterator;
        self.segments = v.into_iter().map(std::convert::Into::into).collect();
        self
    }
}

impl wkt::message::Message for LogoRecognitionAnnotation {
    /// Returns the fully-qualified Protobuf type URL for this message.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.videointelligence.v1.LogoRecognitionAnnotation"
    }
}

/// Video annotation feature.
///
/// # Working with unknown values
///
/// This enum is defined as `#[non_exhaustive]` because Google Cloud may add
/// additional enum variants at any time. Adding new variants is not considered
/// a breaking change. Applications should write their code in anticipation of:
///
/// - New values appearing in future releases of the client library, **and**
/// - New values received dynamically, without application changes.
///
/// Please consult the [Working with enums] section in the user guide for some
/// guidelines.
///
/// [Working with enums]: https://google-cloud-rust.github.io/working_with_enums.html
// Keep the variant list in sync with the `value()`/`name()` tables and the
// `From` conversions implemented below.
#[derive(Clone, Debug, PartialEq)]
#[non_exhaustive]
pub enum Feature {
    /// Unspecified.
    Unspecified,
    /// Label detection. Detect objects, such as dog or flower.
    LabelDetection,
    /// Shot change detection.
    ShotChangeDetection,
    /// Explicit content detection.
    ExplicitContentDetection,
    /// Human face detection.
    FaceDetection,
    /// Speech transcription.
    SpeechTranscription,
    /// OCR text detection and tracking.
    TextDetection,
    /// Object detection and tracking.
    ObjectTracking,
    /// Logo detection, tracking, and recognition.
    LogoRecognition,
    /// Person detection.
    PersonDetection,
    /// If set, the enum was initialized with an unknown value.
    ///
    /// Applications can examine the value using [Feature::value] or
    /// [Feature::name].
    // Produced by the `From<i32>`/`From<&str>` conversions for unmapped input.
    UnknownValue(feature::UnknownValue),
}

#[doc(hidden)]
pub mod feature {
    #[allow(unused_imports)]
    use super::*;
    /// Opaque holder for a [Feature] value — in either integer or string
    /// form — that this version of the client library does not recognize.
    #[derive(Clone, Debug, PartialEq)]
    pub struct UnknownValue(pub(crate) wkt::internal::UnknownEnumValue);
}

impl Feature {
    /// Gets the enum value.
    ///
    /// Returns `None` if the enum contains an unknown value deserialized from
    /// the string representation of enums.
    pub fn value(&self) -> std::option::Option<i32> {
        // NOTE: the wire numbering is non-contiguous (5, 8, 10, 11, and 13
        // are unused here); keep this table in sync with `From<i32>` below.
        match self {
            Self::Unspecified => std::option::Option::Some(0),
            Self::LabelDetection => std::option::Option::Some(1),
            Self::ShotChangeDetection => std::option::Option::Some(2),
            Self::ExplicitContentDetection => std::option::Option::Some(3),
            Self::FaceDetection => std::option::Option::Some(4),
            Self::SpeechTranscription => std::option::Option::Some(6),
            Self::TextDetection => std::option::Option::Some(7),
            Self::ObjectTracking => std::option::Option::Some(9),
            Self::LogoRecognition => std::option::Option::Some(12),
            Self::PersonDetection => std::option::Option::Some(14),
            Self::UnknownValue(u) => u.0.value(),
        }
    }

    /// Gets the enum value as a string.
    ///
    /// Returns `None` if the enum contains an unknown value deserialized from
    /// the integer representation of enums.
    pub fn name(&self) -> std::option::Option<&str> {
        // Keep this table in sync with `From<&str>` below.
        match self {
            Self::Unspecified => std::option::Option::Some("FEATURE_UNSPECIFIED"),
            Self::LabelDetection => std::option::Option::Some("LABEL_DETECTION"),
            Self::ShotChangeDetection => std::option::Option::Some("SHOT_CHANGE_DETECTION"),
            Self::ExplicitContentDetection => {
                std::option::Option::Some("EXPLICIT_CONTENT_DETECTION")
            }
            Self::FaceDetection => std::option::Option::Some("FACE_DETECTION"),
            Self::SpeechTranscription => std::option::Option::Some("SPEECH_TRANSCRIPTION"),
            Self::TextDetection => std::option::Option::Some("TEXT_DETECTION"),
            Self::ObjectTracking => std::option::Option::Some("OBJECT_TRACKING"),
            Self::LogoRecognition => std::option::Option::Some("LOGO_RECOGNITION"),
            Self::PersonDetection => std::option::Option::Some("PERSON_DETECTION"),
            Self::UnknownValue(u) => u.0.name(),
        }
    }
}

impl std::default::Default for Feature {
    /// The default is whatever wire value `0` maps to, i.e. `Unspecified`.
    fn default() -> Self {
        <Self as std::convert::From<i32>>::from(0)
    }
}

impl std::fmt::Display for Feature {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        // Delegate to the shared enum formatter with both representations.
        let (name, value) = (self.name(), self.value());
        wkt::internal::display_enum(f, name, value)
    }
}

impl std::convert::From<i32> for Feature {
    /// Converts a wire integer into a [Feature]; unmapped integers are
    /// preserved as `UnknownValue` rather than rejected.
    fn from(value: i32) -> Self {
        match value {
            0 => Self::Unspecified,
            1 => Self::LabelDetection,
            2 => Self::ShotChangeDetection,
            3 => Self::ExplicitContentDetection,
            4 => Self::FaceDetection,
            6 => Self::SpeechTranscription,
            7 => Self::TextDetection,
            9 => Self::ObjectTracking,
            12 => Self::LogoRecognition,
            14 => Self::PersonDetection,
            _ => Self::UnknownValue(feature::UnknownValue(
                wkt::internal::UnknownEnumValue::Integer(value),
            )),
        }
    }
}

impl std::convert::From<&str> for Feature {
    /// Converts an enum name string into a [Feature]; unmapped strings are
    /// preserved (owned) as `UnknownValue` rather than rejected.
    fn from(value: &str) -> Self {
        use std::string::ToString;
        match value {
            "FEATURE_UNSPECIFIED" => Self::Unspecified,
            "LABEL_DETECTION" => Self::LabelDetection,
            "SHOT_CHANGE_DETECTION" => Self::ShotChangeDetection,
            "EXPLICIT_CONTENT_DETECTION" => Self::ExplicitContentDetection,
            "FACE_DETECTION" => Self::FaceDetection,
            "SPEECH_TRANSCRIPTION" => Self::SpeechTranscription,
            "TEXT_DETECTION" => Self::TextDetection,
            "OBJECT_TRACKING" => Self::ObjectTracking,
            "LOGO_RECOGNITION" => Self::LogoRecognition,
            "PERSON_DETECTION" => Self::PersonDetection,
            _ => Self::UnknownValue(feature::UnknownValue(
                wkt::internal::UnknownEnumValue::String(value.to_string()),
            )),
        }
    }
}

impl serde::ser::Serialize for Feature {
    /// Known variants always serialize as their integer wire value; unknown
    /// values delegate to the wrapped `UnknownEnumValue` (presumably re-emitting
    /// whichever form — integer or string — was originally received).
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::Unspecified => serializer.serialize_i32(0),
            Self::LabelDetection => serializer.serialize_i32(1),
            Self::ShotChangeDetection => serializer.serialize_i32(2),
            Self::ExplicitContentDetection => serializer.serialize_i32(3),
            Self::FaceDetection => serializer.serialize_i32(4),
            Self::SpeechTranscription => serializer.serialize_i32(6),
            Self::TextDetection => serializer.serialize_i32(7),
            Self::ObjectTracking => serializer.serialize_i32(9),
            Self::LogoRecognition => serializer.serialize_i32(12),
            Self::PersonDetection => serializer.serialize_i32(14),
            Self::UnknownValue(u) => u.0.serialize(serializer),
        }
    }
}

impl<'de> serde::de::Deserialize<'de> for Feature {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Delegate to the shared visitor, which also captures unknown values.
        let visitor =
            wkt::internal::EnumVisitor::<Feature>::new(".google.cloud.videointelligence.v1.Feature");
        deserializer.deserialize_any(visitor)
    }
}

/// Label detection mode.
///
/// # Working with unknown values
///
/// This enum is defined as `#[non_exhaustive]` because Google Cloud may add
/// additional enum variants at any time. Adding new variants is not considered
/// a breaking change. Applications should write their code in anticipation of:
///
/// - New values appearing in future releases of the client library, **and**
/// - New values received dynamically, without application changes.
///
/// Please consult the [Working with enums] section in the user guide for some
/// guidelines.
///
/// [Working with enums]: https://google-cloud-rust.github.io/working_with_enums.html
// Keep the variant list in sync with the `value()`/`name()` tables and the
// `From` conversions implemented below.
#[derive(Clone, Debug, PartialEq)]
#[non_exhaustive]
pub enum LabelDetectionMode {
    /// Unspecified.
    Unspecified,
    /// Detect shot-level labels.
    ShotMode,
    /// Detect frame-level labels.
    FrameMode,
    /// Detect both shot-level and frame-level labels.
    ShotAndFrameMode,
    /// If set, the enum was initialized with an unknown value.
    ///
    /// Applications can examine the value using [LabelDetectionMode::value] or
    /// [LabelDetectionMode::name].
    // Produced by the `From<i32>`/`From<&str>` conversions for unmapped input.
    UnknownValue(label_detection_mode::UnknownValue),
}

#[doc(hidden)]
pub mod label_detection_mode {
    #[allow(unused_imports)]
    use super::*;
    /// Opaque holder for a [LabelDetectionMode] value — in either integer or
    /// string form — that this version of the client library does not recognize.
    #[derive(Clone, Debug, PartialEq)]
    pub struct UnknownValue(pub(crate) wkt::internal::UnknownEnumValue);
}

impl LabelDetectionMode {
    /// Gets the enum value.
    ///
    /// Returns `None` if the enum contains an unknown value deserialized from
    /// the string representation of enums.
    pub fn value(&self) -> std::option::Option<i32> {
        // Keep this table in sync with `name()` and the `From` conversions.
        match self {
            Self::Unspecified => std::option::Option::Some(0),
            Self::ShotMode => std::option::Option::Some(1),
            Self::FrameMode => std::option::Option::Some(2),
            Self::ShotAndFrameMode => std::option::Option::Some(3),
            Self::UnknownValue(u) => u.0.value(),
        }
    }

    /// Gets the enum value as a string.
    ///
    /// Returns `None` if the enum contains an unknown value deserialized from
    /// the integer representation of enums.
    pub fn name(&self) -> std::option::Option<&str> {
        match self {
            Self::Unspecified => std::option::Option::Some("LABEL_DETECTION_MODE_UNSPECIFIED"),
            Self::ShotMode => std::option::Option::Some("SHOT_MODE"),
            Self::FrameMode => std::option::Option::Some("FRAME_MODE"),
            Self::ShotAndFrameMode => std::option::Option::Some("SHOT_AND_FRAME_MODE"),
            Self::UnknownValue(u) => u.0.name(),
        }
    }
}

impl std::default::Default for LabelDetectionMode {
    /// The default is whatever wire value `0` maps to, i.e. `Unspecified`.
    fn default() -> Self {
        <Self as std::convert::From<i32>>::from(0)
    }
}

impl std::fmt::Display for LabelDetectionMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        // Delegate to the shared enum formatter with both representations.
        let (name, value) = (self.name(), self.value());
        wkt::internal::display_enum(f, name, value)
    }
}

impl std::convert::From<i32> for LabelDetectionMode {
    /// Converts a wire integer into a [LabelDetectionMode]; unmapped integers
    /// are preserved as `UnknownValue` rather than rejected.
    fn from(value: i32) -> Self {
        match value {
            0 => Self::Unspecified,
            1 => Self::ShotMode,
            2 => Self::FrameMode,
            3 => Self::ShotAndFrameMode,
            _ => Self::UnknownValue(label_detection_mode::UnknownValue(
                wkt::internal::UnknownEnumValue::Integer(value),
            )),
        }
    }
}

impl std::convert::From<&str> for LabelDetectionMode {
    /// Converts an enum name string into a [LabelDetectionMode]; unmapped
    /// strings are preserved (owned) as `UnknownValue` rather than rejected.
    fn from(value: &str) -> Self {
        use std::string::ToString;
        match value {
            "LABEL_DETECTION_MODE_UNSPECIFIED" => Self::Unspecified,
            "SHOT_MODE" => Self::ShotMode,
            "FRAME_MODE" => Self::FrameMode,
            "SHOT_AND_FRAME_MODE" => Self::ShotAndFrameMode,
            _ => Self::UnknownValue(label_detection_mode::UnknownValue(
                wkt::internal::UnknownEnumValue::String(value.to_string()),
            )),
        }
    }
}

impl serde::ser::Serialize for LabelDetectionMode {
    /// Known variants always serialize as their integer wire value; unknown
    /// values delegate to the wrapped `UnknownEnumValue` (presumably re-emitting
    /// whichever form — integer or string — was originally received).
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::Unspecified => serializer.serialize_i32(0),
            Self::ShotMode => serializer.serialize_i32(1),
            Self::FrameMode => serializer.serialize_i32(2),
            Self::ShotAndFrameMode => serializer.serialize_i32(3),
            Self::UnknownValue(u) => u.0.serialize(serializer),
        }
    }
}

impl<'de> serde::de::Deserialize<'de> for LabelDetectionMode {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Delegate to the shared visitor, which also captures unknown values.
        let visitor = wkt::internal::EnumVisitor::<LabelDetectionMode>::new(
            ".google.cloud.videointelligence.v1.LabelDetectionMode",
        );
        deserializer.deserialize_any(visitor)
    }
}

/// Bucketized representation of likelihood.
///
/// # Working with unknown values
///
/// This enum is defined as `#[non_exhaustive]` because Google Cloud may add
/// additional enum variants at any time. Adding new variants is not considered
/// a breaking change. Applications should write their code in anticipation of:
///
/// - New values appearing in future releases of the client library, **and**
/// - New values received dynamically, without application changes.
///
/// Please consult the [Working with enums] section in the user guide for some
/// guidelines.
///
/// [Working with enums]: https://google-cloud-rust.github.io/working_with_enums.html
// Keep the variant list in sync with the `value()`/`name()` tables and the
// `From` conversions implemented below.
#[derive(Clone, Debug, PartialEq)]
#[non_exhaustive]
pub enum Likelihood {
    /// Unspecified likelihood.
    Unspecified,
    /// Very unlikely.
    VeryUnlikely,
    /// Unlikely.
    Unlikely,
    /// Possible.
    Possible,
    /// Likely.
    Likely,
    /// Very likely.
    VeryLikely,
    /// If set, the enum was initialized with an unknown value.
    ///
    /// Applications can examine the value using [Likelihood::value] or
    /// [Likelihood::name].
    // Produced by the `From<i32>`/`From<&str>` conversions for unmapped input.
    UnknownValue(likelihood::UnknownValue),
}

#[doc(hidden)]
pub mod likelihood {
    #[allow(unused_imports)]
    use super::*;
    /// Opaque holder for a [Likelihood] value — in either integer or string
    /// form — that this version of the client library does not recognize.
    #[derive(Clone, Debug, PartialEq)]
    pub struct UnknownValue(pub(crate) wkt::internal::UnknownEnumValue);
}

impl Likelihood {
    /// Gets the enum value.
    ///
    /// Returns `None` if the enum contains an unknown value deserialized from
    /// the string representation of enums.
    pub fn value(&self) -> std::option::Option<i32> {
        use std::option::Option::Some;
        match self {
            Self::Unspecified => Some(0),
            Self::VeryUnlikely => Some(1),
            Self::Unlikely => Some(2),
            Self::Possible => Some(3),
            Self::Likely => Some(4),
            Self::VeryLikely => Some(5),
            // String-typed unknowns have no integer value; delegate.
            Self::UnknownValue(unknown) => unknown.0.value(),
        }
    }

    /// Gets the enum value as a string.
    ///
    /// Returns `None` if the enum contains an unknown value deserialized from
    /// the integer representation of enums.
    pub fn name(&self) -> std::option::Option<&str> {
        use std::option::Option::Some;
        match self {
            Self::Unspecified => Some("LIKELIHOOD_UNSPECIFIED"),
            Self::VeryUnlikely => Some("VERY_UNLIKELY"),
            Self::Unlikely => Some("UNLIKELY"),
            Self::Possible => Some("POSSIBLE"),
            Self::Likely => Some("LIKELY"),
            Self::VeryLikely => Some("VERY_LIKELY"),
            // Integer-typed unknowns have no string name; delegate.
            Self::UnknownValue(unknown) => unknown.0.name(),
        }
    }
}

impl std::default::Default for Likelihood {
    /// Returns the zero value, per protobuf enum conventions.
    fn default() -> Self {
        <Self as std::convert::From<i32>>::from(0)
    }
}

impl std::fmt::Display for Likelihood {
    /// Formats the enum for human consumption, delegating to the shared
    /// helper which uses the string name when known and the integer value
    /// otherwise.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        wkt::internal::display_enum(f, self.name(), self.value())
    }
}

impl std::convert::From<i32> for Likelihood {
    /// Converts a wire integer into the enum, preserving unrecognized codes
    /// as [Likelihood::UnknownValue] so they can round-trip.
    fn from(value: i32) -> Self {
        use std::option::Option::{None, Some};
        // Map the known wire values first; anything else falls through to the
        // unknown-integer representation.
        let known = match value {
            0 => Some(Self::Unspecified),
            1 => Some(Self::VeryUnlikely),
            2 => Some(Self::Unlikely),
            3 => Some(Self::Possible),
            4 => Some(Self::Likely),
            5 => Some(Self::VeryLikely),
            _ => None,
        };
        known.unwrap_or_else(|| {
            Self::UnknownValue(likelihood::UnknownValue(
                wkt::internal::UnknownEnumValue::Integer(value),
            ))
        })
    }
}

impl std::convert::From<&str> for Likelihood {
    /// Converts a wire string into the enum, preserving unrecognized names
    /// as [Likelihood::UnknownValue] so they can round-trip.
    fn from(value: &str) -> Self {
        use std::option::Option::{None, Some};
        use std::string::ToString;
        // Map the known wire names first; anything else falls through to the
        // unknown-string representation.
        let known = match value {
            "LIKELIHOOD_UNSPECIFIED" => Some(Self::Unspecified),
            "VERY_UNLIKELY" => Some(Self::VeryUnlikely),
            "UNLIKELY" => Some(Self::Unlikely),
            "POSSIBLE" => Some(Self::Possible),
            "LIKELY" => Some(Self::Likely),
            "VERY_LIKELY" => Some(Self::VeryLikely),
            _ => None,
        };
        known.unwrap_or_else(|| {
            Self::UnknownValue(likelihood::UnknownValue(
                wkt::internal::UnknownEnumValue::String(value.to_string()),
            ))
        })
    }
}

impl serde::ser::Serialize for Likelihood {
    /// Serializes known variants as their integer code; unknown values are
    /// re-emitted in whichever representation (integer or string) they were
    /// originally parsed from.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::UnknownValue(wrapped) => wrapped.0.serialize(serializer),
            known => {
                // Every non-unknown variant has an integer code, so `value()`
                // is always `Some` on this branch.
                let code = known.value().expect("known variant has a value");
                serializer.serialize_i32(code)
            }
        }
    }
}

impl<'de> serde::de::Deserialize<'de> for Likelihood {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Accept either the integer or the string form of the enum; the
        // visitor carries the fully-qualified proto name for error messages.
        let visitor = wkt::internal::EnumVisitor::<Self>::new(
            ".google.cloud.videointelligence.v1.Likelihood",
        );
        deserializer.deserialize_any(visitor)
    }
}
