// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by sidekick. DO NOT EDIT.

#![allow(rustdoc::redundant_explicit_links)]
#![allow(rustdoc::broken_intra_doc_links)]
#![no_implicit_prelude]
extern crate bytes;
extern crate serde;
extern crate serde_json;
extern crate serde_with;
extern crate std;
extern crate wkt;

mod debug;
mod deserialize;
mod serialize;

/// Prediction output format for Image and Text Classification.
/// Prediction output format for Image and Text Classification.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ClassificationPredictionResult {
    /// The resource IDs of the AnnotationSpecs that had been identified.
    pub ids: std::vec::Vec<i64>,

    /// The display names of the AnnotationSpecs that had been identified, order
    /// matches the IDs.
    pub display_names: std::vec::Vec<std::string::String>,

    /// The Model's confidences in correctness of the predicted IDs, higher value
    /// means higher confidence. Order matches the Ids.
    pub confidences: std::vec::Vec<f32>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ClassificationPredictionResult {
    /// Creates an empty `ClassificationPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [ids][crate::model::ClassificationPredictionResult::ids] with the converted contents of `v`.
    pub fn set_ids<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<i64>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.ids = items;
        self
    }

    /// Replaces [display_names][crate::model::ClassificationPredictionResult::display_names] with the converted contents of `v`.
    pub fn set_display_names<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<std::string::String>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.display_names = items;
        self
    }

    /// Replaces [confidences][crate::model::ClassificationPredictionResult::confidences] with the converted contents of `v`.
    pub fn set_confidences<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<f32>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.confidences = items;
        self
    }
}

impl wkt::message::Message for ClassificationPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.ClassificationPredictionResult"
    }
}

/// Prediction output format for Image Object Detection.
/// Prediction output format for Image Object Detection.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ImageObjectDetectionPredictionResult {
    /// The resource IDs of the AnnotationSpecs that had been identified, ordered
    /// by the confidence score descendingly.
    pub ids: std::vec::Vec<i64>,

    /// The display names of the AnnotationSpecs that had been identified, order
    /// matches the IDs.
    pub display_names: std::vec::Vec<std::string::String>,

    /// The Model's confidences in correctness of the predicted IDs, higher value
    /// means higher confidence. Order matches the Ids.
    pub confidences: std::vec::Vec<f32>,

    /// Bounding boxes, i.e. the rectangles over the image, that pinpoint
    /// the found AnnotationSpecs. Given in order that matches the IDs. Each
    /// bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and
    /// `yMax`, which represent the extremal coordinates of the box. They are
    /// relative to the image size, and the point 0,0 is in the top left
    /// of the image.
    pub bboxes: std::vec::Vec<wkt::ListValue>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ImageObjectDetectionPredictionResult {
    /// Creates an empty `ImageObjectDetectionPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [ids][crate::model::ImageObjectDetectionPredictionResult::ids] with the converted contents of `v`.
    pub fn set_ids<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<i64>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.ids = items;
        self
    }

    /// Replaces [display_names][crate::model::ImageObjectDetectionPredictionResult::display_names] with the converted contents of `v`.
    pub fn set_display_names<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<std::string::String>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.display_names = items;
        self
    }

    /// Replaces [confidences][crate::model::ImageObjectDetectionPredictionResult::confidences] with the converted contents of `v`.
    pub fn set_confidences<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<f32>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.confidences = items;
        self
    }

    /// Replaces [bboxes][crate::model::ImageObjectDetectionPredictionResult::bboxes] with the converted contents of `v`.
    pub fn set_bboxes<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<wkt::ListValue>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.bboxes = items;
        self
    }
}

impl wkt::message::Message for ImageObjectDetectionPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.ImageObjectDetectionPredictionResult"
    }
}

/// Prediction output format for Image Segmentation.
/// Prediction output format for Image Segmentation.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct ImageSegmentationPredictionResult {
    /// A PNG image where each pixel in the mask represents the category in which
    /// the pixel in the original image was predicted to belong to. The size of
    /// this image will be the same as the original image. The mapping between the
    /// AnntoationSpec and the color can be found in model's metadata. The model
    /// will choose the most likely category and if none of the categories reach
    /// the confidence threshold, the pixel will be marked as background.
    pub category_mask: std::string::String,

    /// A one channel image which is encoded as an 8bit lossless PNG. The size of
    /// the image will be the same as the original image. For a specific pixel,
    /// darker color means less confidence in correctness of the cateogry in the
    /// categoryMask for the corresponding pixel. Black means no confidence and
    /// white means complete confidence.
    pub confidence_mask: std::string::String,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl ImageSegmentationPredictionResult {
    /// Creates an empty `ImageSegmentationPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [category_mask][crate::model::ImageSegmentationPredictionResult::category_mask] with the converted `v`.
    pub fn set_category_mask<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.category_mask = std::convert::Into::into(v);
        self
    }

    /// Replaces [confidence_mask][crate::model::ImageSegmentationPredictionResult::confidence_mask] with the converted `v`.
    pub fn set_confidence_mask<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.confidence_mask = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for ImageSegmentationPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.ImageSegmentationPredictionResult"
    }
}

/// Prediction output format for Tabular Classification.
/// Prediction output format for Tabular Classification.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TabularClassificationPredictionResult {
    /// The name of the classes being classified, contains all possible values of
    /// the target column.
    pub classes: std::vec::Vec<std::string::String>,

    /// The model's confidence in each class being correct, higher
    /// value means higher confidence. The N-th score corresponds to
    /// the N-th class in classes.
    pub scores: std::vec::Vec<f32>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TabularClassificationPredictionResult {
    /// Creates an empty `TabularClassificationPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [classes][crate::model::TabularClassificationPredictionResult::classes] with the converted contents of `v`.
    pub fn set_classes<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<std::string::String>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.classes = items;
        self
    }

    /// Replaces [scores][crate::model::TabularClassificationPredictionResult::scores] with the converted contents of `v`.
    pub fn set_scores<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<f32>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.scores = items;
        self
    }
}

impl wkt::message::Message for TabularClassificationPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.TabularClassificationPredictionResult"
    }
}

/// Prediction output format for Tabular Regression.
/// Prediction output format for Tabular Regression.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TabularRegressionPredictionResult {
    /// The regression value.
    pub value: f32,

    /// The lower bound of the prediction interval.
    pub lower_bound: f32,

    /// The upper bound of the prediction interval.
    pub upper_bound: f32,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TabularRegressionPredictionResult {
    /// Creates an empty `TabularRegressionPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [value][crate::model::TabularRegressionPredictionResult::value] with the converted `v`.
    pub fn set_value<T: std::convert::Into<f32>>(mut self, v: T) -> Self {
        self.value = std::convert::Into::into(v);
        self
    }

    /// Replaces [lower_bound][crate::model::TabularRegressionPredictionResult::lower_bound] with the converted `v`.
    pub fn set_lower_bound<T: std::convert::Into<f32>>(mut self, v: T) -> Self {
        self.lower_bound = std::convert::Into::into(v);
        self
    }

    /// Replaces [upper_bound][crate::model::TabularRegressionPredictionResult::upper_bound] with the converted `v`.
    pub fn set_upper_bound<T: std::convert::Into<f32>>(mut self, v: T) -> Self {
        self.upper_bound = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for TabularRegressionPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.TabularRegressionPredictionResult"
    }
}

/// Prediction output format for Text Extraction.
/// Prediction output format for Text Extraction.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TextExtractionPredictionResult {
    /// The resource IDs of the AnnotationSpecs that had been identified,
    /// ordered by the confidence score descendingly.
    pub ids: std::vec::Vec<i64>,

    /// The display names of the AnnotationSpecs that had been identified,
    /// order matches the IDs.
    pub display_names: std::vec::Vec<std::string::String>,

    /// The start offsets, inclusive, of the text segment in which the
    /// AnnotationSpec has been identified. Expressed as a zero-based number
    /// of characters as measured from the start of the text snippet.
    pub text_segment_start_offsets: std::vec::Vec<i64>,

    /// The end offsets, inclusive, of the text segment in which the
    /// AnnotationSpec has been identified. Expressed as a zero-based number
    /// of characters as measured from the start of the text snippet.
    pub text_segment_end_offsets: std::vec::Vec<i64>,

    /// The Model's confidences in correctness of the predicted IDs, higher
    /// value means higher confidence. Order matches the Ids.
    pub confidences: std::vec::Vec<f32>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TextExtractionPredictionResult {
    /// Creates an empty `TextExtractionPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [ids][crate::model::TextExtractionPredictionResult::ids] with the converted contents of `v`.
    pub fn set_ids<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<i64>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.ids = items;
        self
    }

    /// Replaces [display_names][crate::model::TextExtractionPredictionResult::display_names] with the converted contents of `v`.
    pub fn set_display_names<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<std::string::String>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.display_names = items;
        self
    }

    /// Replaces [text_segment_start_offsets][crate::model::TextExtractionPredictionResult::text_segment_start_offsets] with the converted contents of `v`.
    pub fn set_text_segment_start_offsets<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<i64>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.text_segment_start_offsets = items;
        self
    }

    /// Replaces [text_segment_end_offsets][crate::model::TextExtractionPredictionResult::text_segment_end_offsets] with the converted contents of `v`.
    pub fn set_text_segment_end_offsets<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<i64>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.text_segment_end_offsets = items;
        self
    }

    /// Replaces [confidences][crate::model::TextExtractionPredictionResult::confidences] with the converted contents of `v`.
    pub fn set_confidences<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<f32>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.confidences = items;
        self
    }
}

impl wkt::message::Message for TextExtractionPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.TextExtractionPredictionResult"
    }
}

/// Prediction output format for Text Sentiment
/// Prediction output format for Text Sentiment
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct TextSentimentPredictionResult {
    /// The integer sentiment labels between 0 (inclusive) and sentimentMax label
    /// (inclusive), while 0 maps to the least positive sentiment and
    /// sentimentMax maps to the most positive one. The higher the score is, the
    /// more positive the sentiment in the text snippet is. Note: sentimentMax is
    /// an integer value between 1 (inclusive) and 10 (inclusive).
    pub sentiment: i32,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl TextSentimentPredictionResult {
    /// Creates an empty `TextSentimentPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [sentiment][crate::model::TextSentimentPredictionResult::sentiment] with the converted `v`.
    pub fn set_sentiment<T: std::convert::Into<i32>>(mut self, v: T) -> Self {
        self.sentiment = std::convert::Into::into(v);
        self
    }
}

impl wkt::message::Message for TextSentimentPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.TextSentimentPredictionResult"
    }
}

/// Prediction output format for Video Action Recognition.
/// Prediction output format for Video Action Recognition.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoActionRecognitionPredictionResult {
    /// The resource ID of the AnnotationSpec that had been identified.
    pub id: std::string::String,

    /// The display name of the AnnotationSpec that had been identified.
    pub display_name: std::string::String,

    /// The beginning, inclusive, of the video's time segment in which the
    /// AnnotationSpec has been identified. Expressed as a number of seconds as
    /// measured from the start of the video, with fractions up to a microsecond
    /// precision, and with "s" appended at the end.
    pub time_segment_start: std::option::Option<wkt::Duration>,

    /// The end, exclusive, of the video's time segment in which the
    /// AnnotationSpec has been identified. Expressed as a number of seconds as
    /// measured from the start of the video, with fractions up to a microsecond
    /// precision, and with "s" appended at the end.
    pub time_segment_end: std::option::Option<wkt::Duration>,

    /// The Model's confidence in correction of this prediction, higher
    /// value means higher confidence.
    pub confidence: std::option::Option<wkt::FloatValue>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoActionRecognitionPredictionResult {
    /// Creates an empty `VideoActionRecognitionPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [id][crate::model::VideoActionRecognitionPredictionResult::id] with the converted `v`.
    pub fn set_id<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.id = std::convert::Into::into(v);
        self
    }

    /// Replaces [display_name][crate::model::VideoActionRecognitionPredictionResult::display_name] with the converted `v`.
    pub fn set_display_name<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.display_name = std::convert::Into::into(v);
        self
    }

    /// Stores the converted `v` in [time_segment_start][crate::model::VideoActionRecognitionPredictionResult::time_segment_start].
    pub fn set_time_segment_start<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_start = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [time_segment_start][crate::model::VideoActionRecognitionPredictionResult::time_segment_start].
    pub fn set_or_clear_time_segment_start<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_start = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores the converted `v` in [time_segment_end][crate::model::VideoActionRecognitionPredictionResult::time_segment_end].
    pub fn set_time_segment_end<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_end = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [time_segment_end][crate::model::VideoActionRecognitionPredictionResult::time_segment_end].
    pub fn set_or_clear_time_segment_end<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_end = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores the converted `v` in [confidence][crate::model::VideoActionRecognitionPredictionResult::confidence].
    pub fn set_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::FloatValue>,
    {
        self.confidence = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [confidence][crate::model::VideoActionRecognitionPredictionResult::confidence].
    pub fn set_or_clear_confidence<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::FloatValue>,
    {
        self.confidence = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }
}

impl wkt::message::Message for VideoActionRecognitionPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.VideoActionRecognitionPredictionResult"
    }
}

/// Prediction output format for Video Classification.
/// Prediction output format for Video Classification.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoClassificationPredictionResult {
    /// The resource ID of the AnnotationSpec that had been identified.
    pub id: std::string::String,

    /// The display name of the AnnotationSpec that had been identified.
    pub display_name: std::string::String,

    /// The type of the prediction. The requested types can be configured
    /// via parameters. This will be one of
    ///
    /// - segment-classification
    /// - shot-classification
    /// - one-sec-interval-classification
    pub r#type: std::string::String,

    /// The beginning, inclusive, of the video's time segment in which the
    /// AnnotationSpec has been identified. Expressed as a number of seconds as
    /// measured from the start of the video, with fractions up to a microsecond
    /// precision, and with "s" appended at the end. Note that for
    /// 'segment-classification' prediction type, this equals the original
    /// 'timeSegmentStart' from the input instance, for other types it is the
    /// start of a shot or a 1 second interval respectively.
    pub time_segment_start: std::option::Option<wkt::Duration>,

    /// The end, exclusive, of the video's time segment in which the
    /// AnnotationSpec has been identified. Expressed as a number of seconds as
    /// measured from the start of the video, with fractions up to a microsecond
    /// precision, and with "s" appended at the end. Note that for
    /// 'segment-classification' prediction type, this equals the original
    /// 'timeSegmentEnd' from the input instance, for other types it is the end
    /// of a shot or a 1 second interval respectively.
    pub time_segment_end: std::option::Option<wkt::Duration>,

    /// The Model's confidence in correction of this prediction, higher
    /// value means higher confidence.
    pub confidence: std::option::Option<wkt::FloatValue>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoClassificationPredictionResult {
    /// Creates an empty `VideoClassificationPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [id][crate::model::VideoClassificationPredictionResult::id] with the converted `v`.
    pub fn set_id<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.id = std::convert::Into::into(v);
        self
    }

    /// Replaces [display_name][crate::model::VideoClassificationPredictionResult::display_name] with the converted `v`.
    pub fn set_display_name<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.display_name = std::convert::Into::into(v);
        self
    }

    /// Replaces [r#type][crate::model::VideoClassificationPredictionResult::type] with the converted `v`.
    pub fn set_type<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.r#type = std::convert::Into::into(v);
        self
    }

    /// Stores the converted `v` in [time_segment_start][crate::model::VideoClassificationPredictionResult::time_segment_start].
    pub fn set_time_segment_start<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_start = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [time_segment_start][crate::model::VideoClassificationPredictionResult::time_segment_start].
    pub fn set_or_clear_time_segment_start<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_start = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores the converted `v` in [time_segment_end][crate::model::VideoClassificationPredictionResult::time_segment_end].
    pub fn set_time_segment_end<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_end = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [time_segment_end][crate::model::VideoClassificationPredictionResult::time_segment_end].
    pub fn set_or_clear_time_segment_end<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_end = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores the converted `v` in [confidence][crate::model::VideoClassificationPredictionResult::confidence].
    pub fn set_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::FloatValue>,
    {
        self.confidence = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [confidence][crate::model::VideoClassificationPredictionResult::confidence].
    pub fn set_or_clear_confidence<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::FloatValue>,
    {
        self.confidence = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }
}

impl wkt::message::Message for VideoClassificationPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.VideoClassificationPredictionResult"
    }
}

/// Prediction output format for Video Object Tracking.
/// Prediction output format for Video Object Tracking.
#[derive(Clone, Default, PartialEq)]
#[non_exhaustive]
pub struct VideoObjectTrackingPredictionResult {
    /// The resource ID of the AnnotationSpec that had been identified.
    pub id: std::string::String,

    /// The display name of the AnnotationSpec that had been identified.
    pub display_name: std::string::String,

    /// The beginning, inclusive, of the video's time segment in which the
    /// object instance has been detected. Expressed as a number of seconds as
    /// measured from the start of the video, with fractions up to a microsecond
    /// precision, and with "s" appended at the end.
    pub time_segment_start: std::option::Option<wkt::Duration>,

    /// The end, inclusive, of the video's time segment in which the
    /// object instance has been detected. Expressed as a number of seconds as
    /// measured from the start of the video, with fractions up to a microsecond
    /// precision, and with "s" appended at the end.
    pub time_segment_end: std::option::Option<wkt::Duration>,

    /// The Model's confidence in correction of this prediction, higher
    /// value means higher confidence.
    pub confidence: std::option::Option<wkt::FloatValue>,

    /// All of the frames of the video in which a single object instance has been
    /// detected. The bounding boxes in the frames identify the same object.
    pub frames: std::vec::Vec<crate::model::video_object_tracking_prediction_result::Frame>,

    // NOTE(review): presumably holds JSON fields not modeled above so they can
    // survive a serialize/deserialize round-trip — confirm in the generated
    // `serialize`/`deserialize` modules.
    pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
}

impl VideoObjectTrackingPredictionResult {
    /// Creates an empty `VideoObjectTrackingPredictionResult`.
    pub fn new() -> Self {
        <Self as std::default::Default>::default()
    }

    /// Replaces [id][crate::model::VideoObjectTrackingPredictionResult::id] with the converted `v`.
    pub fn set_id<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.id = std::convert::Into::into(v);
        self
    }

    /// Replaces [display_name][crate::model::VideoObjectTrackingPredictionResult::display_name] with the converted `v`.
    pub fn set_display_name<T: std::convert::Into<std::string::String>>(mut self, v: T) -> Self {
        self.display_name = std::convert::Into::into(v);
        self
    }

    /// Stores the converted `v` in [time_segment_start][crate::model::VideoObjectTrackingPredictionResult::time_segment_start].
    pub fn set_time_segment_start<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_start = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [time_segment_start][crate::model::VideoObjectTrackingPredictionResult::time_segment_start].
    pub fn set_or_clear_time_segment_start<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_start = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores the converted `v` in [time_segment_end][crate::model::VideoObjectTrackingPredictionResult::time_segment_end].
    pub fn set_time_segment_end<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_end = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [time_segment_end][crate::model::VideoObjectTrackingPredictionResult::time_segment_end].
    pub fn set_or_clear_time_segment_end<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::Duration>,
    {
        self.time_segment_end = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Stores the converted `v` in [confidence][crate::model::VideoObjectTrackingPredictionResult::confidence].
    pub fn set_confidence<T>(mut self, v: T) -> Self
    where
        T: std::convert::Into<wkt::FloatValue>,
    {
        self.confidence = std::option::Option::Some(std::convert::Into::into(v));
        self
    }

    /// Sets or clears [confidence][crate::model::VideoObjectTrackingPredictionResult::confidence].
    pub fn set_or_clear_confidence<T>(mut self, v: std::option::Option<T>) -> Self
    where
        T: std::convert::Into<wkt::FloatValue>,
    {
        self.confidence = match v {
            std::option::Option::Some(x) => std::option::Option::Some(x.into()),
            std::option::Option::None => std::option::Option::None,
        };
        self
    }

    /// Replaces [frames][crate::model::VideoObjectTrackingPredictionResult::frames] with the converted contents of `v`.
    pub fn set_frames<T, V>(mut self, v: T) -> Self
    where
        T: std::iter::IntoIterator<Item = V>,
        V: std::convert::Into<crate::model::video_object_tracking_prediction_result::Frame>,
    {
        let mut items = std::vec::Vec::new();
        for item in v {
            items.push(item.into());
        }
        self.frames = items;
        self
    }
}

impl wkt::message::Message for VideoObjectTrackingPredictionResult {
    /// Returns the fixed type URL identifying this message type.
    fn typename() -> &'static str {
        "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.VideoObjectTrackingPredictionResult"
    }
}

/// Defines additional types related to [VideoObjectTrackingPredictionResult].
pub mod video_object_tracking_prediction_result {
    #[allow(unused_imports)]
    use super::*;

    /// The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box,
    /// i.e. the rectangle over the video frame pinpointing the found
    /// AnnotationSpec. The coordinates are relative to the frame size, and the
    /// point 0,0 is in the top left of the frame.
    #[derive(Clone, Default, PartialEq)]
    #[non_exhaustive]
    pub struct Frame {
        /// A time (frame) of a video in which the object has been detected.
        /// Expressed as a number of seconds as measured from the
        /// start of the video, with fractions up to a microsecond precision, and
        /// with "s" appended at the end.
        pub time_offset: std::option::Option<wkt::Duration>,

        /// The leftmost coordinate of the bounding box.
        pub x_min: std::option::Option<wkt::FloatValue>,

        /// The rightmost coordinate of the bounding box.
        pub x_max: std::option::Option<wkt::FloatValue>,

        /// The topmost coordinate of the bounding box.
        pub y_min: std::option::Option<wkt::FloatValue>,

        /// The bottommost coordinate of the bounding box.
        pub y_max: std::option::Option<wkt::FloatValue>,

        pub(crate) _unknown_fields: serde_json::Map<std::string::String, serde_json::Value>,
    }

    impl Frame {
        /// Creates an empty `Frame`.
        pub fn new() -> Self {
            <Self as std::default::Default>::default()
        }

        /// Stores the converted `v` in [time_offset][crate::model::video_object_tracking_prediction_result::Frame::time_offset].
        pub fn set_time_offset<T>(mut self, v: T) -> Self
        where
            T: std::convert::Into<wkt::Duration>,
        {
            self.time_offset = std::option::Option::Some(std::convert::Into::into(v));
            self
        }

        /// Sets or clears [time_offset][crate::model::video_object_tracking_prediction_result::Frame::time_offset].
        pub fn set_or_clear_time_offset<T>(mut self, v: std::option::Option<T>) -> Self
        where
            T: std::convert::Into<wkt::Duration>,
        {
            self.time_offset = match v {
                std::option::Option::Some(x) => std::option::Option::Some(x.into()),
                std::option::Option::None => std::option::Option::None,
            };
            self
        }

        /// Stores the converted `v` in [x_min][crate::model::video_object_tracking_prediction_result::Frame::x_min].
        pub fn set_x_min<T>(mut self, v: T) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.x_min = std::option::Option::Some(std::convert::Into::into(v));
            self
        }

        /// Sets or clears [x_min][crate::model::video_object_tracking_prediction_result::Frame::x_min].
        pub fn set_or_clear_x_min<T>(mut self, v: std::option::Option<T>) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.x_min = match v {
                std::option::Option::Some(x) => std::option::Option::Some(x.into()),
                std::option::Option::None => std::option::Option::None,
            };
            self
        }

        /// Stores the converted `v` in [x_max][crate::model::video_object_tracking_prediction_result::Frame::x_max].
        pub fn set_x_max<T>(mut self, v: T) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.x_max = std::option::Option::Some(std::convert::Into::into(v));
            self
        }

        /// Sets or clears [x_max][crate::model::video_object_tracking_prediction_result::Frame::x_max].
        pub fn set_or_clear_x_max<T>(mut self, v: std::option::Option<T>) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.x_max = match v {
                std::option::Option::Some(x) => std::option::Option::Some(x.into()),
                std::option::Option::None => std::option::Option::None,
            };
            self
        }

        /// Stores the converted `v` in [y_min][crate::model::video_object_tracking_prediction_result::Frame::y_min].
        pub fn set_y_min<T>(mut self, v: T) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.y_min = std::option::Option::Some(std::convert::Into::into(v));
            self
        }

        /// Sets or clears [y_min][crate::model::video_object_tracking_prediction_result::Frame::y_min].
        pub fn set_or_clear_y_min<T>(mut self, v: std::option::Option<T>) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.y_min = match v {
                std::option::Option::Some(x) => std::option::Option::Some(x.into()),
                std::option::Option::None => std::option::Option::None,
            };
            self
        }

        /// Stores the converted `v` in [y_max][crate::model::video_object_tracking_prediction_result::Frame::y_max].
        pub fn set_y_max<T>(mut self, v: T) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.y_max = std::option::Option::Some(std::convert::Into::into(v));
            self
        }

        /// Sets or clears [y_max][crate::model::video_object_tracking_prediction_result::Frame::y_max].
        pub fn set_or_clear_y_max<T>(mut self, v: std::option::Option<T>) -> Self
        where
            T: std::convert::Into<wkt::FloatValue>,
        {
            self.y_max = match v {
                std::option::Option::Some(x) => std::option::Option::Some(x.into()),
                std::option::Option::None => std::option::Option::None,
            };
            self
        }
    }

    impl wkt::message::Message for Frame {
        /// Returns the fixed type URL identifying this message type.
        fn typename() -> &'static str {
            "type.googleapis.com/google.cloud.aiplatform.v1.schema.predict.prediction.VideoObjectTrackingPredictionResult.Frame"
        }
    }
}
