using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes.DomainAttributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Enums;
using System.Collections.Generic;
using System.ComponentModel;

namespace Baci.ArcGIS._ImageAnalystTools._DeepLearning
{
    /// <summary>
    /// <para>Train Deep Learning Model</para>
    /// <para>Trains a deep learning model using the output from the Export Training Data For Deep Learning tool.</para>
    /// <para>使用导出深度学习训练数据工具的输出训练深度学习模型。</para>
    /// </summary>    
    [DisplayName("Train Deep Learning Model")]
    public class TrainDeepLearningModel : AbstractGPProcess
    {
        /// <summary>
        /// Parameterless constructor. Parameters must be assigned via the
        /// properties before the tool is executed.
        /// </summary>
        public TrainDeepLearningModel()
        {

        }

        /// <summary>
        /// Constructor taking the two required parameters of the tool.
        /// </summary>
        /// <param name="_in_folder">
        /// <para>Input Training Data</para>
        /// <para><xdoc>
        ///   <para>The folder containing the image chips, labels, and statistics required to train the model. This is the output from the Export Training Data For Deep Learning tool.</para>
        ///   <para>To train a model, the input images must be 8-bit rasters with three bands.</para>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>包含训练模型所需的图像芯片、标签和统计信息的文件夹。这是导出深度学习训练数据工具的输出。</para>
        ///   <para>若要训练模型，输入图像必须是具有三个波段的 8 位栅格。</para>
        /// </xdoc></para>
        /// </param>
        /// <param name="_out_folder">
        /// <para>Output Model</para>
        /// <para>The output folder location that will store the trained model.</para>
        /// <para>将存储已训练模型的输出文件夹位置。</para>
        /// </param>
        public TrainDeepLearningModel(object _in_folder, object _out_folder)
        {
            this._in_folder = _in_folder;
            this._out_folder = _out_folder;
        }
        /// <summary>Display name of the toolbox that hosts this tool.</summary>
        public override string ToolboxName
        {
            get { return "Image Analyst Tools"; }
        }

        /// <summary>Display name of this geoprocessing tool.</summary>
        public override string ToolName
        {
            get { return "Train Deep Learning Model"; }
        }

        /// <summary>Qualified name used to invoke the tool ("ia" = Image Analyst alias).</summary>
        public override string CallName
        {
            get { return "ia.TrainDeepLearningModel"; }
        }

        /// <summary>Geoprocessing environment settings honored by this tool.</summary>
        public override List<string> AcceptEnvironments
        {
            get
            {
                return new List<string>
                {
                    "extent",
                    "gpuID",
                    "parallelProcessingFactor",
                    "processorType",
                    "scratchWorkspace",
                    "workspace"
                };
            }
        }

        /// <summary>
        /// Tool parameter values in the positional order expected by the geoprocessor.
        /// Enum-typed parameters are converted to their GP string values.
        /// </summary>
        public override object[] ParameterInfo
        {
            get
            {
                return new object[]
                {
                    _in_folder,
                    _out_folder,
                    _max_epochs,
                    _model_type.GetGPValue(),
                    _batch_size,
                    _arguments,
                    _learning_rate,
                    _backbone_model.GetGPValue(),
                    _pretrained_model,
                    _validation_percentage,
                    _stop_training.GetGPValue(),
                    _out_model_file,
                    _freeze.GetGPValue()
                };
            }
        }

        /// <summary>
        /// <para>Input Training Data</para>
        /// <para><xdoc>
        ///   <para>The folder containing the image chips, labels, and statistics required to train the model. This is the output from the Export Training Data For Deep Learning tool.</para>
        ///   <para>To train a model, the input images must be 8-bit rasters with three bands.</para>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>包含训练模型所需的图像芯片、标签和统计信息的文件夹。这是导出深度学习训练数据工具的输出。</para>
        ///   <para>若要训练模型，输入图像必须是具有三个波段的 8 位栅格。</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Input Training Data")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _in_folder { get; set; }


        /// <summary>
        /// <para>Output Model</para>
        /// <para>The output folder location that will store the trained model.</para>
        /// <para>将存储已训练模型的输出文件夹位置。</para>
        /// </summary>
        [DisplayName("Output Model")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _out_folder { get; set; }


        /// <summary>
        /// <para>Max Epochs</para>
        /// <para>The maximum number of epochs for which the model will be trained. A maximum epoch of one means the dataset will be passed forward and backward through the neural network one time. The default value is 20.</para>
        /// <para>将训练模型的最大 epoch 数。最大 epoch 为 1 意味着数据集将通过神经网络向前和向后传递一次。默认值为 20。</para>
        /// </summary>
        [DisplayName("Max Epochs")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public long _max_epochs { get; set; } = 20;


        /// <summary>
        /// <para>Model Type</para>
        /// <para>Specifies the model type (network architecture) that will be used to train the deep learning model.</para>
        /// <para>Each available choice, together with its task (object detection, pixel classification,
        /// object classification, or image translation) and any metadata-format requirements, is
        /// documented on the corresponding <see cref="_model_type_value"/> member.</para>
        /// <para>指定将用于训练深度学习模型的模型类型。各选项的详细说明见 <see cref="_model_type_value"/> 的各枚举成员。</para>
        /// </summary>
        [DisplayName("Model Type")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _model_type_value? _model_type { get; set; } = null;

        /// <summary>
        /// Model architectures accepted by the tool's Model Type parameter. The
        /// <c>GPEnumValue</c> attribute carries the string passed to the geoprocessor.
        /// </summary>
        public enum _model_type_value
        {
            /// <summary>
            /// <para>Single Shot Detector (SSD) — object detection. Input training data uses the Pascal Visual Object Classes metadata format.</para>
            /// </summary>
            [Description("Single Shot Detector (Object detection)")]
            [GPEnumValue("SSD")]
            _SSD,

            /// <summary>
            /// <para>U-Net — pixel classification.</para>
            /// </summary>
            [Description("U-Net (Pixel classification)")]
            [GPEnumValue("UNET")]
            _UNET,

            /// <summary>
            /// <para>Feature Classifier — object or image classification.</para>
            /// </summary>
            [Description("Feature classifier (Object classification)")]
            [GPEnumValue("FEATURE_CLASSIFIER")]
            _FEATURE_CLASSIFIER,

            /// <summary>
            /// <para>Pyramid Scene Parsing Network (PSPNET) — pixel classification.</para>
            /// </summary>
            [Description("Pyramid Scene Parsing Network (Pixel classification)")]
            [GPEnumValue("PSPNET")]
            _PSPNET,

            /// <summary>
            /// <para>RetinaNet — object detection. Input training data uses the Pascal Visual Object Classes metadata format.</para>
            /// </summary>
            [Description("RetinaNet (Object detection)")]
            [GPEnumValue("RETINANET")]
            _RETINANET,

            /// <summary>
            /// <para>MaskRCNN — object detection / instance segmentation (precise delineation of objects, e.g. building footprints).
            /// Uses the MaskRCNN metadata format; class values must start at 1; requires a CUDA-enabled GPU for training.</para>
            /// </summary>
            [Description("MaskRCNN (Object detection)")]
            [GPEnumValue("MASKRCNN")]
            _MASKRCNN,

            /// <summary>
            /// <para>YOLOv3 — object detection.</para>
            /// </summary>
            [Description("YOLOv3 (Object detection)")]
            [GPEnumValue("YOLOV3")]
            _YOLOV3,

            /// <summary>
            /// <para>DeepLabV3 — pixel classification.</para>
            /// </summary>
            [Description("DeepLabV3 (Pixel classification)")]
            [GPEnumValue("DEEPLAB")]
            _DEEPLAB,

            /// <summary>
            /// <para>FasterRCNN — object detection.</para>
            /// </summary>
            [Description("FasterRCNN (Object detection)")]
            [GPEnumValue("FASTERRCNN")]
            _FASTERRCNN,

            /// <summary>
            /// <para>Bi-Directional Cascade Network (BDCN) edge detector — pixel classification;
            /// useful to improve edge detection for objects at different scales.</para>
            /// </summary>
            [Description("BDCN Edge Detector (Pixel classification)")]
            [GPEnumValue("BDCN_EDGEDETECTOR")]
            _BDCN_EDGEDETECTOR,

            /// <summary>
            /// <para>Holistically-Nested Edge Detection (HED) — pixel classification;
            /// useful in edge and object boundary detection.</para>
            /// </summary>
            [Description("HED Edge Detector (Pixel classification)")]
            [GPEnumValue("HED_EDGEDETECTOR")]
            _HED_EDGEDETECTOR,

            /// <summary>
            /// <para>Multi Task Road Extractor — pixel classification; useful for road network extraction from satellite imagery.</para>
            /// </summary>
            [Description("Multi Task Road Extractor (Pixel classification)")]
            [GPEnumValue("MULTITASK_ROADEXTRACTOR")]
            _MULTITASK_ROADEXTRACTOR,

            /// <summary>
            /// <para>Pix2Pix — image-to-image translation (generates images of one type from another).
            /// Input training data uses the Export Tiles metadata format.</para>
            /// </summary>
            [Description("Pix2Pix (Image translation)")]
            [GPEnumValue("PIX2PIX")]
            _PIX2PIX,

            /// <summary>
            /// <para>CycleGAN — image-to-image translation; unique in that the images to be trained
            /// do not need to overlap. Input training data uses the CycleGAN metadata format.</para>
            /// </summary>
            [Description("CycleGAN (Image translation)")]
            [GPEnumValue("CYCLEGAN")]
            _CYCLEGAN,

            /// <summary>
            /// <para>Super-resolution — image-to-image translation that increases resolution and improves
            /// image quality. Input training data uses the Export Tiles metadata format.</para>
            /// </summary>
            [Description("Super-resolution (Image translation)")]
            [GPEnumValue("SUPERRESOLUTION")]
            _SUPERRESOLUTION,

            /// <summary>
            /// <para>Change Detector — pixel classification using two spatial-temporal images to create a
            /// classified raster of the change. Input training data uses the Classified Tiles metadata format.</para>
            /// </summary>
            [Description("Change detector (Pixel Classification)")]
            [GPEnumValue("CHANGEDETECTOR")]
            _CHANGEDETECTOR,

            /// <summary>
            /// <para>Image Captioner — image-to-text translation; generates text captions for an image.</para>
            /// </summary>
            [Description("Image captioner (Image Translation)")]
            [GPEnumValue("IMAGECAPTIONER")]
            _IMAGECAPTIONER,

            /// <summary>
            /// <para>ConnectNet — pixel classification; useful for road network extraction from satellite imagery.</para>
            /// </summary>
            [Description("ConnectNet (Pixel classification)")]
            [GPEnumValue("CONNECTNET")]
            _CONNECTNET,

        }

        /// <summary>
        /// <para>Batch Size</para>
        /// <para><xdoc>
        ///   <para>The number of training samples to be processed for training at one time. The default value is 2.</para>
        ///   <para>If you have a powerful GPU, this number can be increased to 8, 16, 32, or 64.</para>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>一次要处理的训练样本数。默认值为 2。</para>
        ///   <para>如果您拥有强大的 GPU，则此数字可以增加到 8、16、32 或 64。</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Batch Size")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // Initializer aligned with the documented tool default (batch_size = 2).
        // The previous initializer of 64 contradicted the documentation and could
        // exhaust GPU memory on typical hardware when the caller left it unset.
        public long _batch_size { get; set; } = 2;


        /// <summary>
        /// <para>Model Arguments</para>
        /// <para><xdoc>
        ///   <para>The function arguments are defined in the Python raster function class. This is where you list additional deep learning parameters and arguments for experiments and refinement, such as a confidence threshold for adjusting sensitivity. The names of the arguments are populated from reading the Python module.</para>
        ///   <para>When you choose Single Shot Detector as the Model Type parameter value, the Model Arguments parameter will be populated with the following arguments:
        ///   <bulletList>
        ///     <bullet_item>grids—The number of grids the image will be divided into for processing. Setting this argument to 4 means the image will be divided into 4 x 4 or 16 grid cells. If no value is specified, the optimal grid value will be calculated based on the input imagery.  </bullet_item><para/>
        ///     <bullet_item>zooms—The number of zoom levels each grid cell will be scaled up or down. Setting this argument to 1 means all the grid cells will remain at the same size or zoom level. A zoom level of 2 means all the grid cells will become twice as large (zoomed in 100 percent). Providing a list of zoom levels means all the grid cells will be scaled using all the numbers in the list. The default is 1.0.  </bullet_item><para/>
        ///     <bullet_item>ratios—The list of aspect ratios to use for the anchor boxes. In object detection, an anchor box represents the ideal location, shape, and size of the object being predicted. Setting this argument to [1.0,1.0], [1.0, 0.5] means the anchor box is a square (1:1) or a rectangle in which the horizontal side is half the size of the vertical side (1:0.5). The default is [1.0, 1.0].  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        ///   <para>When you choose a pixel classification model such as Pyramid Scene Parsing Network, U-Net, or DeepLabv3 as the Model Type parameter value, the Model Arguments parameter will be populated with the following arguments:
        ///   <bulletList>
        ///     <bullet_item>use_net—Specifies whether the U-Net decoder will be used to recover data once the pyramid pooling is complete. The default is True. This argument is specific to the Pyramid Scene Parsing Network model.  </bullet_item><para/>
        ///     <bullet_item>pyramid_sizes—The number and size of convolution layers to be applied to the different subregions. The default is [1,2,3,6]. This argument is specific to the Pyramid Scene Parsing Network model.  </bullet_item><para/>
        ///     <bullet_item>mixup—Specifies whether mixup augmentation and mixup loss will be used. The default is False.  </bullet_item><para/>
        ///     <bullet_item>class_balancing—Specifies whether the cross-entropy loss inverse will be balanced to the frequency of pixels per class. The default is False.  </bullet_item><para/>
        ///     <bullet_item>focal_loss—Specifies whether focal loss will be used. The default is False.  </bullet_item><para/>
        ///     <bullet_item>ignore_classes—Contains the list of class values on which the model will not incur loss.  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        ///   <para>When you choose RetinaNet as the Model Type parameter value, the Model Arguments parameter will be populated with the following arguments:
        ///   <bulletList>
        ///     <bullet_item>scales—The number of scale levels each cell will be scaled up or down. The default is [1, 0.8, 0.63].  </bullet_item><para/>
        ///     <bullet_item>ratios—The aspect ratio of the anchor box. The default is 0.5,1,2.  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        ///   <para>When you choose Multi Task Road Extractor or ConnectNet as the Model Type parameter value, the Model Arguments parameter will be populated with the following arguments:
        ///   <bulletList>
        ///     <bullet_item>gaussian_thresh—Sets the Gaussian threshold, which sets the required road width. The valid range is 0.0 to 1.0. The default is 0.76.  </bullet_item><para/>
        ///     <bullet_item>orient_bin_size—Sets the bin size for orientation angles. The default is 20.  </bullet_item><para/>
        ///     <bullet_item>orient_theta—Sets the width of orientation mask. The default is 8.  </bullet_item><para/>
        ///     <bullet_item>mtl_model—Sets the architecture type that will be used to create the model. Valid choices are linknet or hourglass for linknet-based or hourglass-based, respectively, neural architectures. The default is hourglass.  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        ///   <para>When you choose Image Captioner as the Model Type parameter value, the Model Arguments parameter will be populated with the following arguments:The decode_params, are comprised of the following six parameters:
        ///   <bulletList>
        ///     <bullet_item>decode_params—A dictionary that controls how the Image Captioner will run. The default value is {'embed_size':100, 'hidden_size':100, 'attention_size':100, 'teacher_forcing':1, 'dropout':0.1, 'pretrained_emb':False}.  </bullet_item><para/>
        ///     <bullet_item>chip_size—Sets the size of image to train the model. Images are cropped to the specified chip size. If image size is less than chip size, the image size is used. The default size is 224 pixels.  </bullet_item><para/>
        ///   </bulletList>
        ///   <bulletList>
        ///     <bullet_item>embed_size—Sets the embedding size. The default is 100 layers in the neural network.  </bullet_item><para/>
        ///     <bullet_item>hidden_size—Sets the hidden layer size. The default is 100 layers in the neural network.  </bullet_item><para/>
        ///     <bullet_item>attention_size—Sets the intermediate attention layer size . The default is 100 layers in the neural network.  </bullet_item><para/>
        ///     <bullet_item>teacher_forcing—Sets the probability of teacher forcing. Teacher forcing is a strategy for training recurrent neural networks that uses model output from a prior time step as an input, instead of the previous output, during back propagation. Valid ranges is from 0.0 to 1.0. The default is 1.  </bullet_item><para/>
        ///     <bullet_item>dropout—Sets the dropout probability. Valid ranges is from 0.0 to 1.0. The default is 0.1.  </bullet_item><para/>
        ///     <bullet_item>pretrained_emb—Sets the pretrained embedding flag. If True, it will use fast text embedding. If False, it will not use the pretrained text embedding. The default is False.  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        ///   <para>When you choose Change Detector as the Model Type parameter value, the Model Arguments parameter will be populated with the following argument:
        ///   <bulletList>
        ///     <bullet_item>attention_type—Specifies the module type. The module choices are PAM (Pyramid Attention Module) or BAM (Basic Attention Module). The default is PAM.  </bullet_item></bulletList>
        ///   </para>
        ///   <para>All model types support the chip_size argument, which is the image chip size of the training samples. The image chip size is extracted from the .emd file from the folder specified in the Input Training Data parameter.</para>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>函数参数在 Python 栅格函数类中定义。在这里，您可以列出用于实验和优化的其他深度学习参数和参数，例如用于调整灵敏度的置信度阈值。参数的名称是通过读取 Python 模块来填充的。</para>
        /// <para>当您选择“单次检测器”作为“模型类型”参数值时，“模型参数”参数将填充以下参数：
        ///   <bulletList>
        ///     <bullet_item>grids - 将图像划分为用于处理的网格数。将此参数设置为 4 意味着图像将被划分为 4 x 4 或 16 个网格单元格。如果未指定任何值，则将根据输入影像计算最佳格网值。 </bullet_item><para/>
        ///     <bullet_item>缩放 - 每个格网像元将按比例放大或缩小的缩放级别数。将此参数设置为 1 意味着所有网格单元格将保持相同的大小或缩放级别。缩放级别为 2 表示所有网格像元将变大两倍（放大 100%）。提供缩放级别列表意味着将使用列表中的所有数字缩放所有网格单元格。默认值为 1.0。 </bullet_item><para/>
        ///     <bullet_item>ratios - 用于锚点框的纵横比列表。在对象检测中，锚点框表示被预测对象的理想位置、形状和大小。将此参数设置为 [1.0,1.0]， [1.0， 0.5] 表示锚框是正方形 （1：1） 或矩形，其中水平边的大小是垂直边 （1：0.5） 的一半。默认值为 [1.0， 1.0]。 </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// <para>当您选择像素分类模型（如金字塔场景解析网络、U-Net 或 DeepLabv3）作为模型类型参数值时，模型参数将填充以下参数：
        ///   <bulletList>
        ///     <bullet_item>use_net - 指定金字塔池化完成后是否使用 U-Net 解码器恢复数据。默认值为 True。此参数特定于金字塔场景解析网络模型。 </bullet_item><para/>
        ///     <bullet_item>pyramid_sizes - 要应用于不同子区域的卷积层的数量和大小。默认值为 [1,2,3,6]。此参数特定于金字塔场景解析网络模型。 </bullet_item><para/>
        ///     <bullet_item>mixup - 指定是否使用混淆增强和混淆丢失。默认值为 False。 </bullet_item><para/>
        ///     <bullet_item>class_balancing - 指定交叉熵损失逆是否与每个类的像素频率相平衡。默认值为 False。 </bullet_item><para/>
        ///     <bullet_item>focal_loss - 指定是否使用焦点损失。默认值为 False。 </bullet_item><para/>
        ///     <bullet_item>ignore_classes - 包含模型不会发生损失的类值列表。</bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// <para>选择 RetinaNet 作为模型类型参数值时，模型参数将填充以下参数：
        ///   <bulletList>
        ///     <bullet_item>scales - 每个像元将按比例放大或缩小的比例级别数。默认值为 [1， 0.8， 0.63]。 </bullet_item><para/>
        ///     <bullet_item>ratios - 锚框的纵横比。默认值为 0.5,1,2。 </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// <para>当您选择“多任务道路提取器”或“连接网络”作为“模型类型”参数值时，“模型参数”参数将填充以下参数：
        ///   <bulletList>
        ///     <bullet_item>gaussian_thresh - 设置高斯阈值，该阈值设置所需的道路宽度。有效范围为 0.0 到 1.0。默认值为 0.76。 </bullet_item><para/>
        ///     <bullet_item>orient_bin_size - 设置方向角度的图格大小。默认值为 20。 </bullet_item><para/>
        ///     <bullet_item>orient_theta - 设置方向蒙版的宽度。默认值为 8。 </bullet_item><para/>
        ///     <bullet_item>mtl_model - 设置将用于创建模型的体系结构类型。对于基于 linknet 或基于沙漏的神经架构，有效的选择是 linknet 或 hourglass。默认值为 hourglass。 </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// <para>当您选择“图像描述器”作为“模型类型”参数值时，“模型参数”参数将填充以下参数。decode_params 参数由以下六个参数组成：
        ///   <bulletList>
        ///     <bullet_item>decode_params - 控制图像标题器运行方式的字典。默认值为 {'embed_size'：100， 'hidden_size'：100， 'attention_size'：100， 'teacher_forcing'：1， 'dropout'：0.1， 'pretrained_emb'：False}。 </bullet_item><para/>
        ///     <bullet_item>chip_size - 设置用于训练模型的图像大小。图像被裁剪为指定的芯片尺寸。如果图像大小小于芯片大小，则使用图像大小。默认大小为 224 像素。 </bullet_item><para/>
        ///   </bulletList>
        ///   <bulletList>
        ///     <bullet_item>embed_size - 设置嵌入大小。默认值为神经网络中的 100 层。 </bullet_item><para/>
        ///     <bullet_item>hidden_size - 设置隐藏图层大小。默认值为神经网络中的 100 层。 </bullet_item><para/>
        ///     <bullet_item>attention_size - 设置中间注意图层大小。默认值为神经网络中的 100 层。 </bullet_item><para/>
        ///     <bullet_item>teacher_forcing - 设置教师强制的概率。教师强制是一种训练循环神经网络的策略，它使用来自前一个时间步的模型输出作为输入，而不是在反向传播期间的前一个输出。有效范围为 0.0 到 1.0。默认值为 1。 </bullet_item><para/>
        ///     <bullet_item>dropout - 设置辍学概率。有效范围为 0.0 到 1.0。默认值为 0.1。 </bullet_item><para/>
        ///     <bullet_item>pretrained_emb - 设置预训练的嵌入标志。如果为 True，它将使用快速文本嵌入。如果为 False，则不会使用预训练的文本嵌入。默认值为 False。 </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// <para>当您选择“更改检测器”作为“模型类型”参数值时，“模型参数”参数将填充以下参数：
        ///   <bulletList>
        /// <bullet_item>attention_type - 指定模块类型。模块选项为 PAM（金字塔注意力模块）或 BAM（基本注意力模块）。默认值为 PAM。 </bullet_item></bulletList>
        ///   </para>
        ///   <para>所有模型类型都支持 chip_size 参数，即训练样本的图像芯片大小。图像芯片大小是从输入训练数据参数中指定的文件夹的 .emd 文件中提取的。</para>
        /// </xdoc></para>
        /// <para></para>
        /// </summary>
        // CA1805: a reference-type auto-property already defaults to null,
        // so the redundant "= null" initializer was removed.
        [DisplayName("Model Arguments")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _arguments { get; set; }


        /// <summary>
        /// <para>Learning Rate</para>
        /// <para>The rate at which existing information will be overwritten with newly acquired information throughout the training process. If no value is specified, the optimal learning rate will be extracted from the learning curve during the training process.</para>
        /// <para>在整个训练过程中，现有信息被新获取的信息覆盖的速率。如果未指定值，则将在训练过程中从学习曲线中提取最佳学习率。</para>
        /// <para></para>
        /// </summary>
        [DisplayName("Learning Rate")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // CA1805: Nullable<double> already defaults to null; redundant "= null" removed.
        public double? _learning_rate { get; set; }


        /// <summary>
        /// <para>Backbone Model</para>
        /// <para><xdoc>
        ///   <para>Specifies the preconfigured neural network that will be used as the architecture for training the new model. This method is known as Transfer Learning.</para>
        ///   <bulletList>
        ///     <bullet_item>DenseNet-121—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 121 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</bullet_item><para/>
        ///     <bullet_item>DenseNet-161—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 161 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</bullet_item><para/>
        ///     <bullet_item>DenseNet-169—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 169 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</bullet_item><para/>
        ///     <bullet_item>DenseNet-201—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 201 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</bullet_item><para/>
        ///     <bullet_item>MobileNet version 2—This preconfigured model will be trained on the ImageNet Database and is 54 layers deep geared toward Edge device computing, since it uses less memory.</bullet_item><para/>
        ///     <bullet_item>ResNet-18—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 18 layers deep.</bullet_item><para/>
        ///     <bullet_item>ResNet-34—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 34 layers deep. This is the default.</bullet_item><para/>
        ///     <bullet_item>ResNet-50—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 50 layers deep.</bullet_item><para/>
        ///     <bullet_item>ResNet-101—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 101 layers deep.</bullet_item><para/>
        ///     <bullet_item>ResNet-152—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 152 layers deep.</bullet_item><para/>
        ///     <bullet_item>VGG-11—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 11 layers deep.</bullet_item><para/>
        ///     <bullet_item>VGG-11 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 11 layers.</bullet_item><para/>
        ///     <bullet_item>VGG-13—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 13 layers deep.</bullet_item><para/>
        ///     <bullet_item>VGG-13 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 13 layers.</bullet_item><para/>
        ///     <bullet_item>VGG-16—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 16 layers deep.</bullet_item><para/>
        ///     <bullet_item>VGG-16 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 16 layers.</bullet_item><para/>
        ///     <bullet_item>VGG-19—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 19 layers deep.</bullet_item><para/>
        ///     <bullet_item>VGG-19 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 19 layers.</bullet_item><para/>
        ///     <bullet_item>DarkNet-53—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images and is 53 layers deep.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>指定将用作训练新模型的体系结构的预配置神经网络。这种方法称为迁移学习。</para>
        ///   <bulletList>
        ///     <bullet_item>DenseNet-121—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 121 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</bullet_item><para/>
        ///     <bullet_item>DenseNet-161—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 161 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</bullet_item><para/>
        ///     <bullet_item>DenseNet-169—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 169 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</bullet_item><para/>
        ///     <bullet_item>DenseNet-201—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 201 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</bullet_item><para/>
        ///     <bullet_item>MobileNet 版本 2 — 此预配置模型将在 ImageNet 数据库上进行训练，并且有 54 层深度，面向边缘设备计算，因为它使用的内存更少。</bullet_item><para/>
        ///     <bullet_item>ResNet-18—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过一百万张影像，深度为18个图层。</bullet_item><para/>
        ///     <bullet_item>ResNet-34—预配置模型将在ImageNET数据集上训练的残差网络，其中包含超过100万张影像，深度为34个图层。这是默认设置。</bullet_item><para/>
        ///     <bullet_item>ResNet-50—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过100万张影像，深度为50个图层。</bullet_item><para/>
        ///     <bullet_item>ResNet-101—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过100万张影像，深度为101个图层。</bullet_item><para/>
        ///     <bullet_item>ResNet-152—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过100万张影像，深度为152个图层。</bullet_item><para/>
        ///     <bullet_item>VGG-11 - 预配置模型将是一个在 ImageNET 数据集上训练的卷积神经网络，该数据集包含超过 100 万张图像，可将图像分类为 1,000 个对象类别，深度为 11 层。</bullet_item><para/>
        ///     <bullet_item>具有批量归一化的 VGG-11 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每一层都已归一化。它在 ImageNet 数据集上训练，有 11 个层。</bullet_item><para/>
        ///     <bullet_item>VGG-13 - 预配置模型将在ImageNET数据集上训练的卷积神经网络，该数据集包含超过100万张图像，可将图像分类为1,000个对象类别，深度为13层。</bullet_item><para/>
        ///     <bullet_item>具有批量归一化的 VGG-13 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每个图层都已归一化。它在 ImageNet 数据集上训练，有 13 个层。</bullet_item><para/>
        ///     <bullet_item>VGG-16 - 预配置模型将是一个在 ImageNET 数据集上训练的卷积神经网络，该数据集包含超过 100 万张图像，用于将图像分类为 1,000 个对象类别，深度为 16 层。</bullet_item><para/>
        ///     <bullet_item>具有批量归一化的 VGG-16 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每一层都已归一化。它在 ImageNet 数据集上进行训练，有 16 个层。</bullet_item><para/>
        ///     <bullet_item>VGG-19 - 预配置模型将是一个在 ImageNET 数据集上训练的卷积神经网络，该数据集包含超过 100 万张图像，用于将图像分类为 1,000 个对象类别，深度为 19 层。</bullet_item><para/>
        ///     <bullet_item>具有批量归一化的 VGG-19 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每一层都已归一化。它在 ImageNet 数据集上训练，有 19 个图层。</bullet_item><para/>
        ///     <bullet_item>DarkNet-53 - 预配置模型将在ImageNET数据集上训练的卷积神经网络，该数据集包含超过100万张图像，深度为53层。</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para></para>
        /// </summary>
        [DisplayName("Backbone Model")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // CA1805: a nullable enum already defaults to null; redundant "= null" removed.
        public _backbone_model_value? _backbone_model { get; set; }

        public enum _backbone_model_value
        {
            /// <summary>
            /// <para>ResNet-18</para>
            /// <para>ResNet-18—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 18 layers deep.</para>
            /// <para>ResNet-18—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过一百万张影像，深度为18个图层。</para>
            /// </summary>
            [Description("ResNet-18")]
            [GPEnumValue("RESNET18")]
            _RESNET18,

            /// <summary>
            /// <para>ResNet-34</para>
            /// <para>ResNet-34—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 34 layers deep. This is the default.</para>
            /// <para>ResNet-34—预配置模型将在ImageNET数据集上训练的残差网络，其中包含超过100万张影像，深度为34个图层。这是默认设置。</para>
            /// </summary>
            [Description("ResNet-34")]
            [GPEnumValue("RESNET34")]
            _RESNET34,

            /// <summary>
            /// <para>ResNet-50</para>
            /// <para>ResNet-50—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 50 layers deep.</para>
            /// <para>ResNet-50—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过100万张影像，深度为50个图层。</para>
            /// </summary>
            [Description("ResNet-50")]
            [GPEnumValue("RESNET50")]
            _RESNET50,

            /// <summary>
            /// <para>ResNet-101</para>
            /// <para>ResNet-101—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 101 layers deep.</para>
            /// <para>ResNet-101—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过100万张影像，深度为101个图层。</para>
            /// </summary>
            [Description("ResNet-101")]
            [GPEnumValue("RESNET101")]
            _RESNET101,

            /// <summary>
            /// <para>ResNet-152</para>
            /// <para>ResNet-152—The preconfigured model will be a residual network trained on the ImageNET Dataset that contains more than 1 million images and is 152 layers deep.</para>
            /// <para>ResNet-152—预配置模型将在ImageNET数据集上训练的残差网络，该数据集包含超过100万张影像，深度为152个图层。</para>
            /// </summary>
            [Description("ResNet-152")]
            [GPEnumValue("RESNET152")]
            _RESNET152,

            /// <summary>
            /// <para>DenseNet-121</para>
            /// <para>DenseNet-121—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 121 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</para>
            /// <para>DenseNet-121—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 121 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</para>
            /// </summary>
            [Description("DenseNet-121")]
            [GPEnumValue("DENSENET121")]
            _DENSENET121,

            /// <summary>
            /// <para>DenseNet-169</para>
            /// <para>DenseNet-169—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 169 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</para>
            /// <para>DenseNet-169—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 169 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</para>
            /// </summary>
            [Description("DenseNet-169")]
            [GPEnumValue("DENSENET169")]
            _DENSENET169,

            /// <summary>
            /// <para>DenseNet-161</para>
            /// <para>DenseNet-161—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 161 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</para>
            /// <para>DenseNet-161—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 161 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</para>
            /// </summary>
            [Description("DenseNet-161")]
            [GPEnumValue("DENSENET161")]
            _DENSENET161,

            /// <summary>
            /// <para>DenseNet-201</para>
            /// <para>DenseNet-201—The preconfigured model will be a dense network trained on the ImageNET Dataset that contains more than 1 million images and is 201 layers deep. Unlike RESNET, which combines the layer using summation, DenseNet combines the layers using concatenation.</para>
            /// <para>DenseNet-201—预配置模型将是一个在 ImageNET 数据集上训练的密集网络，其中包含超过 100 万张影像，深度为 201 个图层。与使用求和合并层的 RESNET 不同，DenseNet 使用串联合并层。</para>
            /// </summary>
            [Description("DenseNet-201")]
            [GPEnumValue("DENSENET201")]
            _DENSENET201,

            /// <summary>
            /// <para>VGG-11</para>
            /// <para>VGG-11—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 11 layers deep.</para>
            /// <para>VGG-11 - 预配置模型将是一个在 ImageNET 数据集上训练的卷积神经网络，该数据集包含超过 100 万张图像，可将图像分类为 1,000 个对象类别，深度为 11 层。</para>
            /// </summary>
            [Description("VGG-11")]
            [GPEnumValue("VGG11")]
            _VGG11,

            /// <summary>
            /// <para>VGG-11 with batch normalization</para>
            /// <para>VGG-11 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 11 layers.</para>
            /// <para>具有批量归一化的 VGG-11 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每一层都已归一化。它在 ImageNet 数据集上训练，有 11 个层。</para>
            /// </summary>
            [Description("VGG-11 with batch normalization")]
            [GPEnumValue("VGG11_BN")]
            _VGG11_BN,

            /// <summary>
            /// <para>VGG-13</para>
            /// <para>VGG-13—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 13 layers deep.</para>
            /// <para>VGG-13 - 预配置模型将在ImageNET数据集上训练的卷积神经网络，该数据集包含超过100万张图像，可将图像分类为1,000个对象类别，深度为13层。</para>
            /// </summary>
            [Description("VGG-13")]
            [GPEnumValue("VGG13")]
            _VGG13,

            /// <summary>
            /// <para>VGG-13 with batch normalization</para>
            /// <para>VGG-13 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 13 layers.</para>
            /// <para>具有批量归一化的 VGG-13 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每个图层都已归一化。它在 ImageNet 数据集上训练，有 13 个层。</para>
            /// </summary>
            [Description("VGG-13 with batch normalization")]
            [GPEnumValue("VGG13_BN")]
            _VGG13_BN,

            /// <summary>
            /// <para>VGG-16</para>
            /// <para>VGG-16—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 16 layers deep.</para>
            /// <para>VGG-16 - 预配置模型将是一个在 ImageNET 数据集上训练的卷积神经网络，该数据集包含超过 100 万张图像，用于将图像分类为 1,000 个对象类别，深度为 16 层。</para>
            /// </summary>
            [Description("VGG-16")]
            [GPEnumValue("VGG16")]
            _VGG16,

            /// <summary>
            /// <para>VGG-16 with batch normalization</para>
            /// <para>VGG-16 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 16 layers.</para>
            /// <para>具有批量归一化的 VGG-16 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每一层都已归一化。它在 ImageNet 数据集上进行训练，有 16 个层。</para>
            /// </summary>
            [Description("VGG-16 with batch normalization")]
            [GPEnumValue("VGG16_BN")]
            _VGG16_BN,

            /// <summary>
            /// <para>VGG-19</para>
            /// <para>VGG-19—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images to classify images into 1,000 object categories and is 19 layers deep.</para>
            /// <para>VGG-19 - 预配置模型将是一个在 ImageNET 数据集上训练的卷积神经网络，该数据集包含超过 100 万张图像，用于将图像分类为 1,000 个对象类别，深度为 19 层。</para>
            /// </summary>
            [Description("VGG-19")]
            [GPEnumValue("VGG19")]
            _VGG19,

            /// <summary>
            /// <para>VGG-19 with batch normalization</para>
            /// <para>VGG-19 with batch normalization—This preconfigured model will be based on the VGG network but with batch normalization, which means each layer in the network is normalized. It trained on the ImageNet dataset and has 19 layers.</para>
            /// <para>具有批量归一化的 VGG-19 - 此预配置模型将基于 VGG 网络，但具有批量归一化，这意味着网络中的每一层都已归一化。它在 ImageNet 数据集上训练，有 19 个图层。</para>
            /// </summary>
            [Description("VGG-19 with batch normalization")]
            [GPEnumValue("VGG19_BN")]
            _VGG19_BN,

            /// <summary>
            /// <para>MobileNet version 2</para>
            /// <para>MobileNet version 2—This preconfigured model will be trained on the ImageNet Database and is 54 layers deep geared toward Edge device computing, since it uses less memory.</para>
            /// <para>MobileNet 版本 2 — 此预配置模型将在 ImageNet 数据库上进行训练，并且有 54 层深度，面向边缘设备计算，因为它使用的内存更少。</para>
            /// </summary>
            [Description("MobileNet version 2")]
            [GPEnumValue("MOBILENET_V2")]
            _MOBILENET_V2,

            /// <summary>
            /// <para>DarkNet-53</para>
            /// <para>DarkNet-53—The preconfigured model will be a convolution neural network trained on the ImageNET Dataset that contains more than 1 million images and is 53 layers deep.</para>
            /// <para>DarkNet-53 - 预配置模型将在ImageNET数据集上训练的卷积神经网络，该数据集包含超过100万张图像，深度为53层。</para>
            /// </summary>
            [Description("DarkNet-53")]
            [GPEnumValue("DARKNET53")]
            _DARKNET53,

        }

        /// <summary>
        /// <para>Pre-trained Model</para>
        /// <para><xdoc>
        ///   <para>A pretrained model that will be used to fine-tune the new model. The input is an Esri Model Definition file (.emd) or a deep learning package file (.dlpk).</para>
        ///   <para>A pretrained model with similar classes can be fine-tuned to fit the new model. The pretrained model must have been trained with the same model type and backbone model that will be used to train the new model.</para>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>将用于微调新模型的预训练模型。输入是 Esri 模型定义文件 （.emd） 或深度学习包文件 （.dlpk）。</para>
        ///   <para>可以对具有相似类的预训练模型进行微调以适应新模型。预训练模型必须使用将用于训练新模型的相同模型类型和主干模型进行训练。</para>
        /// </xdoc></para>
        /// <para></para>
        /// </summary>
        [DisplayName("Pre-trained Model")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // CA1805: a reference-type auto-property already defaults to null; redundant "= null" removed.
        public object _pretrained_model { get; set; }


        /// <summary>
        /// <para>Validation %</para>
        /// <para>The percentage of training samples that will be used for validating the model. The default value is 10.</para>
        /// <para>将用于验证模型的训练样本的百分比。默认值为 10。</para>
        /// <para></para>
        /// </summary>
        [DisplayName("Validation %")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // Initialized to 10 to match the tool's documented default validation percentage.
        public double _validation_percentage { get; set; } = 10;


        /// <summary>
        /// <para>Stop when model stops improving</para>
        /// <para><xdoc>
        ///   <para>Specifies whether early stopping will be implemented.</para>
        ///   <bulletList>
        ///     <bullet_item>Checked—Early stopping will be implemented, and the model training will stop when the model is no longer improving, regardless of the Max Epochs parameter value specified. This is the default.</bullet_item><para/>
        ///     <bullet_item>Unchecked—Early stopping will not be implemented, and the model training will continue until the Max Epochs parameter value is reached.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>指定是否实施提前停止。</para>
        ///   <bulletList>
        ///     <bullet_item>选中—将实施提前停止，并且无论指定了何种最大纪元参数值，模型训练都将在模型不再改进时停止。这是默认设置。</bullet_item><para/>
        ///     <bullet_item>未选中—不会实施提前停止，模型训练将继续进行，直到达到最大纪元参数值。</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para></para>
        /// </summary>
        [DisplayName("Stop when model stops improving")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // Defaults to STOP_TRAINING ("true"), matching the documented default above.
        public _stop_training_value _stop_training { get; set; } = _stop_training_value._true;

        public enum _stop_training_value
        {
            /// <summary>
            /// <para>STOP_TRAINING</para>
            /// <para>Early stopping will be implemented: training stops once the model is no longer improving, regardless of the Max Epochs parameter value. This is the default.</para>
            /// <para></para>
            /// </summary>
            [Description("STOP_TRAINING")]
            [GPEnumValue("true")]
            _true,

            /// <summary>
            /// <para>CONTINUE_TRAINING</para>
            /// <para>Early stopping will not be implemented: training continues until the Max Epochs parameter value is reached.</para>
            /// <para></para>
            /// </summary>
            [Description("CONTINUE_TRAINING")]
            [GPEnumValue("false")]
            _false,

        }

        /// <summary>
        /// <para>Output Model</para>
        /// <para>The trained model produced by the tool. This is a derived output parameter (see the Option attribute below) rather than a user-supplied input.</para>
        /// <para></para>
        /// <para></para>
        /// </summary>
        [DisplayName("Output Model")]
        [Description("")]
        [Option(OptionTypeEnum.derived)]
        public object _out_model_file { get; set; }


        /// <summary>
        /// <para>Freeze Model</para>
        /// <para><xdoc>
        ///   <para>Specifies whether the backbone layers in the pretrained model will be frozen, so that the weights and biases remain as originally designed.
        ///   <bulletList>
        ///     <bullet_item>Checked—The backbone layers will be frozen, and the predefined weights and biases will not be altered in the Backbone Model parameter. This is the default.  </bullet_item><para/>
        ///     <bullet_item>Unchecked—The backbone layers will not be frozen, and the weights and biases of the Backbone Model parameter can be altered to fit the training samples. This takes more time to process but typically produces better results.  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// </xdoc></para>
        /// <para><xdoc>
        /// <para>指定是否冻结预训练模型中的主干层，以便权重和偏差保持原始设计。
        ///   <bulletList>
        ///     <bullet_item>选中 - 主干图层将被冻结，并且不会在主干模型参数中更改预定义的权重和偏差。这是默认设置。 </bullet_item><para/>
        ///     <bullet_item>未选中—主干图层不会被冻结，并且可以更改主干模型参数的权重和偏差以拟合训练样本。这需要更多时间来处理，但通常会产生更好的结果。 </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// </xdoc></para>
        /// <para></para>
        /// </summary>
        [DisplayName("Freeze Model")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        // Defaults to FREEZE_MODEL ("true"): backbone weights stay frozen unless the caller opts out.
        public _freeze_value _freeze { get; set; } = _freeze_value._true;

        public enum _freeze_value
        {
            /// <summary>
            /// <para>FREEZE_MODEL</para>
            /// <para>The backbone layers will be frozen; the predefined weights and biases of the backbone model will not be altered during training. This is the default.</para>
            /// <para></para>
            /// </summary>
            [Description("FREEZE_MODEL")]
            [GPEnumValue("true")]
            _true,

            /// <summary>
            /// <para>UNFREEZE_MODEL</para>
            /// <para>The backbone layers will not be frozen; the backbone model's weights and biases can be altered to fit the training samples. Slower, but typically produces better results.</para>
            /// <para></para>
            /// </summary>
            [Description("UNFREEZE_MODEL")]
            [GPEnumValue("false")]
            _false,

        }

        /// <summary>
        /// Forwards the given geoprocessing environment settings to the base process configuration
        /// and returns this instance so calls can be chained fluently.
        /// </summary>
        /// <param name="extent">The Extent environment setting, or null to leave it unset.</param>
        /// <param name="parallelProcessingFactor">The Parallel Processing Factor environment setting, or null to leave it unset.</param>
        /// <param name="scratchWorkspace">The Scratch Workspace environment setting, or null to leave it unset.</param>
        /// <param name="workspace">The Current Workspace environment setting, or null to leave it unset.</param>
        /// <returns>This tool instance, for fluent chaining.</returns>
        public TrainDeepLearningModel SetEnv(object extent = null, object parallelProcessingFactor = null, object scratchWorkspace = null, object workspace = null)
        {
            base.SetEnv(extent: extent, parallelProcessingFactor: parallelProcessingFactor, scratchWorkspace: scratchWorkspace, workspace: workspace);
            return this;
        }

    }

}