using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes.DomainAttributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Enums;
using System.Collections.Generic;
using System.ComponentModel;

namespace Baci.ArcGIS._ImageAnalystTools._DeepLearning
{
    /// <summary>
    /// <para>Detect Objects Using Deep Learning</para>
    /// <para>Runs a trained deep learning model on an input raster to produce a feature class containing the objects it finds.
    /// The features can be bounding boxes or polygons around the objects found, or points at the centers of the objects.</para>
    /// <para>Wrapper for the ArcGIS Pro geoprocessing tool <c>ia.DetectObjectsUsingDeepLearning</c> (Image Analyst Tools).
    /// Parameter metadata (display names, optionality, enum wire values) is carried by attributes and read by the
    /// geoprocessing framework — member names and attribute values are part of the tool contract.</para>
    /// </summary>
    [DisplayName("Detect Objects Using Deep Learning")]
    public class DetectObjectsUsingDeepLearning : AbstractGPProcess
    {
        /// <summary>
        /// Parameterless constructor. The required parameters
        /// (<see cref="_in_raster"/>, <see cref="_out_detected_objects"/>, <see cref="_in_model_definition"/>)
        /// must be assigned before the tool is executed.
        /// </summary>
        public DetectObjectsUsingDeepLearning()
        {

        }

        /// <summary>
        /// Constructs the tool with all required parameters set.
        /// </summary>
        /// <param name="_in_raster">
        /// <para>Input Raster</para>
        /// <para>The input image used to detect objects. The input can be a single raster or multiple rasters in a
        /// mosaic dataset, image service, or folder of images. A feature class with image attachments is also supported.</para>
        /// </param>
        /// <param name="_out_detected_objects">
        /// <para>Output Detected Objects</para>
        /// <para>The output feature class that will contain geometries circling the object or objects detected in the input image.</para>
        /// </param>
        /// <param name="_in_model_definition">
        /// <para>Model Definition</para>
        /// <para>This parameter can be an Esri model definition JSON file (.emd), a JSON string, or a deep learning
        /// model package (.dlpk). A JSON string is useful when this tool is used on the server so you can paste the
        /// JSON string rather than upload the .emd file. The .dlpk file must be stored locally.</para>
        /// <para>It contains the path to the deep learning binary model file, the path to the Python raster function
        /// to be used, and other parameters such as preferred tile size or padding.</para>
        /// </param>
        public DetectObjectsUsingDeepLearning(object _in_raster, object _out_detected_objects, object _in_model_definition)
        {
            this._in_raster = _in_raster;
            this._out_detected_objects = _out_detected_objects;
            this._in_model_definition = _in_model_definition;
        }

        /// <summary>The toolbox this tool belongs to.</summary>
        public override string ToolboxName => "Image Analyst Tools";

        /// <summary>The tool's display name.</summary>
        public override string ToolName => "Detect Objects Using Deep Learning";

        /// <summary>The fully qualified geoprocessing call name (arcpy-style identifier).</summary>
        public override string CallName => "ia.DetectObjectsUsingDeepLearning";

        /// <summary>Geoprocessing environment settings this tool honors.</summary>
        public override List<string> AcceptEnvironments => ["cellSize", "extent", "geographicTransformations", "gpuID", "mask", "outputCoordinateSystem", "parallelProcessingFactor", "processorType", "scratchWorkspace", "workspace"];

        /// <summary>
        /// Tool parameter values in positional order, as submitted to the geoprocessor.
        /// Enum-typed parameters are converted to their wire strings via <c>GetGPValue()</c>.
        /// </summary>
        public override object[] ParameterInfo => [_in_raster, _out_detected_objects, _in_model_definition, _arguments, _run_nms.GetGPValue(), _confidence_score_field, _class_value_field, _max_overlap_ratio, _processing_mode.GetGPValue()];

        /// <summary>
        /// <para>Input Raster (required)</para>
        /// <para>The input image used to detect objects. The input can be a single raster or multiple rasters in a
        /// mosaic dataset, image service, or folder of images. A feature class with image attachments is also supported.</para>
        /// </summary>
        [DisplayName("Input Raster")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _in_raster { get; set; }


        /// <summary>
        /// <para>Output Detected Objects (required)</para>
        /// <para>The output feature class that will contain geometries circling the object or objects detected in the input image.</para>
        /// </summary>
        [DisplayName("Output Detected Objects")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _out_detected_objects { get; set; }


        /// <summary>
        /// <para>Model Definition (required)</para>
        /// <para>This parameter can be an Esri model definition JSON file (.emd), a JSON string, or a deep learning
        /// model package (.dlpk). A JSON string is useful when this tool is used on the server so you can paste the
        /// JSON string rather than upload the .emd file. The .dlpk file must be stored locally.</para>
        /// <para>It contains the path to the deep learning binary model file, the path to the Python raster function
        /// to be used, and other parameters such as preferred tile size or padding.</para>
        /// </summary>
        [DisplayName("Model Definition")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _in_model_definition { get; set; }


        /// <summary>
        /// <para>Arguments (optional)</para>
        /// <para>The function arguments defined in the Python raster function class. This is where additional deep
        /// learning parameters and arguments for experiments and refinement are listed, such as a confidence threshold
        /// for adjusting the sensitivity. The names of the arguments are populated from the Python module.</para>
        /// </summary>
        [DisplayName("Arguments")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _arguments { get; set; } = null;


        /// <summary>
        /// <para>Non Maximum Suppression (optional; defaults to unchecked)</para>
        /// <para>Specifies whether nonmaximum suppression will be performed, in which duplicate objects are identified
        /// and the duplicate features with lower confidence value are removed.</para>
        /// <list type="bullet">
        ///   <item>Unchecked — Nonmaximum suppression will not be performed. All objects that are detected will be in
        ///   the output feature class. This is the default.</item>
        ///   <item>Checked — Nonmaximum suppression will be performed and duplicate objects that are detected will be removed.</item>
        /// </list>
        /// </summary>
        [DisplayName("Non Maximum Suppression")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _run_nms_value _run_nms { get; set; } = _run_nms_value._false;

        /// <summary>
        /// Wire values for the Non Maximum Suppression parameter.
        /// NOTE(review): the <c>Description</c> strings are NMS/NO_NMS while the <c>GPEnumValue</c> wire values are
        /// "true"/"false" — this mirrors the generator's convention; confirm against the tool's expected keyword form
        /// before changing either.
        /// </summary>
        public enum _run_nms_value
        {
            /// <summary>
            /// <para>NMS — perform nonmaximum suppression; duplicate detected objects are removed.</para>
            /// </summary>
            [Description("NMS")]
            [GPEnumValue("true")]
            _true,

            /// <summary>
            /// <para>NO_NMS — do not perform nonmaximum suppression; all detected objects are kept. This is the default.</para>
            /// </summary>
            [Description("NO_NMS")]
            [GPEnumValue("false")]
            _false,

        }

        /// <summary>
        /// <para>Confidence Score Field (optional)</para>
        /// <para>The name of the field in the feature class that will contain the confidence scores as output by the
        /// object detection method.</para>
        /// <para>This parameter is required when the Non Maximum Suppression parameter is checked.</para>
        /// </summary>
        [DisplayName("Confidence Score Field")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _confidence_score_field { get; set; } = null;


        /// <summary>
        /// <para>Class Value Field (optional)</para>
        /// <para>The name of the class value field in the input feature class.</para>
        /// <para>If a field name is not specified, a Classvalue or Value field will be used. If these fields do not
        /// exist, all records will be identified as belonging to one class.</para>
        /// </summary>
        [DisplayName("Class Value Field")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _class_value_field { get; set; } = null;


        /// <summary>
        /// <para>Max Overlap Ratio (optional)</para>
        /// <para>The maximum overlap ratio for two overlapping features, which is defined as the ratio of intersection
        /// area over union area. The default is 0.</para>
        /// </summary>
        [DisplayName("Max Overlap Ratio")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public double _max_overlap_ratio { get; set; } = 0;


        /// <summary>
        /// <para>Processing Mode (optional; defaults to process as mosaicked image)</para>
        /// <para>Specifies how all raster items in a mosaic dataset or an image service will be processed. This
        /// parameter is applied when the input raster is a mosaic dataset or an image service.</para>
        /// <list type="bullet">
        ///   <item>Process as mosaicked image — All raster items in the mosaic dataset or image service will be
        ///   mosaicked together and processed. This is the default.</item>
        ///   <item>Process all raster items separately — All raster items in the mosaic dataset or image service will
        ///   be processed as separate images.</item>
        /// </list>
        /// </summary>
        [DisplayName("Processing Mode")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _processing_mode_value _processing_mode { get; set; } = _processing_mode_value._PROCESS_AS_MOSAICKED_IMAGE;

        /// <summary>
        /// Wire values for the Processing Mode parameter.
        /// </summary>
        public enum _processing_mode_value
        {
            /// <summary>
            /// <para>Process as mosaicked image — All raster items in the mosaic dataset or image service will be
            /// mosaicked together and processed. This is the default.</para>
            /// </summary>
            [Description("Process as mosaicked image")]
            [GPEnumValue("PROCESS_AS_MOSAICKED_IMAGE")]
            _PROCESS_AS_MOSAICKED_IMAGE,

            /// <summary>
            /// <para>Process all raster items separately — All raster items in the mosaic dataset or image service
            /// will be processed as separate images.</para>
            /// </summary>
            [Description("Process all raster items separately")]
            [GPEnumValue("PROCESS_ITEMS_SEPARATELY")]
            _PROCESS_ITEMS_SEPARATELY,

        }

        /// <summary>
        /// Sets environment settings for this tool and returns <c>this</c> for fluent chaining.
        /// Only the environments forwarded here are configurable through this overload; see
        /// <see cref="AcceptEnvironments"/> for the full set the tool honors.
        /// </summary>
        public DetectObjectsUsingDeepLearning SetEnv(object cellSize = null, object extent = null, object geographicTransformations = null, object mask = null, object outputCoordinateSystem = null, object parallelProcessingFactor = null, object scratchWorkspace = null, object workspace = null)
        {
            base.SetEnv(cellSize: cellSize, extent: extent, geographicTransformations: geographicTransformations, mask: mask, outputCoordinateSystem: outputCoordinateSystem, parallelProcessingFactor: parallelProcessingFactor, scratchWorkspace: scratchWorkspace, workspace: workspace);
            return this;
        }

    }

}