using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes.DomainAttributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Enums;
using System.Collections.Generic;
using System.ComponentModel;

namespace Baci.ArcGIS._RasterAnalysisTools._DeepLearning
{
    /// <summary>
    /// <para>Detect Objects Using Deep Learning</para>
    /// <para>Runs a trained deep learning model on an input raster to produce a feature class containing the objects it identifies. The feature class can be shared as a hosted feature layer in your portal. The features can be bounding boxes or polygons around the objects found, or points at the centers of the objects.</para>
    /// </summary>
    [DisplayName("Detect Objects Using Deep Learning")]
    public class DetectObjectsUsingDeepLearning : AbstractGPProcess
    {
        /// <summary>
        /// Parameterless constructor. All parameters must be assigned through the properties before execution.
        /// </summary>
        public DetectObjectsUsingDeepLearning()
        {
        }

        /// <summary>
        /// Constructor that assigns the three required tool parameters.
        /// </summary>
        /// <param name="_inputRaster">
        /// <para>Input Raster</para>
        /// <para>The input image used to detect objects. It can be an image service URL, a raster layer, an image service, a map server layer, or an internet tiled layer.</para>
        /// </param>
        /// <param name="_inputModel">
        /// <para>Input Model</para>
        /// <para>The input model can be a file or a URL of a deep learning package (.dlpk) item from the portal.</para>
        /// </param>
        /// <param name="_outputName">
        /// <para>Output Name</para>
        /// <para>The name of the output feature service of detected objects.</para>
        /// </param>
        public DetectObjectsUsingDeepLearning(object _inputRaster, object _inputModel, object _outputName)
        {
            this._inputRaster = _inputRaster;
            this._inputModel = _inputModel;
            this._outputName = _outputName;
        }

        /// <summary>Display name of the toolbox that owns this tool.</summary>
        public override string ToolboxName => "Raster Analysis Tools";

        /// <summary>Display name of the tool.</summary>
        public override string ToolName => "Detect Objects Using Deep Learning";

        /// <summary>Geoprocessor call name (arcpy-style "ra" alias + tool).</summary>
        public override string CallName => "ra.DetectObjectsUsingDeepLearning";

        /// <summary>Environment settings this tool honors; see also <see cref="SetEnv"/>.</summary>
        public override List<string> AcceptEnvironments => ["cellSize", "extent", "outputCoordinateSystem", "parallelProcessingFactor", "processorType"];

        /// <summary>Ordered parameter values passed to the geoprocessor; order must match the tool's signature.</summary>
        public override object[] ParameterInfo => [_inputRaster, _inputModel, _outputName, _modelArguments, _runNMS.GetGPValue(), _confidenceScoreField, _classValueField, _maxOverlapRatio, _outObjects, _processingMode.GetGPValue()];

        /// <summary>
        /// <para>Input Raster</para>
        /// <para>The input image used to detect objects. It can be an image service URL, a raster layer, an image service, a map server layer, or an internet tiled layer.</para>
        /// </summary>
        [DisplayName("Input Raster")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _inputRaster { get; set; }


        /// <summary>
        /// <para>Input Model</para>
        /// <para>The input model can be a file or a URL of a deep learning package (.dlpk) item from the portal.</para>
        /// </summary>
        [DisplayName("Input Model")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _inputModel { get; set; }


        /// <summary>
        /// <para>Output Name</para>
        /// <para>The name of the output feature service of detected objects.</para>
        /// </summary>
        [DisplayName("Output Name")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _outputName { get; set; }


        /// <summary>
        /// <para>Model Arguments</para>
        /// <para>The function model arguments are defined in the Python raster function class referenced by the input model. This is where you list additional deep learning parameters and arguments for experiments and refinement, such as a confidence threshold for fine tuning the sensitivity. The names of the arguments are populated by the tool from reading the Python module on the RA server.</para>
        /// </summary>
        [DisplayName("Model Arguments")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _modelArguments { get; set; } = null;


        /// <summary>
        /// <para>Non Maximum Suppression</para>
        /// <para><xdoc>
        ///   <para>Specifies whether non maximum suppression, where duplicate objects are identified and the duplicate feature with a lower confidence value is removed, will be performed.
        ///   <bulletList>
        ///     <bullet_item>Unchecked—All detected objects will be in the output feature class. This is the default.  </bullet_item><para/>
        ///     <bullet_item>Checked— Duplicate detected objects will be removed.  </bullet_item><para/>
        ///   </bulletList>
        ///   </para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Non Maximum Suppression")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _runNMS_value _runNMS { get; set; } = _runNMS_value._false;

        /// <summary>
        /// Allowed values for <see cref="_runNMS"/>; the GP string value is carried by <c>GPEnumValue</c>.
        /// </summary>
        public enum _runNMS_value
        {
            /// <summary>
            /// <para>NMS — duplicate detected objects will be removed.</para>
            /// </summary>
            [Description("NMS")]
            [GPEnumValue("true")]
            _true,

            /// <summary>
            /// <para>NO_NMS — all detected objects are kept. This is the default.</para>
            /// </summary>
            [Description("NO_NMS")]
            [GPEnumValue("false")]
            _false,

        }

        /// <summary>
        /// <para>Confidence Score Field</para>
        /// <para><xdoc>
        ///   <para>The field in the feature service that contains the confidence scores that will be used as output by the object detection method.</para>
        ///   <para>This parameter is required when the Non Maximum Suppression parameter is checked.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Confidence Score Field")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _confidenceScoreField { get; set; } = null;


        /// <summary>
        /// <para>Class Value Field</para>
        /// <para><xdoc>
        ///   <para>The name of the class value field in the feature service.</para>
        ///   <para>If a field name is not specified, a Classvalue or Value field will be used. If these fields do not exist, all records will be identified as belonging to one class.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Class Value Field")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _classValueField { get; set; } = null;


        /// <summary>
        /// <para>Max Overlap Ratio</para>
        /// <para>The maximum overlap ratio for two overlapping features, which is defined as the ratio of intersection area over union area. The default is 0.</para>
        /// </summary>
        [DisplayName("Max Overlap Ratio")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public double _maxOverlapRatio { get; set; } = 0;


        /// <summary>
        /// <para>Out Objects</para>
        /// <para>Derived output: the feature service of detected objects produced by the tool.</para>
        /// </summary>
        [DisplayName("Out Objects")]
        [Description("")]
        [Option(OptionTypeEnum.derived)]
        public object _outObjects { get; set; }


        /// <summary>
        /// <para>Processing Mode</para>
        /// <para><xdoc>
        ///   <para>Specifies how all raster items in a mosaic dataset or an image service will be processed. This parameter is applied when the input raster is a mosaic dataset or an image service.</para>
        ///   <bulletList>
        ///     <bullet_item>Process as mosaicked image—All raster items in the mosaic dataset or image service will be mosaicked together and processed. This is the default.</bullet_item><para/>
        ///     <bullet_item>Process all raster items separately—All raster items in the mosaic dataset or image service will be processed as separate images.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Processing Mode")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _processingMode_value _processingMode { get; set; } = _processingMode_value._PROCESS_AS_MOSAICKED_IMAGE;

        /// <summary>
        /// Allowed values for <see cref="_processingMode"/>; the GP string value is carried by <c>GPEnumValue</c>.
        /// </summary>
        public enum _processingMode_value
        {
            /// <summary>
            /// <para>Process as mosaicked image—All raster items in the mosaic dataset or image service will be mosaicked together and processed. This is the default.</para>
            /// </summary>
            [Description("Process as mosaicked image")]
            [GPEnumValue("PROCESS_AS_MOSAICKED_IMAGE")]
            _PROCESS_AS_MOSAICKED_IMAGE,

            /// <summary>
            /// <para>Process all raster items separately—All raster items in the mosaic dataset or image service will be processed as separate images.</para>
            /// </summary>
            [Description("Process all raster items separately")]
            [GPEnumValue("PROCESS_ITEMS_SEPARATELY")]
            _PROCESS_ITEMS_SEPARATELY,

        }

        /// <summary>
        /// Applies environment settings supported by this tool and returns <c>this</c> for chaining.
        /// Existing callers using the first four (named or positional) arguments are unaffected.
        /// </summary>
        /// <param name="cellSize">Cell Size environment.</param>
        /// <param name="extent">Extent environment.</param>
        /// <param name="outputCoordinateSystem">Output Coordinate System environment.</param>
        /// <param name="parallelProcessingFactor">Parallel Processing Factor environment.</param>
        /// <param name="processorType">Processor Type environment; added so every entry in <see cref="AcceptEnvironments"/> can be set.</param>
        /// <returns>This tool instance.</returns>
        public DetectObjectsUsingDeepLearning SetEnv(object cellSize = null, object extent = null, object outputCoordinateSystem = null, object parallelProcessingFactor = null, object processorType = null)
        {
            // NOTE(review): AcceptEnvironments advertises "processorType", but the original
            // SetEnv could not forward it. Assumes the base SetEnv exposes a processorType
            // named parameter — confirm against AbstractGPProcess.
            base.SetEnv(cellSize: cellSize, extent: extent, outputCoordinateSystem: outputCoordinateSystem, parallelProcessingFactor: parallelProcessingFactor, processorType: processorType);
            return this;
        }

    }

}