using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Attributes.DomainAttributes;
using Baci.Net.ToolKit.ArcGISProGeoprocessor.Models.Enums;
using System.Collections.Generic;
using System.ComponentModel;

namespace Baci.ArcGIS._SpatialAnalystTools._SegmentationandClassification
{
    /// <summary>
    /// <para>Export Training Data For Deep Learning</para>
    /// <para>Converts labeled vector or raster data into deep learning training datasets using a remote sensing image. The output will be a folder of image chips and a folder of metadata files in the specified format.</para>
    /// <para>使用遥感影像将标注的矢量或栅格数据转换为深度学习训练数据集。输出将是指定格式的图像芯片文件夹和元数据文件文件夹。</para>
    /// </summary>    
    [DisplayName("Export Training Data For Deep Learning")]
    public class ExportTrainingDataForDeepLearning : AbstractGPProcess
    {
        /// <summary>
        /// Creates the tool with no parameters set; assign required
        /// properties (<see cref="_in_raster"/>, <see cref="_out_folder"/>) before execution.
        /// </summary>
        public ExportTrainingDataForDeepLearning() { }

        /// <summary>
        /// Creates the tool with its two required parameters set.
        /// </summary>
        /// <param name="_in_raster">
        /// <para>Input Raster</para>
        /// <para>The input source imagery, typically multispectral imagery.
        /// Examples include multispectral satellite, drone, aerial, and National
        /// Agriculture Imagery Program (NAIP) imagery. The input can be a folder of images.</para>
        /// </param>
        /// <param name="_out_folder">
        /// <para>Output Folder</para>
        /// <para>The folder where the output image chips and metadata will be stored.
        /// The folder can also be a folder URL that uses a cloud storage connection file (*.acs).</para>
        /// </param>
        public ExportTrainingDataForDeepLearning(object _in_raster, object _out_folder)
        {
            // Only the required positional parameters are set here; every other
            // tool parameter keeps its property default.
            this._in_raster = _in_raster;
            this._out_folder = _out_folder;
        }
        /// <summary>Name of the toolbox that owns this tool.</summary>
        public override string ToolboxName => "Spatial Analyst Tools";

        /// <summary>Display name of the geoprocessing tool.</summary>
        public override string ToolName => "Export Training Data For Deep Learning";

        /// <summary>Qualified name used to invoke the tool from the geoprocessor.</summary>
        public override string CallName => "sa.ExportTrainingDataForDeepLearning";

        /// <summary>Environment settings this tool honors.</summary>
        public override List<string> AcceptEnvironments => new List<string> { "cellSize", "extent", "scratchWorkspace", "workspace" };

        /// <summary>Tool parameter values in the positional order the geoprocessor expects.</summary>
        public override object[] ParameterInfo => new object[] { _in_raster, _out_folder, _in_class_data, _image_chip_format.GetGPValue(), _tile_size_x, _tile_size_y, _stride_x, _stride_y, _output_nofeature_tiles.GetGPValue(), _metadata_format.GetGPValue(), _start_index, _class_value_field, _buffer_radius, _in_mask_polygons, _rotation_angle, _reference_system.GetGPValue(), _processing_mode.GetGPValue(), _blacken_around_feature.GetGPValue(), _crop_mode.GetGPValue(), _in_raster2 };

        /// <summary>
        /// <para>Input Raster</para>
        /// <para><xdoc>
        ///   <para>The input source imagery, typically multispectral imagery.</para>
        ///   <para>Examples of the types of input source imagery include multispectral satellite, drone, aerial, and National Agriculture Imagery Program (NAIP). The input can be a folder of images.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Input Raster")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _in_raster { get; set; }


        /// <summary>
        /// <para>Output Folder</para>
        /// <para><xdoc>
        ///   <para>The folder where the output image chips and metadata will be stored.</para>
        ///   <para>The folder can also be a folder URL that uses a cloud storage connection file (*.acs).</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Output Folder")]
        [Description("")]
        [Option(OptionTypeEnum.Must)]
        public object _out_folder { get; set; }


        /// <summary>
        /// <para>Input Feature Class Or Classified Raster Or Table</para>
        /// <para><xdoc>
        ///   <para>The training sample data in either vector or raster form.</para>
        ///   <para>Vector inputs should follow the training sample format generated using the Training Samples Manager pane. Raster inputs should follow a classified raster format generated by the Classify Raster tool. The raster input can also be from a folder of classified rasters. Input tables should follow a training sample format generated by the Label Objects for Deep Learning tool in the Training Samples Manager pane. Following the proper training sample format will produce optimal results with the statistical information; however, the input can also be a point feature class without a class value field or an integer raster without any class information.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Input Feature Class Or Classified Raster Or Table")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _in_class_data { get; set; } = null;


        /// <summary>
        /// <para>Image Format</para>
        /// <para><xdoc>
        ///   <para>Specifies the raster format that will be used for the image chip outputs.</para>
        ///   <para>The PNG and JPEG formats support up to three bands.</para>
        ///   <bulletList>
        ///     <bullet_item>TIFF format—TIFF format will be used.</bullet_item><para/>
        ///     <bullet_item>PNG format—PNG format will be used.</bullet_item><para/>
        ///     <bullet_item>JPEG format—JPEG format will be used.</bullet_item><para/>
        ///     <bullet_item>MRF (Meta Raster Format)—Meta Raster Format (MRF) will be used.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Image Format")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _image_chip_format_value _image_chip_format { get; set; } = _image_chip_format_value._TIFF;

        // Allowed values for the Image Format parameter; GPEnumValue carries the
        // string token passed to the geoprocessor.
        public enum _image_chip_format_value
        {
            /// <summary>
            /// <para>TIFF format</para>
            /// <para>TIFF format—TIFF format will be used. This is the property default.</para>
            /// </summary>
            [Description("TIFF format")]
            [GPEnumValue("TIFF")]
            _TIFF,

            /// <summary>
            /// <para>MRF (Meta Raster Format)</para>
            /// <para>MRF (Meta Raster Format)—Meta Raster Format (MRF) will be used.</para>
            /// </summary>
            [Description("MRF (Meta Raster Format)")]
            [GPEnumValue("MRF")]
            _MRF,

            /// <summary>
            /// <para>PNG format</para>
            /// <para>PNG format—PNG format will be used.</para>
            /// </summary>
            [Description("PNG format")]
            [GPEnumValue("PNG")]
            _PNG,

            /// <summary>
            /// <para>JPEG format</para>
            /// <para>JPEG format—JPEG format will be used.</para>
            /// </summary>
            [Description("JPEG format")]
            [GPEnumValue("JPEG")]
            _JPEG,

        }

        /// <summary>
        /// <para>Tile Size X</para>
        /// <para>The size of the image chips for the x dimension.</para>
        /// </summary>
        [DisplayName("Tile Size X")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public long _tile_size_x { get; set; } = 256;


        /// <summary>
        /// <para>Tile Size Y</para>
        /// <para>The size of the image chips for the y dimension.</para>
        /// </summary>
        [DisplayName("Tile Size Y")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public long _tile_size_y { get; set; } = 256;


        /// <summary>
        /// <para>Stride X</para>
        /// <para><xdoc>
        ///   <para>The distance to move in the x direction when creating the next image chips.</para>
        ///   <para>When stride is equal to tile size, there will be no overlap. When stride is equal to half the tile size, there will be 50 percent overlap.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Stride X")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public long _stride_x { get; set; } = 128;


        /// <summary>
        /// <para>Stride Y</para>
        /// <para><xdoc>
        ///   <para>The distance to move in the y direction when creating the next image chips.</para>
        ///   <para>When stride is equal to tile size, there will be no overlap. When stride is equal to half the tile size, there will be 50 percent overlap.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Stride Y")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public long _stride_y { get; set; } = 128;


        /// <summary>
        /// <para>Output No Feature Tiles</para>
        /// <para><xdoc>
        ///   <para>Specifies whether image chips that do not capture training samples will be exported.</para>
        ///   <bulletList>
        ///     <bullet_item>Checked—All image chips, including those that do not capture training samples, will be exported.</bullet_item><para/>
        ///     <bullet_item>Unchecked—Only image chips that capture training samples will be exported. This is the default.</bullet_item><para/>
        ///   </bulletList>
        ///   <para>If checked, image chips that do not capture labeled data will also be exported; if not checked, they will not be exported.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Output No Feature Tiles")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _output_nofeature_tiles_value _output_nofeature_tiles { get; set; } = _output_nofeature_tiles_value._false;

        // Allowed values for the Output No Feature Tiles parameter.
        public enum _output_nofeature_tiles_value
        {
            /// <summary>
            /// <para>ALL_TILES</para>
            /// <para>All image chips, including those that do not capture training samples, will be exported.</para>
            /// </summary>
            [Description("ALL_TILES")]
            [GPEnumValue("true")]
            _true,

            /// <summary>
            /// <para>ONLY_TILES_WITH_FEATURES</para>
            /// <para>Only image chips that capture training samples will be exported. This is the property default.</para>
            /// </summary>
            [Description("ONLY_TILES_WITH_FEATURES")]
            [GPEnumValue("false")]
            _false,

        }

        /// <summary>
        /// <para>Metadata Format</para>
        /// <para><xdoc>
        ///   <para>Specifies the format of the output metadata labels.</para>
        ///   <para>If the input training sample data is a feature class layer, such as a building layer or a standard classification training sample file, use the KITTI Labels or PASCAL Visual Object Classes option (KITTI_rectangles or PASCAL_VOC_rectangles in Python). The output metadata is a .txt file or an .xml file containing the training sample data contained in the minimum bounding rectangle. The name of the metadata file matches the input source image name. If the input training sample data is a class map, use the Classified Tiles option (Classified_Tiles in Python) as the output metadata format.</para>
        ///   <bulletList>
        ///     <bullet_item>KITTI Labels—The metadata follows the same format as the Karlsruhe Institute of Technology and Toyota Technological Institute (KITTI) Object Detection Evaluation dataset. The KITTI dataset is a vision benchmark suite. The label files are plain text files. All values, both numerical and strings, are separated by spaces, and each row corresponds to one object.This format is used for object detection.</bullet_item><para/>
        ///     <bullet_item>PASCAL Visual Object Classes—The metadata follows the same format as the Pattern Analysis, Statistical Modeling and Computational Learning, Visual Object Classes (PASCAL_VOC) dataset. The PASCAL VOC dataset is a standardized image dataset for object class recognition. The label files are in XML format and contain information about image name, class value, and bounding boxes.This format is used for object detection. This is the default.</bullet_item><para/>
        ///     <bullet_item>Classified Tiles—The output will be one classified image chip per input image chip. No other metadata for each image chip is used. Only the statistics output has more information on the classes, such as class names, class values, and output statistics.This format is primarily used for pixel classification. This format is also used for change detection when the output is one classified image chip from two image chips.</bullet_item><para/>
        ///     <bullet_item>RCNN Masks—The output will be image chips that have a mask on the areas where the sample exists. The model generates bounding boxes and segmentation masks for each instance of an object in the image. This format is based on Feature Pyramid Network (FPN) and a ResNet101 backbone in the deep learning framework model.This format is used for object detection.</bullet_item><para/>
        ///     <bullet_item>Labeled Tiles—Each output tile will be labeled with a specific class.This format is used for object classification.</bullet_item><para/>
        ///     <bullet_item>Multi-labeled Tiles—Each output tile will be labeled with one or more classes. For example, a tile may be labeled agriculture and also cloudy.This format is used for object classification.</bullet_item><para/>
        ///     <bullet_item>Export Tiles—The output will be image chips with no label.This format is used for image translation techniques, such as Pix2Pix and Super Resolution.</bullet_item><para/>
        ///     <bullet_item>CycleGAN—The output will be image chips with no label. This format is used for image translation technique CycleGAN, which is used to train images that do not overlap.</bullet_item><para/>
        ///   </bulletList>
        ///   <para>For the KITTI metadata format, 15 columns are created, but only 5 of them are used in the tool. The first column is the class value. The next 3 columns are skipped. Columns 5 through 8 define the minimum bounding rectangle, which is composed of four image coordinate locations: left, top, right, and bottom pixels. The minimum bounding rectangle encompasses the training chip used in the deep learning classifier. The remaining columns are not used.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Metadata Format")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _metadata_format_value _metadata_format { get; set; } = _metadata_format_value._PASCAL_VOC_rectangles;

        // Allowed values for the Metadata Format parameter.
        public enum _metadata_format_value
        {
            /// <summary>
            /// <para>KITTI Labels</para>
            /// <para>KITTI Labels—The metadata follows the same format as the Karlsruhe Institute of Technology and Toyota Technological Institute (KITTI) Object Detection Evaluation dataset. The KITTI dataset is a vision benchmark suite. The label files are plain text files. All values, both numerical and strings, are separated by spaces, and each row corresponds to one object.This format is used for object detection.</para>
            /// </summary>
            [Description("KITTI Labels")]
            [GPEnumValue("KITTI_rectangles")]
            _KITTI_rectangles,

            /// <summary>
            /// <para>PASCAL Visual Object Classes</para>
            /// <para>PASCAL Visual Object Classes—The metadata follows the same format as the Pattern Analysis, Statistical Modeling and Computational Learning, Visual Object Classes (PASCAL_VOC) dataset. The PASCAL VOC dataset is a standardized image dataset for object class recognition. The label files are in XML format and contain information about image name, class value, and bounding boxes.This format is used for object detection. This is the default.</para>
            /// </summary>
            [Description("PASCAL Visual Object Classes")]
            [GPEnumValue("PASCAL_VOC_rectangles")]
            _PASCAL_VOC_rectangles,

            /// <summary>
            /// <para>Classified Tiles</para>
            /// <para>Classified Tiles—The output will be one classified image chip per input image chip. No other metadata for each image chip is used. Only the statistics output has more information on the classes, such as class names, class values, and output statistics.This format is primarily used for pixel classification. This format is also used for change detection when the output is one classified image chip from two image chips.</para>
            /// </summary>
            [Description("Classified Tiles")]
            [GPEnumValue("Classified_Tiles")]
            _Classified_Tiles,

            /// <summary>
            /// <para>RCNN Masks</para>
            /// <para>RCNN Masks—The output will be image chips that have a mask on the areas where the sample exists. The model generates bounding boxes and segmentation masks for each instance of an object in the image. This format is based on Feature Pyramid Network (FPN) and a ResNet101 backbone in the deep learning framework model.This format is used for object detection.</para>
            /// </summary>
            [Description("RCNN Masks")]
            [GPEnumValue("RCNN_Masks")]
            _RCNN_Masks,

            /// <summary>
            /// <para>Labeled Tiles</para>
            /// <para>Labeled Tiles—Each output tile will be labeled with a specific class.This format is used for object classification.</para>
            /// </summary>
            [Description("Labeled Tiles")]
            [GPEnumValue("Labeled_Tiles")]
            _Labeled_Tiles,

            /// <summary>
            /// <para>Multi-labeled Tiles</para>
            /// <para>Multi-labeled Tiles—Each output tile will be labeled with one or more classes. For example, a tile may be labeled agriculture and also cloudy.This format is used for object classification.</para>
            /// </summary>
            [Description("Multi-labeled Tiles")]
            [GPEnumValue("MultiLabeled_Tiles")]
            _MultiLabeled_Tiles,

            /// <summary>
            /// <para>Export Tiles</para>
            /// <para>Export Tiles—The output will be image chips with no label.This format is used for image translation techniques, such as Pix2Pix and Super Resolution.</para>
            /// </summary>
            [Description("Export Tiles")]
            [GPEnumValue("Export_Tiles")]
            _Export_Tiles,

            /// <summary>
            /// <para>CycleGAN</para>
            /// <para>CycleGAN—The output will be image chips with no label. This format is used for image translation technique CycleGAN, which is used to train images that do not overlap.</para>
            /// </summary>
            [Description("CycleGAN")]
            [GPEnumValue("CycleGAN")]
            _CycleGAN,

        }

        /// <summary>
        /// <para>Start Index</para>
        /// <para>This parameter has been deprecated.</para>
        /// </summary>
        [DisplayName("Start Index")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public long _start_index { get; set; } = 0;


        /// <summary>
        /// <para>Class Value Field</para>
        /// <para>The field that contains the class values. If no field is specified, the system searches for a value or classvalue field. If the feature does not contain a class field, the system determines that all records belong to one class.</para>
        /// </summary>
        [DisplayName("Class Value Field")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _class_value_field { get; set; } = null;


        /// <summary>
        /// <para>Buffer Radius</para>
        /// <para><xdoc>
        ///   <para>The radius for a buffer around each training sample to delineate a training sample area. This allows you to create circular polygon training samples from points.</para>
        ///   <para>The linear unit of the Input Feature Class Or Classified Raster spatial reference is used.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Buffer Radius")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public double _buffer_radius { get; set; } = 0;


        /// <summary>
        /// <para>Input Mask Polygons</para>
        /// <para><xdoc>
        ///   <para>A polygon feature class that delineates the area where image chips will be created.</para>
        ///   <para>Only image chips that fall completely within the polygons will be created.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Input Mask Polygons")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _in_mask_polygons { get; set; } = null;


        /// <summary>
        /// <para>Rotation Angle</para>
        /// <para><xdoc>
        ///   <para>The rotation angle that will be used to generate additional image chips.</para>
        ///   <para>An image chip will be generated with a rotation angle of 0, which means no rotation. It will then be rotated at the specified angle to create an additional image chip. The same training samples will be captured at multiple angles in multiple image chips for data augmentation.</para>
        ///   <para>The default rotation angle is 0.</para>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Rotation Angle")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public double _rotation_angle { get; set; } = 0;


        /// <summary>
        /// <para>Reference System</para>
        /// <para><xdoc>
        ///   <para>Specifies the type of reference system that will be used to interpret the input image. The reference system specified must match the reference system used to train the deep learning model.</para>
        ///   <bulletList>
        ///     <bullet_item>Map space—A map-based coordinate system will be used. This is the default.</bullet_item><para/>
        ///     <bullet_item>Pixel space—Image space will be used, with no rotation and no distortion.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Reference System")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _reference_system_value _reference_system { get; set; } = _reference_system_value._MAP_SPACE;

        // Allowed values for the Reference System parameter.
        public enum _reference_system_value
        {
            /// <summary>
            /// <para>Map space</para>
            /// <para>Map space—A map-based coordinate system will be used. This is the default.</para>
            /// </summary>
            [Description("Map space")]
            [GPEnumValue("MAP_SPACE")]
            _MAP_SPACE,

            /// <summary>
            /// <para>Pixel space</para>
            /// <para>Pixel space—Image space will be used, with no rotation and no distortion.</para>
            /// </summary>
            [Description("Pixel space")]
            [GPEnumValue("PIXEL_SPACE")]
            _PIXEL_SPACE,

        }

        /// <summary>
        /// <para>Processing Mode</para>
        /// <para><xdoc>
        ///   <para>Specifies how all raster items in a mosaic dataset or an image service will be processed. This parameter is applied when the input raster is a mosaic dataset or an image service.</para>
        ///   <bulletList>
        ///     <bullet_item>Process as mosaicked image—All raster items in the mosaic dataset or image service will be mosaicked together and processed. This is the default.</bullet_item><para/>
        ///     <bullet_item>Process all raster items separately—All raster items in the mosaic dataset or image service will be processed as separate images.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// </summary>
        [DisplayName("Processing Mode")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _processing_mode_value _processing_mode { get; set; } = _processing_mode_value._PROCESS_AS_MOSAICKED_IMAGE;

        // Allowed values for the Processing Mode parameter.
        public enum _processing_mode_value
        {
            /// <summary>
            /// <para>Process as mosaicked image</para>
            /// <para>Process as mosaicked image—All raster items in the mosaic dataset or image service will be mosaicked together and processed. This is the default.</para>
            /// </summary>
            [Description("Process as mosaicked image")]
            [GPEnumValue("PROCESS_AS_MOSAICKED_IMAGE")]
            _PROCESS_AS_MOSAICKED_IMAGE,

            /// <summary>
            /// <para>Process all raster items separately</para>
            /// <para>Process all raster items separately—All raster items in the mosaic dataset or image service will be processed as separate images.</para>
            /// </summary>
            [Description("Process all raster items separately")]
            [GPEnumValue("PROCESS_ITEMS_SEPARATELY")]
            _PROCESS_ITEMS_SEPARATELY,

        }

        /// <summary>
        /// <para>Blacken Around Feature</para>
        /// <para><xdoc>
        ///   <para>Specifies whether the pixels around each object or feature in each image tile will be masked out.</para>
        ///   <para>This parameter only applies when the metadata format is set to Labeled Tiles and an input feature class or classified raster has been specified.</para>
        ///   <bulletList>
        ///     <bullet_item>Unchecked—Pixels surrounding objects or features will not be masked out. This is the default.</bullet_item><para/>
        ///     <bullet_item>Checked—Pixels surrounding objects or features will be masked out.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>指定是否遮罩每个图像切片中每个对象或要素周围的像素。</para>
        ///   <para>仅当元数据格式设置为标注切片且已指定输入要素类或分类栅格时，此参数才适用。</para>
        ///   <bulletList>
        ///     <bullet_item>未选中 - 对象或要素周围的像素不会被遮盖。这是默认设置。</bullet_item><para/>
        ///     <bullet_item>选中 - 对象或要素周围的像素将被遮罩。</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para>Optional. Default: <see cref="_blacken_around_feature_value._false"/> (no blackening).</para>
        /// </summary>
        [DisplayName("Blacken Around Feature")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _blacken_around_feature_value _blacken_around_feature { get; set; } = _blacken_around_feature_value._false;

        /// <summary>
        /// <para>Allowed values for the <see cref="_blacken_around_feature"/> parameter.</para>
        /// <para>Each member's <c>GPEnumValue</c> is the string token passed to the geoprocessing tool.</para>
        /// </summary>
        public enum _blacken_around_feature_value
        {
            /// <summary>
            /// <para>BLACKEN_AROUND_FEATURE</para>
            /// <para>Checked—Pixels surrounding objects or features will be masked out.</para>
            /// <para>选中 - 对象或要素周围的像素将被遮罩。</para>
            /// </summary>
            [Description("BLACKEN_AROUND_FEATURE")]
            [GPEnumValue("true")]
            _true,

            /// <summary>
            /// <para>NO_BLACKEN</para>
            /// <para>Unchecked—Pixels surrounding objects or features will not be masked out. This is the default.</para>
            /// <para>未选中 - 对象或要素周围的像素不会被遮盖。这是默认设置。</para>
            /// </summary>
            [Description("NO_BLACKEN")]
            [GPEnumValue("false")]
            _false,

        }

        /// <summary>
        /// <para>Crop Mode</para>
        /// <para><xdoc>
        ///   <para>Specifies whether the exported tiles will be cropped so that they are all the same size.</para>
        ///   <para>This parameter only applies when the metadata format is set to Labeled Tiles and an input feature class or classified raster has been specified.</para>
        ///   <bulletList>
        ///     <bullet_item>Fixed size—Exported tiles will be cropped to the same size and will center on the feature. This is the default.</bullet_item><para/>
        ///     <bullet_item>Bounding box—Exported tiles will be cropped so that the bounding geometry surrounds only the feature in the tile.</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>指定是否裁剪导出的切片，使其大小相同。</para>
        ///   <para>仅当元数据格式设置为标注切片且已指定输入要素类或分类栅格时，此参数才适用。</para>
        ///   <bulletList>
        ///     <bullet_item>固定大小 - 导出的切片将被裁剪为相同的大小，并以要素为中心。这是默认设置。</bullet_item><para/>
        ///     <bullet_item>边界框 - 将裁剪导出的切片，以便边界几何仅围绕切片中的要素。</bullet_item><para/>
        ///   </bulletList>
        /// </xdoc></para>
        /// <para>Optional. Default: <see cref="_crop_mode_value._FIXED_SIZE"/>.</para>
        /// </summary>
        [DisplayName("Crop Mode")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public _crop_mode_value _crop_mode { get; set; } = _crop_mode_value._FIXED_SIZE;

        /// <summary>
        /// <para>Allowed values for the <see cref="_crop_mode"/> parameter.</para>
        /// <para>Each member's <c>GPEnumValue</c> is the string token passed to the geoprocessing tool.</para>
        /// </summary>
        public enum _crop_mode_value
        {
            /// <summary>
            /// <para>Fixed size</para>
            /// <para>Fixed size—Exported tiles will be cropped to the same size and will center on the feature. This is the default.</para>
            /// <para>固定大小 - 导出的切片将被裁剪为相同的大小，并以要素为中心。这是默认设置。</para>
            /// </summary>
            [Description("Fixed size")]
            [GPEnumValue("FIXED_SIZE")]
            _FIXED_SIZE,

            /// <summary>
            /// <para>Bounding box</para>
            /// <para>Bounding box—Exported tiles will be cropped so that the bounding geometry surrounds only the feature in the tile.</para>
            /// <para>边界框 - 将裁剪导出的切片，以便边界几何仅围绕切片中的要素。</para>
            /// </summary>
            [Description("Bounding box")]
            [GPEnumValue("BOUNDING_BOX")]
            _BOUNDING_BOX,

        }

        /// <summary>
        /// <para>Additional Input Raster</para>
        /// <para><xdoc>
        ///   <para>An additional input imagery source for image translation methods.</para>
        ///   <para>This parameter is valid when the Metadata Format parameter is set to Classified Tiles, Export Tiles, or CycleGAN.</para>
        /// </xdoc></para>
        /// <para><xdoc>
        ///   <para>图像转换方法的附加输入影像源。</para>
        ///   <para>当元数据格式参数设置为分类切片、导出切片或 CycleGAN 时，此参数有效。</para>
        /// </xdoc></para>
        /// <para>Optional. Default: <c>null</c> (no additional raster).</para>
        /// </summary>
        [DisplayName("Additional Input Raster")]
        [Description("")]
        [Option(OptionTypeEnum.optional)]
        public object _in_raster2 { get; set; } = null;


        /// <summary>
        /// <para>Sets the geoprocessing environment settings honored by this tool.</para>
        /// <para>Only the environments relevant to this tool are forwarded to the base process.</para>
        /// </summary>
        /// <param name="cellSize">Cell Size environment setting, or <c>null</c> to leave unset.</param>
        /// <param name="extent">Output Extent environment setting, or <c>null</c> to leave unset.</param>
        /// <param name="scratchWorkspace">Scratch Workspace environment setting, or <c>null</c> to leave unset.</param>
        /// <param name="workspace">Current Workspace environment setting, or <c>null</c> to leave unset.</param>
        /// <returns>This instance, to allow fluent chaining.</returns>
        public ExportTrainingDataForDeepLearning SetEnv(object cellSize = null, object extent = null, object scratchWorkspace = null, object workspace = null)
        {
            // Forward to the shared base implementation using named arguments,
            // then return this instance so calls can be chained fluently.
            base.SetEnv(cellSize: cellSize, extent: extent, scratchWorkspace: scratchWorkspace, workspace: workspace);
            return this;
        }

    }

}