﻿#include "function_dl.h"
#include <para.h>
extern MyPARA my_para;

HTuple hv_DLDeviceHandles;

// Queries the available deep-learning inference devices via the HALCON
// AI accelerator interface (TensorRT) and stores the handles in the
// global hv_DLDeviceHandles.
//
// Errors are reported to the user with a modal message box; the function
// returns without throwing. Note the query itself can succeed while
// finding zero devices, so an explicit empty-result check is required.
void initHalconGpu()
{
//    QueryAvailableDlDevices("runtime", "gpu", &hv_DLDeviceHandles);
	try
	{
		QueryAvailableDlDevices("ai_accelerator_interface", "tensorrt", &hv_DLDeviceHandles);
	}
	catch (HException &HDevExpDefaultException)
	{
		QMessageBox::critical(nullptr, "", HDevExpDefaultException.ErrorMessage().Text());
		return;
	}
	// QueryAvailableDlDevices does not throw when no matching device exists;
	// it simply returns an empty tuple. Report this here so that later code
	// indexing hv_DLDeviceHandles[0] does not fail far from the cause.
	if (hv_DLDeviceHandles.Length() == 0)
	{
		QMessageBox::critical(nullptr, "", "No TensorRT-capable deep learning device was found.");
	}
}

#pragma region

///////////////////////////////////////////////////////////////////////////////
// File generated by HDevelop for HALCON/C++ Version 24.11.1.0
// Non-ASCII strings in this file are encoded in local-8-bit encoding (cp936).
// Ensure that the interface encoding is set to locale encoding by calling
// SetHcppInterfaceStringEncodingIsUtf8(false) at the beginning of the program.
// 
// Please note that non-ASCII characters in string constants are exported
// as octal codes in order to guarantee that the strings are correctly
// created on all systems, independent on any compiler settings.
// 
// Source files with different encoding should not be mixed in one project.
///////////////////////////////////////////////////////////////////////////////
#include "HalconCpp.h"
#include "HDevThread.h"



using namespace HalconCpp;

// Procedure declarations 
// Local procedures 
// Chapter: Deep Learning / Model
// Short Description: Compute zoom factors to fit an image to a target size. 
void calculate_dl_image_zoom_factors(HTuple hv_ImageWidth, HTuple hv_ImageHeight,
	HTuple hv_TargetWidth, HTuple hv_TargetHeight, HTuple hv_DLPreprocessParam, HTuple *hv_ZoomFactorWidth,
	HTuple *hv_ZoomFactorHeight);
// Chapter: Deep Learning / Model
// Short Description: Check the content of the parameter dictionary DLPreprocessParam. 
void check_dl_preprocess_param(HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Compute 3D normals. 
void compute_normals_xyz(HObject ho_x, HObject ho_y, HObject ho_z, HObject *ho_NXImage,
	HObject *ho_NYImage, HObject *ho_NZImage, HTuple hv_Smoothing);
// Chapter: Tools / Geometry
// Short Description: Convert the parameters of rectangles with format rectangle2 to the coordinates of its 4 corner-points. 
void convert_rect2_5to8param(HTuple hv_Row, HTuple hv_Col, HTuple hv_Length1, HTuple hv_Length2,
	HTuple hv_Phi, HTuple *hv_Row1, HTuple *hv_Col1, HTuple *hv_Row2, HTuple *hv_Col2,
	HTuple *hv_Row3, HTuple *hv_Col3, HTuple *hv_Row4, HTuple *hv_Col4);
// Chapter: Tools / Geometry
// Short Description: Convert for four-sided figures the coordinates of the 4 corner-points to the parameters of format rectangle2. 
void convert_rect2_8to5param(HTuple hv_Row1, HTuple hv_Col1, HTuple hv_Row2, HTuple hv_Col2,
	HTuple hv_Row3, HTuple hv_Col3, HTuple hv_Row4, HTuple hv_Col4, HTuple hv_ForceL1LargerL2,
	HTuple *hv_Row, HTuple *hv_Col, HTuple *hv_Length1, HTuple *hv_Length2, HTuple *hv_Phi);
// Chapter: Deep Learning / Model
// Short Description: Crops a given image object based on the given domain handling. 
void crop_dl_sample_image(HObject ho_Domain, HTuple hv_DLSample, HTuple hv_Key,
	HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Filter the instance segmentation masks of a DL sample based on a given selection. 
void filter_dl_sample_instance_segmentation_masks(HTuple hv_DLSample, HTuple hv_BBoxSelectionMask);
// Chapter: OCR / Deep OCR
// Short Description: Generate ground truth characters if they don't exist and words to characters mapping. 
void gen_dl_ocr_detection_gt_chars(HTuple hv_DLSampleTargets, HTuple hv_DLSample,
	HTuple hv_ScaleWidth, HTuple hv_ScaleHeight, HTupleVector/*{eTupleVector,Dim=1}*/ *hvec_WordsCharsMapping);
// Chapter: OCR / Deep OCR
// Short Description: Generate target link score map for ocr detection training. 
void gen_dl_ocr_detection_gt_link_map(HObject *ho_GtLinkMap, HTuple hv_ImageWidth,
	HTuple hv_ImageHeight, HTuple hv_DLSampleTargets, HTupleVector/*{eTupleVector,Dim=1}*/ hvec_WordToCharVec,
	HTuple hv_Alpha);
// Chapter: OCR / Deep OCR
// Short Description: Generate target orientation score maps for ocr detection training. 
void gen_dl_ocr_detection_gt_orientation_map(HObject *ho_GtOrientationMaps, HTuple hv_ImageWidth,
	HTuple hv_ImageHeight, HTuple hv_DLSample);
// Chapter: OCR / Deep OCR
// Short Description: Generate target text score map for ocr detection training. 
void gen_dl_ocr_detection_gt_score_map(HObject *ho_TargetText, HTuple hv_DLSample,
	HTuple hv_BoxCutoff, HTuple hv_RenderCutoff, HTuple hv_ImageWidth, HTuple hv_ImageHeight);
// Chapter: OCR / Deep OCR
// Short Description: Preprocess dl samples and generate targets and weights for ocr detection training. 
void gen_dl_ocr_detection_targets(HTuple hv_DLSampleOriginal, HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Generate link score map weight for ocr detection training. 
void gen_dl_ocr_detection_weight_link_map(HObject ho_LinkMap, HObject ho_TargetWeight,
	HObject *ho_TargetWeightLink, HTuple hv_LinkZeroWeightRadius);
// Chapter: OCR / Deep OCR
// Short Description: Generate orientation score map weight for ocr detection training. 
void gen_dl_ocr_detection_weight_orientation_map(HObject ho_InitialWeight, HObject *ho_OrientationTargetWeight,
	HTuple hv_DLSample);
// Chapter: OCR / Deep OCR
// Short Description: Generate text score map weight for ocr detection training. 
void gen_dl_ocr_detection_weight_score_map(HObject *ho_TargetWeightText, HTuple hv_ImageWidth,
	HTuple hv_ImageHeight, HTuple hv_DLSample, HTuple hv_BoxCutoff, HTuple hv_WSWeightRenderThreshold,
	HTuple hv_Confidence);
// Chapter: Deep Learning / Model
// Short Description: Store the given images in a tuple of dictionaries DLSamples. 
void gen_dl_samples_from_images(HObject ho_Images, HTuple *hv_DLSampleBatch);
// Chapter: OCR / Deep OCR
// Short Description: Generate a word to characters mapping. 
void gen_words_chars_mapping(HTuple hv_DLSample, HTupleVector/*{eTupleVector,Dim=1}*/ *hvec_WordsCharsMapping);
void inferenceDL(HObject ho_image, HObject *ho_region_segment, HTuple hv_model,
	HTuple hv_preprocess, HTuple *hv_name_segment, HTuple *hv_area_segment, HTuple *hv_row_segment,
	HTuple *hv_column_segment, HTuple *hv_ret);
void initInferenceDL (HTuple hv_RetrainedModelFileName, HTuple hv_PreprocessParamFileName,
    HTuple hv_index, HTuple *hv_Ret, HTuple *hv_DLModelHandle, HTuple *hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess 3D data for deep-learning-based training and inference. 
void preprocess_dl_model_3d_data(HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess anomaly images for evaluation and visualization of deep-learning-based anomaly detection or Global Context Anomaly Detection. 
void preprocess_dl_model_anomaly(HObject ho_AnomalyImages, HObject *ho_AnomalyImagesPreprocessed,
	HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess the provided DLSample image for augmentation purposes. 
void preprocess_dl_model_augmentation_data(HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the bounding boxes of type 'rectangle1' for a given sample. 
void preprocess_dl_model_bbox_rect1(HObject ho_ImageRaw, HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the bounding boxes of type 'rectangle2' for a given sample. 
void preprocess_dl_model_bbox_rect2(HObject ho_ImageRaw, HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess images for deep-learning-based training and inference. 
void preprocess_dl_model_images(HObject ho_Images, HObject *ho_ImagesPreprocessed,
	HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Preprocess images for deep-learning-based training and inference of Deep OCR detection models. 
void preprocess_dl_model_images_ocr_detection(HObject ho_Images, HObject *ho_ImagesPreprocessed,
	HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Preprocess images for deep-learning-based training and inference of Deep OCR recognition models. 
void preprocess_dl_model_images_ocr_recognition(HObject ho_Images, HObject *ho_ImagesPreprocessed,
	HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the instance segmentation masks for a sample given by the dictionary DLSample. 
void preprocess_dl_model_instance_masks(HObject ho_ImageRaw, HTuple hv_DLSample,
	HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Preprocess segmentation and weight images for deep-learning-based segmentation training and inference. 
void preprocess_dl_model_segmentations(HObject ho_ImagesRaw, HObject ho_Segmentations,
	HObject *ho_SegmentationsPreprocessed, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess given DLSamples according to the preprocessing parameters given in DLPreprocessParam. 
void preprocess_dl_samples(HTuple hv_DLSampleBatch, HTuple hv_DLPreprocessParam);
// Chapter: Image / Manipulation
// Short Description: Change value of ValuesToChange in Image to NewValue. 
void reassign_pixel_values(HObject ho_Image, HObject *ho_ImageOut, HTuple hv_ValuesToChange,
	HTuple hv_NewValue);
// Chapter: Deep Learning / Model
// Short Description: Remove invalid 3D pixels from a given domain. 
void remove_invalid_3d_pixels(HObject ho_ImageX, HObject ho_ImageY, HObject ho_ImageZ,
	HObject ho_Domain, HObject *ho_DomainOut, HTuple hv_InvalidPixelValue);
// Chapter: Deep Learning / Model
// Short Description: Replace legacy preprocessing parameters or values. 
void replace_legacy_preprocessing_parameters(HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Split rectangle2 into a number of rectangles. 
void split_rectangle2(HTuple hv_Row, HTuple hv_Column, HTuple hv_Phi, HTuple hv_Length1,
	HTuple hv_Length2, HTuple hv_NumSplits, HTuple *hv_SplitRow, HTuple *hv_SplitColumn,
	HTuple *hv_SplitPhi, HTuple *hv_SplitLength1Out, HTuple *hv_SplitLength2Out);

// Procedures 
// Local procedures 
// Chapter: Deep Learning / Model
// Short Description: Compute zoom factors to fit an image to a target size. 
// Computes the width/height zoom factors that map an image of size
// (ImageWidth, ImageHeight) onto the target size (TargetWidth, TargetHeight).
// For 'ocr_detection' models the aspect ratio is preserved (single uniform
// scale, clamped so neither output dimension drops below 1 px); for all
// other model types the axes are scaled independently.
// HDevelop-exported code: do not restructure by hand.
void calculate_dl_image_zoom_factors(HTuple hv_ImageWidth, HTuple hv_ImageHeight,
	HTuple hv_TargetWidth, HTuple hv_TargetHeight, HTuple hv_DLPreprocessParam, HTuple *hv_ZoomFactorWidth,
	HTuple *hv_ZoomFactorHeight)
{

	// Local iconic variables

	// Local control variables
	HTuple  hv_ScaleWidthUnit, hv_ScaleHeightUnit;
	HTuple  hv_PreserveAspectRatio, hv_Scale, hv___Tmp_Ctrl_Dict_Init_0;

	//Calculate the unit zoom factors, which zoom the input image to 1px.
	hv_ScaleWidthUnit = 1.0 / (hv_ImageWidth.TupleReal());
	hv_ScaleHeightUnit = 1.0 / (hv_ImageHeight.TupleReal());
	//
	//Calculate the required zoom factors for the available target size.
	(*hv_ZoomFactorWidth) = hv_TargetWidth * hv_ScaleWidthUnit;
	(*hv_ZoomFactorHeight) = hv_TargetHeight * hv_ScaleHeightUnit;
	//
	//Aspect-ratio preserving zoom is supported for model type 'ocr_detection' only.
	//A temporary dict {comp: 'ocr_detection'} is concatenated to the preprocess
	//dict so TupleTestEqualDictItem can compare DLPreprocessParam.model_type
	//against the 'comp' entry; the result is 1 iff model_type == 'ocr_detection'.
	CreateDict(&hv___Tmp_Ctrl_Dict_Init_0);
	SetDictTuple(hv___Tmp_Ctrl_Dict_Init_0, "comp", "ocr_detection");
	hv_PreserveAspectRatio = (hv_DLPreprocessParam.TupleConcat(hv___Tmp_Ctrl_Dict_Init_0)).TupleTestEqualDictItem("model_type", "comp");
	//Overwriting the handle with HNULL drops the reference to the temp dict.
	hv___Tmp_Ctrl_Dict_Init_0 = HTuple::TupleConstant("HNULL");
	//
	if (0 != hv_PreserveAspectRatio)
	{
		//
		//Use smaller scaling factor, which results in unfilled domain
		//on the respective other axis.
		hv_Scale = (*hv_ZoomFactorWidth).TupleMin2((*hv_ZoomFactorHeight));
		//Ensure that the zoom factors result in lengths of at least 1px.
		(*hv_ZoomFactorWidth) = hv_Scale.TupleMax2(hv_ScaleWidthUnit);
		(*hv_ZoomFactorHeight) = hv_Scale.TupleMax2(hv_ScaleHeightUnit);
	}
	return;
}

// Chapter: Deep Learning / Model
// Short Description: Check the content of the parameter dictionary DLPreprocessParam. 
// Validates the DL preprocessing parameter dictionary DLPreprocessParam.
// Throws an HException describing the first problem found (missing key,
// unknown key, invalid type, invalid value, or inconsistent combination);
// returns silently when all checks pass or when 'check_params' is false.
// HDevelop-exported code: do not restructure by hand.
void check_dl_preprocess_param(HTuple hv_DLPreprocessParam)
{

	// Local iconic variables

	// Local control variables
	HTuple  hv_CheckParams, hv_KeyExists, hv_DLModelType;
	HTuple  hv_Exception, hv_SupportedModelTypes, hv_Index;
	HTuple  hv_ParamNamesGeneral, hv_ParamNamesSegmentation;
	HTuple  hv_ParamNamesDetectionOptional, hv_ParamNamesPreprocessingOptional;
	HTuple  hv_ParamNames3DGrippingPointsOptional, hv_ParamNamesAll;
	HTuple  hv_ParamNames, hv_KeysExists, hv_I, hv_Exists, hv_InputKeys;
	HTuple  hv_Key, hv_Value, hv_Indices, hv_ValidValues, hv_ValidTypes;
	HTuple  hv_V, hv_T, hv_IsInt, hv_ValidTypesListing, hv_ValidValueListing;
	HTuple  hv_EmptyStrings, hv_ImageRangeMinExists, hv_ImageRangeMaxExists;
	HTuple  hv_ImageRangeMin, hv_ImageRangeMax, hv_IndexParam;
	HTuple  hv_SetBackgroundID, hv_ClassIDsBackground, hv_Intersection;
	HTuple  hv_IgnoreClassIDs, hv_KnownClasses, hv_IgnoreClassID;
	HTuple  hv_OptionalKeysExist, hv_InstanceType, hv_IsInstanceSegmentation;
	HTuple  hv_IgnoreDirection, hv_ClassIDsNoOrientation, hv_SemTypes;

	//
	//This procedure checks a dictionary with parameters for DL preprocessing.
	//
	hv_CheckParams = 1;
	//If check_params is set to false, do not check anything.
	GetDictParam(hv_DLPreprocessParam, "key_exists", "check_params", &hv_KeyExists);
	if (0 != hv_KeyExists)
	{
		GetDictTuple(hv_DLPreprocessParam, "check_params", &hv_CheckParams);
		if (0 != (hv_CheckParams.TupleNot()))
		{
			return;
		}
	}
	//
	//'model_type' is mandatory; a missing key raises inside GetDictTuple and
	//is converted here into a more descriptive exception.
	try
	{
		GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_DLModelType);
	}
	// catch (Exception) 
	catch (HException &HDevExpDefaultException)
	{
		HDevExpDefaultException.ToHTuple(&hv_Exception);
		throw HException(HTuple(HTuple("DLPreprocessParam needs the parameter: '") + "model_type") + "'");
	}
	//
	//Check for correct model type.
	hv_SupportedModelTypes.Clear();
	hv_SupportedModelTypes[0] = "counting";
	hv_SupportedModelTypes[1] = "3d_gripping_point_detection";
	hv_SupportedModelTypes[2] = "anomaly_detection";
	hv_SupportedModelTypes[3] = "classification";
	hv_SupportedModelTypes[4] = "detection";
	hv_SupportedModelTypes[5] = "gc_anomaly_detection";
	hv_SupportedModelTypes[6] = "multi_label_classification";
	hv_SupportedModelTypes[7] = "ocr_recognition";
	hv_SupportedModelTypes[8] = "ocr_detection";
	hv_SupportedModelTypes[9] = "segmentation";
	//TupleFind yields -1 (or an empty tuple) when the type is not listed.
	TupleFind(hv_SupportedModelTypes, hv_DLModelType, &hv_Index);
	if (0 != (HTuple(int(hv_Index == -1)).TupleOr(int(hv_Index == HTuple()))))
	{
		throw HException(HTuple("Only models of type '3d_gripping_point_detection', 'anomaly_detection', 'classification', 'detection', 'gc_anomaly_detection', 'multi_label_classification', 'ocr_recognition', 'ocr_detection' or 'segmentation' are supported"));
		return;
	}
	//
	//Parameter names that are required.
	//General parameters.
	hv_ParamNamesGeneral.Clear();
	hv_ParamNamesGeneral[0] = "model_type";
	hv_ParamNamesGeneral[1] = "image_width";
	hv_ParamNamesGeneral[2] = "image_height";
	hv_ParamNamesGeneral[3] = "image_num_channels";
	hv_ParamNamesGeneral[4] = "image_range_min";
	hv_ParamNamesGeneral[5] = "image_range_max";
	hv_ParamNamesGeneral[6] = "normalization_type";
	hv_ParamNamesGeneral[7] = "domain_handling";
	//Segmentation specific parameters.
	hv_ParamNamesSegmentation.Clear();
	hv_ParamNamesSegmentation[0] = "ignore_class_ids";
	hv_ParamNamesSegmentation[1] = "set_background_id";
	hv_ParamNamesSegmentation[2] = "class_ids_background";
	//Detection specific parameters.
	hv_ParamNamesDetectionOptional.Clear();
	hv_ParamNamesDetectionOptional[0] = "instance_type";
	hv_ParamNamesDetectionOptional[1] = "ignore_direction";
	hv_ParamNamesDetectionOptional[2] = "class_ids_no_orientation";
	hv_ParamNamesDetectionOptional[3] = "instance_segmentation";
	//Optional preprocessing parameters.
	hv_ParamNamesPreprocessingOptional.Clear();
	hv_ParamNamesPreprocessingOptional[0] = "mean_values_normalization";
	hv_ParamNamesPreprocessingOptional[1] = "deviation_values_normalization";
	hv_ParamNamesPreprocessingOptional[2] = "check_params";
	hv_ParamNamesPreprocessingOptional[3] = "augmentation";
	//3D Gripping Point Detection specific parameters.
	hv_ParamNames3DGrippingPointsOptional.Clear();
	hv_ParamNames3DGrippingPointsOptional[0] = "min_z";
	hv_ParamNames3DGrippingPointsOptional[1] = "max_z";
	hv_ParamNames3DGrippingPointsOptional[2] = "normal_image_width";
	hv_ParamNames3DGrippingPointsOptional[3] = "normal_image_height";
	//All parameters
	hv_ParamNamesAll.Clear();
	hv_ParamNamesAll.Append(hv_ParamNamesGeneral);
	hv_ParamNamesAll.Append(hv_ParamNamesSegmentation);
	hv_ParamNamesAll.Append(hv_ParamNamesDetectionOptional);
	hv_ParamNamesAll.Append(hv_ParamNames3DGrippingPointsOptional);
	hv_ParamNamesAll.Append(hv_ParamNamesPreprocessingOptional);
	hv_ParamNames = hv_ParamNamesGeneral;
	if (0 != (HTuple(int(hv_DLModelType == HTuple("segmentation"))).TupleOr(int(hv_DLModelType == HTuple("3d_gripping_point_detection")))))
	{
		//Extend ParamNames for models of type segmentation.
		hv_ParamNames = hv_ParamNames.TupleConcat(hv_ParamNamesSegmentation);
	}
	//
	//Check if legacy parameter exist.
	//Otherwise map it to the legal parameter.
	replace_legacy_preprocessing_parameters(hv_DLPreprocessParam);
	//
	//Check that all necessary parameters are included.
	//
	GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNames, &hv_KeysExists);
	if (0 != (int(((hv_KeysExists.TupleEqualElem(0)).TupleSum()) > 0)))
	{
		{
			//NOTE(review): this loop bound lacks the '-1' used by the sibling
			//loops below, so hv_I would reach TupleLength() itself. It is only
			//entered when at least one key is missing, and the throw fires at
			//or before the last valid index, so the out-of-range access is
			//never reached in practice. Generated code — left as exported.
			HTuple end_val54 = hv_KeysExists.TupleLength();
			HTuple step_val54 = 1;
			for (hv_I = 0; hv_I.Continue(end_val54, step_val54); hv_I += step_val54)
			{
				hv_Exists = HTuple(hv_KeysExists[hv_I]);
				if (0 != (hv_Exists.TupleNot()))
				{
					throw HException(("DLPreprocessParam needs the parameter: '" + HTuple(hv_ParamNames[hv_I])) + "'");
				}
			}
		}
	}
	//
	//Check the keys provided: every key must be known, and selected keys
	//must additionally carry values/types from a fixed whitelist.
	GetDictParam(hv_DLPreprocessParam, "keys", HTuple(), &hv_InputKeys);
	{
		HTuple end_val64 = (hv_InputKeys.TupleLength()) - 1;
		HTuple step_val64 = 1;
		for (hv_I = 0; hv_I.Continue(end_val64, step_val64); hv_I += step_val64)
		{
			hv_Key = HTuple(hv_InputKeys[hv_I]);
			GetDictTuple(hv_DLPreprocessParam, hv_Key, &hv_Value);
			//Check that the key is known.
			TupleFind(hv_ParamNamesAll, hv_Key, &hv_Indices);
			if (0 != (int(hv_Indices == -1)))
			{
				throw HException(("Unknown key for DLPreprocessParam: '" + HTuple(hv_InputKeys[hv_I])) + "'");
				return;
			}
			//Set expected values and types for the current key; empty tuples
			//mean "no restriction".
			hv_ValidValues = HTuple();
			hv_ValidTypes = HTuple();
			if (0 != (int(hv_Key == HTuple("normalization_type"))))
			{
				hv_ValidValues.Clear();
				hv_ValidValues[0] = "all_channels";
				hv_ValidValues[1] = "first_channel";
				hv_ValidValues[2] = "constant_values";
				hv_ValidValues[3] = "none";
			}
			else if (0 != (int(hv_Key == HTuple("domain_handling"))))
			{
				//'keep_domain' is only allowed for (gc_)anomaly_detection and
				//3d_gripping_point_detection models.
				if (0 != (HTuple(int(hv_DLModelType == HTuple("anomaly_detection"))).TupleOr(int(hv_DLModelType == HTuple("gc_anomaly_detection")))))
				{
					hv_ValidValues.Clear();
					hv_ValidValues[0] = "full_domain";
					hv_ValidValues[1] = "crop_domain";
					hv_ValidValues[2] = "keep_domain";
				}
				else if (0 != (int(hv_DLModelType == HTuple("3d_gripping_point_detection"))))
				{
					hv_ValidValues.Clear();
					hv_ValidValues[0] = "full_domain";
					hv_ValidValues[1] = "crop_domain";
					hv_ValidValues[2] = "keep_domain";
				}
				else
				{
					hv_ValidValues.Clear();
					hv_ValidValues[0] = "full_domain";
					hv_ValidValues[1] = "crop_domain";
				}
			}
			else if (0 != (int(hv_Key == HTuple("model_type"))))
			{
				hv_ValidValues.Clear();
				hv_ValidValues[0] = "counting";
				hv_ValidValues[1] = "3d_gripping_point_detection";
				hv_ValidValues[2] = "anomaly_detection";
				hv_ValidValues[3] = "classification";
				hv_ValidValues[4] = "detection";
				hv_ValidValues[5] = "gc_anomaly_detection";
				hv_ValidValues[6] = "multi_label_classification";
				hv_ValidValues[7] = "ocr_recognition";
				hv_ValidValues[8] = "ocr_detection";
				hv_ValidValues[9] = "segmentation";
			}
			else if (0 != (int(hv_Key == HTuple("augmentation"))))
			{
				hv_ValidValues.Clear();
				hv_ValidValues[0] = "true";
				hv_ValidValues[1] = "false";
			}
			else if (0 != (int(hv_Key == HTuple("set_background_id"))))
			{
				hv_ValidTypes = "int";
			}
			else if (0 != (int(hv_Key == HTuple("class_ids_background"))))
			{
				hv_ValidTypes = "int";
			}
			//Check that type is valid.
			if (0 != (int((hv_ValidTypes.TupleLength()) > 0)))
			{
				{
					HTuple end_val97 = (hv_ValidTypes.TupleLength()) - 1;
					HTuple step_val97 = 1;
					for (hv_V = 0; hv_V.Continue(end_val97, step_val97); hv_V += step_val97)
					{
						hv_T = HTuple(hv_ValidTypes[hv_V]);
						if (0 != (int(hv_T == HTuple("int"))))
						{
							TupleIsInt(hv_Value, &hv_IsInt);
							if (0 != (hv_IsInt.TupleNot()))
							{
								//Build a readable, comma-separated listing of
								//the accepted types for the error message.
								hv_ValidTypes = ("'" + hv_ValidTypes) + "'";
								if (0 != (int((hv_ValidTypes.TupleLength()) < 2)))
								{
									hv_ValidTypesListing = hv_ValidTypes;
								}
								else
								{
									hv_ValidTypesListing = (((hv_ValidTypes.TupleSelectRange(0, HTuple(0).TupleMax2((hv_ValidTypes.TupleLength()) - 2))) + HTuple(", ")) + HTuple(hv_ValidTypes[(hv_ValidTypes.TupleLength()) - 1])).TupleSum();
								}
								throw HException(((((("The value given in the key '" + hv_Key) + "' of DLPreprocessParam is invalid. Valid types are: ") + hv_ValidTypesListing) + ". The given value was '") + hv_Value) + "'.");
								return;
							}
						}
						else
						{
							throw HException("Internal error. Unknown valid type.");
						}
					}
				}
			}
			//Check that value is valid.
			if (0 != (int((hv_ValidValues.TupleLength()) > 0)))
			{
				TupleFindFirst(hv_ValidValues, hv_Value, &hv_Index);
				if (0 != (int(hv_Index == -1)))
				{
					//Build a readable, comma-separated listing of the accepted
					//values for the error message.
					hv_ValidValues = ("'" + hv_ValidValues) + "'";
					if (0 != (int((hv_ValidValues.TupleLength()) < 2)))
					{
						hv_ValidValueListing = hv_ValidValues;
					}
					else
					{
						hv_EmptyStrings = HTuple((hv_ValidValues.TupleLength()) - 2, "");
						hv_ValidValueListing = (((hv_ValidValues.TupleSelectRange(0, HTuple(0).TupleMax2((hv_ValidValues.TupleLength()) - 2))) + HTuple(", ")) + (hv_EmptyStrings.TupleConcat(HTuple(hv_ValidValues[(hv_ValidValues.TupleLength()) - 1])))).TupleSum();
					}
					throw HException(((((("The value given in the key '" + hv_Key) + "' of DLPreprocessParam is invalid. Valid values are: ") + hv_ValidValueListing) + ". The given value was '") + hv_Value) + "'.");
				}
			}
		}
	}
	//
	//Check the correct setting of ImageRangeMin and ImageRangeMax.
	if (0 != (HTuple(HTuple(int(hv_DLModelType == HTuple("classification"))).TupleOr(int(hv_DLModelType == HTuple("multi_label_classification")))).TupleOr(int(hv_DLModelType == HTuple("detection")))))
	{
		//Check ImageRangeMin and ImageRangeMax.
		GetDictParam(hv_DLPreprocessParam, "key_exists", "image_range_min", &hv_ImageRangeMinExists);
		GetDictParam(hv_DLPreprocessParam, "key_exists", "image_range_max", &hv_ImageRangeMaxExists);
		//If they are present, check that they are set correctly.
		if (0 != hv_ImageRangeMinExists)
		{
			GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_ImageRangeMin);
			if (0 != (int(hv_ImageRangeMin != -127)))
			{
				throw HException(("For model type " + hv_DLModelType) + " ImageRangeMin has to be -127.");
			}
		}
		if (0 != hv_ImageRangeMaxExists)
		{
			GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_ImageRangeMax);
			if (0 != (int(hv_ImageRangeMax != 128)))
			{
				throw HException(("For model type " + hv_DLModelType) + " ImageRangeMax has to be 128.");
			}
		}
	}
	//
	//Check segmentation specific parameters.
	//(3d_gripping_point_detection shares the segmentation parameter set but
	//additionally requires all of these parameters to be empty.)
	if (0 != (HTuple(int(hv_DLModelType == HTuple("segmentation"))).TupleOr(int(hv_DLModelType == HTuple("3d_gripping_point_detection")))))
	{
		//Check if detection specific parameters are set.
		GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNamesDetectionOptional,
			&hv_KeysExists);
		//If they are present, check that they are [].
		{
			HTuple end_val157 = (hv_ParamNamesDetectionOptional.TupleLength()) - 1;
			HTuple step_val157 = 1;
			for (hv_IndexParam = 0; hv_IndexParam.Continue(end_val157, step_val157); hv_IndexParam += step_val157)
			{
				if (0 != (HTuple(hv_KeysExists[hv_IndexParam])))
				{
					GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[hv_IndexParam]),
						&hv_Value);
					if (0 != (int(hv_Value != HTuple())))
					{
						throw HException(((("The preprocessing parameter '" + HTuple(hv_ParamNamesDetectionOptional[hv_IndexParam])) + "' was set to ") + hv_Value) + HTuple(" but for segmentation it should be set to [], as it is not used for this method."));
					}
				}
			}
		}
		//Check 'set_background_id'.
		GetDictTuple(hv_DLPreprocessParam, "set_background_id", &hv_SetBackgroundID);
		if (0 != (HTuple(int(hv_SetBackgroundID != HTuple())).TupleAnd(int(hv_DLModelType == HTuple("3d_gripping_point_detection")))))
		{
			throw HException(HTuple(HTuple("The preprocessing parameter '") + "set_background_id") + HTuple("' should be set to [] for 3d_gripping_point_detection, as it is not used for this method."));
		}
		if (0 != (int((hv_SetBackgroundID.TupleLength()) > 1)))
		{
			throw HException("Only one class_id as 'set_background_id' allowed.");
		}
		//Check 'class_ids_background'.
		GetDictTuple(hv_DLPreprocessParam, "class_ids_background", &hv_ClassIDsBackground);
		if (0 != (HTuple(int(hv_ClassIDsBackground != HTuple())).TupleAnd(int(hv_DLModelType == HTuple("3d_gripping_point_detection")))))
		{
			throw HException(HTuple(HTuple("The preprocessing parameter '") + "class_ids_background") + HTuple("' should be set to [] for 3d_gripping_point_detection, as it is not used for this method."));
		}
		//'set_background_id' and 'class_ids_background' must be set together
		//(either both non-empty or both empty).
		if (0 != (HTuple(HTuple(int((hv_SetBackgroundID.TupleLength()) > 0)).TupleAnd(HTuple(int((hv_ClassIDsBackground.TupleLength()) > 0)).TupleNot())).TupleOr(HTuple(int((hv_ClassIDsBackground.TupleLength()) > 0)).TupleAnd(HTuple(int((hv_SetBackgroundID.TupleLength()) > 0)).TupleNot()))))
		{
			throw HException("Both keys 'set_background_id' and 'class_ids_background' are required.");
		}
		//Check that 'class_ids_background' and 'set_background_id' are disjoint.
		if (0 != (int((hv_SetBackgroundID.TupleLength()) > 0)))
		{
			TupleIntersection(hv_SetBackgroundID, hv_ClassIDsBackground, &hv_Intersection);
			if (0 != (hv_Intersection.TupleLength()))
			{
				throw HException("Class IDs in 'set_background_id' and 'class_ids_background' need to be disjoint.");
			}
		}
		//Check 'ignore_class_ids'.
		GetDictTuple(hv_DLPreprocessParam, "ignore_class_ids", &hv_IgnoreClassIDs);
		if (0 != (HTuple(int(hv_IgnoreClassIDs != HTuple())).TupleAnd(int(hv_DLModelType == HTuple("3d_gripping_point_detection")))))
		{
			throw HException(HTuple(HTuple("The preprocessing parameter '") + "ignore_class_ids") + HTuple("' should be set to [] for 3d_gripping_point_detection, as it is not used for this method."));
		}
		//No ignore class may also appear among the background classes.
		hv_KnownClasses.Clear();
		hv_KnownClasses.Append(hv_SetBackgroundID);
		hv_KnownClasses.Append(hv_ClassIDsBackground);
		{
			HTuple end_val194 = (hv_IgnoreClassIDs.TupleLength()) - 1;
			HTuple step_val194 = 1;
			for (hv_I = 0; hv_I.Continue(end_val194, step_val194); hv_I += step_val194)
			{
				hv_IgnoreClassID = HTuple(hv_IgnoreClassIDs[hv_I]);
				TupleFindFirst(hv_KnownClasses, hv_IgnoreClassID, &hv_Index);
				if (0 != (HTuple(int((hv_Index.TupleLength()) > 0)).TupleAnd(int(hv_Index != -1))))
				{
					throw HException("The given 'ignore_class_ids' must not be included in the 'class_ids_background' or 'set_background_id'.");
				}
			}
		}
	}
	else if (0 != (int(hv_DLModelType == HTuple("detection"))))
	{
		//Check if segmentation specific parameters are set.
		GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNamesSegmentation, &hv_KeysExists);
		//If they are present, check that they are [].
		{
			HTuple end_val205 = (hv_ParamNamesSegmentation.TupleLength()) - 1;
			HTuple step_val205 = 1;
			for (hv_IndexParam = 0; hv_IndexParam.Continue(end_val205, step_val205); hv_IndexParam += step_val205)
			{
				if (0 != (HTuple(hv_KeysExists[hv_IndexParam])))
				{
					GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesSegmentation[hv_IndexParam]),
						&hv_Value);
					if (0 != (int(hv_Value != HTuple())))
					{
						throw HException(((("The preprocessing parameter '" + HTuple(hv_ParamNamesSegmentation[hv_IndexParam])) + "' was set to ") + hv_Value) + HTuple(" but for detection it should be set to [], as it is not used for this method."));
					}
				}
			}
		}
		//Check optional parameters.
		//OptionalKeysExist indices follow hv_ParamNamesDetectionOptional:
		//[0]=instance_type, [1]=ignore_direction,
		//[2]=class_ids_no_orientation, [3]=instance_segmentation.
		GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNamesDetectionOptional,
			&hv_OptionalKeysExist);
		if (0 != (HTuple(hv_OptionalKeysExist[0])))
		{
			//Check 'instance_type'.
			GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[0]),
				&hv_InstanceType);
			if (0 != (int((((HTuple("rectangle1").Append("rectangle2")).Append("mask")).TupleFind(hv_InstanceType)) == -1)))
			{
				throw HException(("Invalid generic parameter for 'instance_type': " + hv_InstanceType) + HTuple(", only 'rectangle1' and 'rectangle2' are allowed"));
			}
		}
		//If instance_segmentation is set we might overwrite the instance_type for the preprocessing.
		if (0 != (HTuple(hv_OptionalKeysExist[3])))
		{
			GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[3]),
				&hv_IsInstanceSegmentation);
			if (0 != (int(((((HTuple(1).Append(0)).Append("true")).Append("false")).TupleFind(hv_IsInstanceSegmentation)) == -1)))
			{
				throw HException(("Invalid generic parameter for 'instance_segmentation': " + hv_IsInstanceSegmentation) + HTuple(", only true, false, 'true' and 'false' are allowed"));
			}
		}
		if (0 != (HTuple(hv_OptionalKeysExist[1])))
		{
			//Check 'ignore_direction'.
			GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[1]),
				&hv_IgnoreDirection);
			if (0 != (int(((HTuple(1).Append(0)).TupleFind(hv_IgnoreDirection)) == -1)))
			{
				throw HException(("Invalid generic parameter for 'ignore_direction': " + hv_IgnoreDirection) + HTuple(", only true and false are allowed"));
			}
		}
		if (0 != (HTuple(hv_OptionalKeysExist[2])))
		{
			//Check 'class_ids_no_orientation': all elements must be
			//non-negative integers (or the tuple must be empty).
			GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[2]),
				&hv_ClassIDsNoOrientation);
			TupleSemTypeElem(hv_ClassIDsNoOrientation, &hv_SemTypes);
			if (0 != (HTuple(int(hv_ClassIDsNoOrientation != HTuple())).TupleAnd(int(((hv_SemTypes.TupleEqualElem("integer")).TupleSum()) != (hv_ClassIDsNoOrientation.TupleLength())))))
			{
				throw HException(("Invalid generic parameter for 'class_ids_no_orientation': " + hv_ClassIDsNoOrientation) + HTuple(", only integers are allowed"));
			}
			else
			{
				if (0 != (HTuple(int(hv_ClassIDsNoOrientation != HTuple())).TupleAnd(int(((hv_ClassIDsNoOrientation.TupleGreaterEqualElem(0)).TupleSum()) != (hv_ClassIDsNoOrientation.TupleLength())))))
				{
					throw HException(("Invalid generic parameter for 'class_ids_no_orientation': " + hv_ClassIDsNoOrientation) + HTuple(", only non-negative integers are allowed"));
				}
			}
		}
	}
	//
	return;
}

// Chapter: Deep Learning / Model
// Short Description: Compute 3D normals. 
void compute_normals_xyz(HObject ho_x, HObject ho_y, HObject ho_z, HObject *ho_NXImage,
	HObject *ho_NYImage, HObject *ho_NZImage, HTuple hv_Smoothing)
{

	// Local iconic variables
	HObject  ho_ScaledX, ho_ScaledY, ho_ScaledZ;
	HObject  ho_DxRow, ho_DxCol, ho_DyRow, ho_DyCol, ho_DzRow, ho_DzCol;
	HObject  ho_ProdA, ho_ProdB, ho_RawNX, ho_RawNY, ho_RawNZ;
	HObject  ho_SqNX, ho_SqNY, ho_SqNZ, ho_SumXY, ho_SumXYZ, ho_Norm;

	// Local control variables
	HTuple  hv_ScaleFactor, hv_RowMask, hv_ColMask;

	//Estimate per-pixel surface normals from the X/Y/Z coordinate images.
	//
	//Scale the coordinate images to avoid numerical issues with tiny values.
	hv_ScaleFactor = 1e6;
	ScaleImage(ho_x, &ho_ScaledX, hv_ScaleFactor, 0);
	ScaleImage(ho_y, &ho_ScaledY, hv_ScaleFactor, 0);
	ScaleImage(ho_z, &ho_ScaledZ, hv_ScaleFactor, 0);

	//Finite-difference kernels in row and column direction.
	//Mask layout: height, width, normalization factor, coefficients.
	hv_RowMask = ((((HTuple(2).Append(1)).Append(1.0)).Append(1)).Append(-1));
	hv_ColMask = ((((HTuple(1).Append(2)).Append(1.0)).Append(-1)).Append(1));
	ConvolImage(ho_ScaledX, &ho_DxRow, hv_RowMask, "continued");
	ConvolImage(ho_ScaledX, &ho_DxCol, hv_ColMask, "continued");
	ConvolImage(ho_ScaledY, &ho_DyRow, hv_RowMask, "continued");
	ConvolImage(ho_ScaledY, &ho_DyCol, hv_ColMask, "continued");
	ConvolImage(ho_ScaledZ, &ho_DzRow, hv_RowMask, "continued");
	ConvolImage(ho_ScaledZ, &ho_DzCol, hv_ColMask, "continued");

	//Raw normal = cross product of the row- and column-tangent vectors.
	//X component: dyRow*dzCol - dzRow*dyCol
	MultImage(ho_DyRow, ho_DzCol, &ho_ProdA, 1.0, 0);
	MultImage(ho_DzRow, ho_DyCol, &ho_ProdB, -1.0, 0);
	AddImage(ho_ProdA, ho_ProdB, &ho_RawNX, 1.0, 0);
	//Y component: dzRow*dxCol - dxRow*dzCol
	MultImage(ho_DxRow, ho_DzCol, &ho_ProdA, -1.0, 0);
	MultImage(ho_DzRow, ho_DxCol, &ho_ProdB, 1.0, 0);
	AddImage(ho_ProdA, ho_ProdB, &ho_RawNY, 1.0, 0);
	//Z component: dxRow*dyCol - dyRow*dxCol
	MultImage(ho_DxRow, ho_DyCol, &ho_ProdA, 1.0, 0);
	MultImage(ho_DyRow, ho_DxCol, &ho_ProdB, -1.0, 0);
	AddImage(ho_ProdA, ho_ProdB, &ho_RawNZ, 1.0, 0);

	//Optional 5x5 mean smoothing; the size 5 matches the behavior of
	//surface_normals_object_model_3d with 'xyz_mapping'.
	if (0 != hv_Smoothing)
	{
		MeanImage(ho_RawNX, &ho_RawNX, 5, 5);
		MeanImage(ho_RawNY, &ho_RawNY, 5, 5);
		MeanImage(ho_RawNZ, &ho_RawNZ, 5, 5);
	}

	//Normalize the normal vectors to unit length.
	MultImage(ho_RawNX, ho_RawNX, &ho_SqNX, 1.0, 0);
	MultImage(ho_RawNY, ho_RawNY, &ho_SqNY, 1.0, 0);
	MultImage(ho_RawNZ, ho_RawNZ, &ho_SqNZ, 1.0, 0);
	AddImage(ho_SqNX, ho_SqNY, &ho_SumXY, 1.0, 0);
	AddImage(ho_SumXY, ho_SqNZ, &ho_SumXYZ, 1.0, 0);
	SqrtImage(ho_SumXYZ, &ho_Norm);
	//
	DivImage(ho_RawNX, ho_Norm, ho_NXImage, 1.0, 0);
	DivImage(ho_RawNY, ho_Norm, ho_NYImage, 1.0, 0);
	DivImage(ho_RawNZ, ho_Norm, ho_NZImage, 1.0, 0);
	return;
}

// Chapter: Tools / Geometry
// Short Description: Convert the parameters of rectangles with format rectangle2 to the coordinates of its 4 corner-points. 
void convert_rect2_5to8param(HTuple hv_Row, HTuple hv_Col, HTuple hv_Length1, HTuple hv_Length2,
	HTuple hv_Phi, HTuple *hv_Row1, HTuple *hv_Col1, HTuple *hv_Row2, HTuple *hv_Col2,
	HTuple *hv_Row3, HTuple *hv_Col3, HTuple *hv_Row4, HTuple *hv_Col4)
{

	// Local control variables
	HTuple  hv_CosPhi, hv_SinPhi, hv_DC1, hv_DC2, hv_DR1, hv_DR2;

	//Convert 'rectangle2' parameters (center, half-lengths, orientation)
	//into the row/column coordinates of the four corner points.
	//All inputs may be tuples; element i describes the i-th rectangle.
	//
	//Project both half-axes onto the row and column directions.
	hv_CosPhi = hv_Phi.TupleCos();
	hv_SinPhi = hv_Phi.TupleSin();
	hv_DC1 = hv_CosPhi*hv_Length1;
	hv_DC2 = hv_CosPhi*hv_Length2;
	hv_DR1 = hv_SinPhi*hv_Length1;
	hv_DR2 = hv_SinPhi*hv_Length2;

	//Corner points, one per combination of +/- half-axis offsets.
	(*hv_Col1) = (hv_DC1 - hv_DR2) + hv_Col;
	(*hv_Row1) = ((-hv_DR1) - hv_DC2) + hv_Row;
	(*hv_Col2) = ((-hv_DC1) - hv_DR2) + hv_Col;
	(*hv_Row2) = (hv_DR1 - hv_DC2) + hv_Row;
	(*hv_Col3) = ((-hv_DC1) + hv_DR2) + hv_Col;
	(*hv_Row3) = (hv_DR1 + hv_DC2) + hv_Row;
	(*hv_Col4) = (hv_DC1 + hv_DR2) + hv_Col;
	(*hv_Row4) = ((-hv_DR1) + hv_DC2) + hv_Row;

	return;
}

// Chapter: Tools / Geometry
// Short Description: Convert for four-sided figures the coordinates of the 4 corner-points to the parameters of format rectangle2. 
void convert_rect2_8to5param(HTuple hv_Row1, HTuple hv_Col1, HTuple hv_Row2, HTuple hv_Col2,
	HTuple hv_Row3, HTuple hv_Col3, HTuple hv_Row4, HTuple hv_Col4, HTuple hv_ForceL1LargerL2,
	HTuple *hv_Row, HTuple *hv_Col, HTuple *hv_Length1, HTuple *hv_Length2, HTuple *hv_Phi)
{

	// Local iconic variables

	// Local control variables
	HTuple  hv_Hor, hv_Vert, hv_IdxSwap, hv_Tmp;

	//This procedure takes the corners of four-sided figures
	//and returns the parameters of type 'rectangle2'.
	//All inputs may be tuples; element i describes the i-th figure.
	//
	//Calculate center row and column.
	(*hv_Row) = (((hv_Row1 + hv_Row2) + hv_Row3) + hv_Row4) / 4.0;
	(*hv_Col) = (((hv_Col1 + hv_Col2) + hv_Col3) + hv_Col4) / 4.0;
	//Length1 and Length2 (half the edge lengths P1-P2 and P2-P3).
	(*hv_Length1) = ((((hv_Row1 - hv_Row2)*(hv_Row1 - hv_Row2)) + ((hv_Col1 - hv_Col2)*(hv_Col1 - hv_Col2))).TupleSqrt()) / 2.0;
	(*hv_Length2) = ((((hv_Row2 - hv_Row3)*(hv_Row2 - hv_Row3)) + ((hv_Col2 - hv_Col3)*(hv_Col2 - hv_Col3))).TupleSqrt()) / 2.0;
	//Calculate the angle phi from the direction of edge P2->P1
	//(row axis points downwards, hence the sign flip on the row part).
	hv_Hor = hv_Col1 - hv_Col2;
	hv_Vert = hv_Row2 - hv_Row1;
	if (0 != hv_ForceL1LargerL2)
	{
		//Swap length1 and length2 if necessary.
		//hv_IdxSwap collects ALL element indices where Length2 exceeds
		//Length1 (beyond a 1e-9 tolerance); TupleFind yields -1 if none.
		hv_IdxSwap = (((*hv_Length2) - (*hv_Length1)).TupleGreaterElem(1e-9)).TupleFind(1);
		if (0 != (int(hv_IdxSwap != -1)))
		{
			//Element-wise swap of the two lengths at the found indices;
			//the direction vector is recomputed from edge P3->P2, which is
			//perpendicular to P2->P1, so Phi stays consistent with Length1.
			hv_Tmp = HTuple((*hv_Length1)[hv_IdxSwap]);
			(*hv_Length1)[hv_IdxSwap] = HTuple((*hv_Length2)[hv_IdxSwap]);
			(*hv_Length2)[hv_IdxSwap] = hv_Tmp;
			hv_Hor[hv_IdxSwap] = HTuple(hv_Col2[hv_IdxSwap]) - HTuple(hv_Col3[hv_IdxSwap]);
			hv_Vert[hv_IdxSwap] = HTuple(hv_Row3[hv_IdxSwap]) - HTuple(hv_Row2[hv_IdxSwap]);
		}
	}
	(*hv_Phi) = hv_Vert.TupleAtan2(hv_Hor);
	//
	return;
}

// Chapter: Deep Learning / Model
// Short Description: Crops a given image object based on the given domain handling. 
void crop_dl_sample_image(HObject ho_Domain, HTuple hv_DLSample, HTuple hv_Key,
	HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_Cropped;

	// Local control variables
	HTuple  hv_HasKey, hv_R1, hv_C1, hv_R2;
	HTuple  hv_C2, hv_Defaults;

	//Crop the image object stored in DLSample under Key to the axis-aligned
	//bounding box of Domain, if the configured 'domain_handling' matches the
	//'comp' value ('crop_domain') of the helper dictionary below.
	GetDictParam(hv_DLSample, "key_exists", hv_Key, &hv_HasKey);
	if (0 != hv_HasKey)
	{
		//Helper dictionary holding the comparison value for 'domain_handling'.
		CreateDict(&hv_Defaults);
		SetDictTuple(hv_Defaults, "comp", "crop_domain");
		if (0 != ((hv_DLPreprocessParam.TupleConcat(hv_Defaults)).TupleTestEqualDictItem("domain_handling", "comp")))
		{
			//Replace the stored object with its crop to the domain extent.
			SmallestRectangle1(ho_Domain, &hv_R1, &hv_C1, &hv_R2, &hv_C2);
			CropPart(hv_DLSample.TupleGetDictObject(hv_Key), &ho_Cropped, hv_R1,
				hv_C1, (hv_C2 - hv_C1) + 1, (hv_R2 - hv_R1) + 1);
			SetDictObject(ho_Cropped, hv_DLSample, hv_Key);
		}
		//Release the helper dictionary handle.
		hv_Defaults = HTuple::TupleConstant("HNULL");
	}
	return;
}

// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Filter the instance segmentation masks of a DL sample based on a given selection. 
void filter_dl_sample_instance_segmentation_masks(HTuple hv_DLSample, HTuple hv_BBoxSelectionMask)
{

	// Local iconic variables
	HObject  ho_AllMasks, ho_NoMasks;

	// Local control variables
	HTuple  hv_HasMasks, hv_SelectedIdx;

	//Reduce the 'mask' objects of an instance-segmentation sample to the
	//entries flagged with 1 in BBoxSelectionMask. Samples without a 'mask'
	//key (i.e. plain detection) are left untouched.
	GetDictParam(hv_DLSample, "key_exists", "mask", &hv_HasMasks);
	if (0 != hv_HasMasks)
	{
		TupleFind(hv_BBoxSelectionMask, 1, &hv_SelectedIdx);
		if (0 != (int(hv_SelectedIdx != -1)))
		{
			//Keep only the selected masks (object indices are 1-based).
			GetDictObject(&ho_AllMasks, hv_DLSample, "mask");
			SelectObj(ho_AllMasks, &ho_AllMasks, hv_SelectedIdx + 1);
			SetDictObject(ho_AllMasks, hv_DLSample, "mask");
		}
		else
		{
			//Nothing selected: store an empty object value under 'mask'
			//instead of removing the key, so the dictionary layout stays
			//stable for downstream consumers.
			GenEmptyObj(&ho_NoMasks);
			SetDictObject(ho_NoMasks, hv_DLSample, "mask");
		}
	}
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate ground truth characters if they don't exist and words to characters mapping. 
void gen_dl_ocr_detection_gt_chars(HTuple hv_DLSampleTargets, HTuple hv_DLSample,
	HTuple hv_ScaleWidth, HTuple hv_ScaleHeight, HTupleVector/*{eTupleVector,Dim=1}*/ *hvec_WordsCharsMapping)
{

	// Local iconic variables

	// Local control variables
	HTuple  hv_CharBoxIndex, hv_WordLengths, hv_J;
	HTuple  hv_Start, hv_End, hv_SplitRow, hv_SplitColumn, hv_SplitPhi;
	HTuple  hv_SplitLength1, hv_SplitLength2, hv_CharsIds, hv_EmptyWordStrings;

	//Ensure character-level ground truth exists in DLSampleTargets:
	//if the sample contains no char boxes (bbox_label_id 1), split each word
	//box (bbox_label_id 0) uniformly into one sub-box per character of the
	//word string and append these boxes to DLSampleTargets (weak supervision
	//fallback). WordsCharsMapping maps each word-box index to the indices of
	//its character boxes in DLSampleTargets.
	(*hvec_WordsCharsMapping)[0] = HTupleVector(HTuple());
	if (0 != (int(((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) > 0)))
	{
		//Check if chars GT exist otherwise generate them.
		TupleFindFirst(hv_DLSample.TupleGetDictTuple("bbox_label_id"), 1, &hv_CharBoxIndex);
		if (0 != (int(hv_CharBoxIndex == -1)))
		{
			//No char boxes present: derive them from the word strings.
			hv_WordLengths = (hv_DLSample.TupleGetDictTuple("word")).TupleStrlen();
			//Pre-size the mapping vector to one entry per box.
			(*hvec_WordsCharsMapping)[((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1] = HTupleVector(HTuple());
			{
				HTuple end_val7 = ((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1;
				HTuple step_val7 = 1;
				for (hv_J = 0; hv_J.Continue(end_val7, step_val7); hv_J += step_val7)
				{
					//For each word box
					if (0 != (int(HTuple((hv_DLSample.TupleGetDictTuple("bbox_label_id"))[hv_J]) == 0)))
					{
						if (0 != (int(HTuple(hv_WordLengths[hv_J]) != 0)))
						{
							//Indices [Start..End] address the char boxes that are
							//appended to DLSampleTargets below.
							hv_Start = (hv_DLSampleTargets.TupleGetDictTuple("bbox_label_id")).TupleLength();
							hv_End = (((hv_DLSampleTargets.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1) + HTuple(hv_WordLengths[hv_J]);
							(*hvec_WordsCharsMapping)[hv_J] = HTupleVector(HTuple::TupleGenSequence(hv_Start, hv_End, 1));
							//Split the word rectangle into WordLength equal parts.
							split_rectangle2(HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_J]),
								HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_J]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_J]),
								HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_J]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_J]),
								HTuple(hv_WordLengths[hv_J]), &hv_SplitRow, &hv_SplitColumn, &hv_SplitPhi,
								&hv_SplitLength1, &hv_SplitLength2);
							//Generated boxes get char label id 1 and empty word strings.
							TupleGenConst(HTuple(hv_WordLengths[hv_J]), 1, &hv_CharsIds);
							TupleGenConst(HTuple(hv_WordLengths[hv_J]), "", &hv_EmptyWordStrings);
							//Append the new char boxes to the target sample; the
							//lengths are shrunk by ScaleWidth/ScaleHeight to account
							//for spacing between characters.
							SetDictTuple(hv_DLSampleTargets, "bbox_label_id", (hv_DLSampleTargets.TupleGetDictTuple("bbox_label_id")).TupleConcat(hv_CharsIds));
							SetDictTuple(hv_DLSampleTargets, "bbox_row", (hv_DLSampleTargets.TupleGetDictTuple("bbox_row")).TupleConcat(hv_SplitRow));
							SetDictTuple(hv_DLSampleTargets, "bbox_col", (hv_DLSampleTargets.TupleGetDictTuple("bbox_col")).TupleConcat(hv_SplitColumn));
							SetDictTuple(hv_DLSampleTargets, "bbox_phi", (hv_DLSampleTargets.TupleGetDictTuple("bbox_phi")).TupleConcat(hv_SplitPhi));
							SetDictTuple(hv_DLSampleTargets, "bbox_length1", (hv_DLSampleTargets.TupleGetDictTuple("bbox_length1")).TupleConcat(hv_SplitLength1*hv_ScaleWidth));
							SetDictTuple(hv_DLSampleTargets, "bbox_length2", (hv_DLSampleTargets.TupleGetDictTuple("bbox_length2")).TupleConcat(hv_SplitLength2*hv_ScaleHeight));
							SetDictTuple(hv_DLSampleTargets, "word", (hv_DLSampleTargets.TupleGetDictTuple("word")).TupleConcat(hv_EmptyWordStrings));
						}
						else
						{
							throw HException(((("Sample with image id " + (hv_DLSample.TupleGetDictTuple("image_id"))) + " is not valid. The word bounding box at index ") + hv_J) + " has an empty string as the ground truth. This is not allowed. Please assign a word label to every word bounding box.");
						}
					}
				}
			}
		}
		else
		{
			//Char boxes already exist: only derive the word-to-chars mapping.
			gen_words_chars_mapping(hv_DLSample, &(*hvec_WordsCharsMapping));
		}
	}
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate target link score map for ocr detection training. 
void gen_dl_ocr_detection_gt_link_map(HObject *ho_GtLinkMap, HTuple hv_ImageWidth,
	HTuple hv_ImageHeight, HTuple hv_DLSampleTargets, HTupleVector/*{eTupleVector,Dim=1}*/ hvec_WordToCharVec,
	HTuple hv_Alpha)
{

	// Local iconic variables
	HObject  ho_Lines, ho_Line, ho_LineDilated;

	// Local control variables
	HTuple  hv_InitImage, hv_CRow, hv_CCol, hv_DiameterC;
	HTuple  hv_IndexW, hv_CharBoxIndices, hv_CharCRows, hv_CharCCols;
	HTuple  hv_CharDistToWordCenter, hv_ExtremeCharIndex, hv_DistToExtreme;
	HTuple  hv_CharIndexSorted, hv_Box1Idx, hv_Box2Idx, hv_Diameter1;
	HTuple  hv_Diameter2, hv_DilationRadius, hv_NumLines, hv_Index;

	//Render the ground-truth link score map: value 1.0 along dilated lines
	//connecting the centers of adjacent character boxes of the same word,
	//0.0 elsewhere.
	GenImageConst(&(*ho_GtLinkMap), "real", hv_ImageWidth, hv_ImageHeight);
	//Zero-fill explicitly if HALCON does not initialize new images.
	GetSystem("init_new_image", &hv_InitImage);
	if (0 != (int(hv_InitImage == HTuple("false"))))
	{
		OverpaintRegion((*ho_GtLinkMap), (*ho_GtLinkMap), 0.0, "fill");
	}
	//Compute box centers.
	hv_CRow = hv_DLSampleTargets.TupleGetDictTuple("bbox_row");
	hv_CCol = hv_DLSampleTargets.TupleGetDictTuple("bbox_col");
	//DiameterC is the full diagonal of each box (2 * hypot of half-lengths).
	hv_DiameterC = 2 * ((hv_DLSampleTargets.TupleGetDictTuple("bbox_length1")).TupleHypot(hv_DLSampleTargets.TupleGetDictTuple("bbox_length2")));
	//Loop over word boxes.
	{
		HTuple end_val10 = ((hv_DLSampleTargets.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1;
		HTuple step_val10 = 1;
		for (hv_IndexW = 0; hv_IndexW.Continue(end_val10, step_val10); hv_IndexW += step_val10)
		{
			//For each word box (label id 0).
			if (0 != (int(HTuple((hv_DLSampleTargets.TupleGetDictTuple("bbox_label_id"))[hv_IndexW]) == 0)))
			{
				//Indices of this word's character boxes.
				hv_CharBoxIndices = hvec_WordToCharVec[hv_IndexW].T();
				if (0 != (int((hv_CharBoxIndices.TupleLength()) == 0)))
				{
					continue;
				}
				else if (0 != (int((hv_CharBoxIndices.TupleLength()) == 1)))
				{
					//Generate a dot in the char center.
					GenCircle(&ho_Lines, HTuple(hv_CRow[hv_CharBoxIndices]), HTuple(hv_CCol[hv_CharBoxIndices]),
						(((0.5*hv_Alpha)*HTuple(hv_DiameterC[hv_CharBoxIndices])).TupleRound()) + 0.5);
				}
				else
				{
					//Generate link lines between chars.
					hv_CharCRows = HTuple(hv_CRow[hv_CharBoxIndices]);
					hv_CharCCols = HTuple(hv_CCol[hv_CharBoxIndices]);
					//Sort the char boxes within the word: first find the char
					//farthest from the word center (an end of the word), then
					//order all chars by their distance to that extreme char.
					hv_CharDistToWordCenter = (hv_CharCRows - HTuple(hv_CRow[hv_IndexW])).TupleHypot(hv_CharCCols - HTuple(hv_CCol[hv_IndexW]));
					hv_ExtremeCharIndex = ((const HTuple&)HTuple(hv_CharDistToWordCenter.TupleSortIndex()))[(hv_CharDistToWordCenter.TupleLength()) - 1];
					hv_DistToExtreme = (hv_CharCRows - HTuple(hv_CharCRows[hv_ExtremeCharIndex])).TupleHypot(hv_CharCCols - HTuple(hv_CharCCols[hv_ExtremeCharIndex]));
					hv_CharIndexSorted = hv_DistToExtreme.TupleSortIndex();
					//Get the indices of adjacent characters.
					hv_Box1Idx = hv_CharIndexSorted.TupleSelectRange(0, (hv_CharIndexSorted.TupleLength()) - 2);
					hv_Box2Idx = hv_CharIndexSorted.TupleSelectRange(1, (hv_CharIndexSorted.TupleLength()) - 1);
					//Generate link lines between each pair of adjacent characters.
					GenRegionLine(&ho_Lines, HTuple(hv_CharCRows[hv_Box1Idx]), HTuple(hv_CharCCols[hv_Box1Idx]),
						HTuple(hv_CharCRows[hv_Box2Idx]), HTuple(hv_CharCCols[hv_Box2Idx]));
					//Dilate the lines by 0.5/1.5/2.5/... pixels, such that the line thickness is approximately Alpha*mean(D1, D2)
					hv_Diameter1 = HTuple(hv_DiameterC[HTuple(hv_CharBoxIndices[hv_Box1Idx])]);
					hv_Diameter2 = HTuple(hv_DiameterC[HTuple(hv_CharBoxIndices[hv_Box2Idx])]);
					hv_DilationRadius = (((0.25*hv_Alpha)*(hv_Diameter1 + hv_Diameter2)).TupleRound()) + 0.5;
					//dilation_circle only accepts a single radius, so we need to loop over the lines.
					CountObj(ho_Lines, &hv_NumLines);
					{
						HTuple end_val39 = hv_NumLines;
						HTuple step_val39 = 1;
						for (hv_Index = 1; hv_Index.Continue(end_val39, step_val39); hv_Index += step_val39)
						{
							SelectObj(ho_Lines, &ho_Line, hv_Index);
							DilationCircle(ho_Line, &ho_LineDilated, HTuple(hv_DilationRadius[hv_Index - 1]));
							ReplaceObj(ho_Lines, ho_LineDilated, &ho_Lines, hv_Index);
						}
					}
				}
				//Paint this word's link geometry into the map with value 1.0.
				OverpaintRegion((*ho_GtLinkMap), ho_Lines, 1.0, "fill");
			}
		}
	}
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate target orientation score maps for ocr detection training. 
void gen_dl_ocr_detection_gt_orientation_map(HObject *ho_GtOrientationMaps, HTuple hv_ImageWidth,
	HTuple hv_ImageHeight, HTuple hv_DLSample)
{

	// Local iconic variables
	HObject  ho_SinMap, ho_CosMap;
	HObject  ho_CharRegions;

	// Local control variables
	HTuple  hv_NewImageInit, hv_CharIdx, hv_BoxPhi;

	//Render the ground-truth orientation maps: a two-channel real image
	//holding sin(phi) (channel 1) and cos(phi) (channel 2) inside each
	//character box, 0 elsewhere.
	GenImageConst(&ho_SinMap, "real", hv_ImageWidth, hv_ImageHeight);
	GenImageConst(&ho_CosMap, "real", hv_ImageWidth, hv_ImageHeight);
	//Zero-fill explicitly if HALCON does not initialize new images.
	GetSystem("init_new_image", &hv_NewImageInit);
	if (0 != (int(hv_NewImageInit == HTuple("false"))))
	{
		OverpaintRegion(ho_SinMap, ho_SinMap, 0.0, "fill");
		OverpaintRegion(ho_CosMap, ho_CosMap, 0.0, "fill");
	}
	if (0 != (int(((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) > 0)))
	{
		//Character boxes carry label id 1.
		TupleFind(hv_DLSample.TupleGetDictTuple("bbox_label_id"), 1, &hv_CharIdx);
		if (0 != (int(hv_CharIdx != -1)))
		{
			hv_BoxPhi = hv_DLSample.TupleGetDictTuple("bbox_phi");
			GenRectangle2(&ho_CharRegions, HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_CharIdx]),
				HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_CharIdx]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_CharIdx]),
				HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_CharIdx]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_CharIdx]));
			//Each region is painted with the sin/cos of its own angle.
			OverpaintRegion(ho_SinMap, ho_CharRegions, HTuple(hv_BoxPhi[hv_CharIdx]).TupleSin(),
				"fill");
			OverpaintRegion(ho_CosMap, ho_CharRegions, HTuple(hv_BoxPhi[hv_CharIdx]).TupleCos(),
				"fill");
		}
	}
	Compose2(ho_SinMap, ho_CosMap, ho_GtOrientationMaps);
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate target text score map for ocr detection training. 
void gen_dl_ocr_detection_gt_score_map(HObject *ho_TargetText, HTuple hv_DLSample,
	HTuple hv_BoxCutoff, HTuple hv_RenderCutoff, HTuple hv_ImageWidth, HTuple hv_ImageHeight)
{

	// Local iconic variables
	HObject  ho_ExtendedRectangle;

	// Local control variables
	HTuple  hv_InitImage, hv_Index, hv_Sigma1, hv_Sigma2;
	HTuple  hv_ExtendedLength1, hv_ExtendedLength2, hv_Rows;
	HTuple  hv_Columns, hv_Area, hv_Row, hv_Column, hv_HomMat2D;
	HTuple  hv_DistRow, hv_DistCol, hv_ScaledGaussian, hv_Grayval;

	//Render the ground-truth text score map: one rotated, anisotropic,
	//unnormalized Gaussian blob per character box (label id 1), combined
	//with the existing map values via per-pixel maximum.
	GenImageConst(&(*ho_TargetText), "real", hv_ImageWidth, hv_ImageHeight);
	//Zero-fill explicitly if HALCON does not initialize new images.
	GetSystem("init_new_image", &hv_InitImage);
	if (0 != (int(hv_InitImage == HTuple("false"))))
	{
		OverpaintRegion((*ho_TargetText), (*ho_TargetText), 0.0, "fill");
	}
	{
		HTuple end_val5 = ((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1;
		HTuple step_val5 = 1;
		for (hv_Index = 0; hv_Index.Continue(end_val5, step_val5); hv_Index += step_val5)
		{
			//For each char box
			if (0 != (HTuple(int(HTuple((hv_DLSample.TupleGetDictTuple("bbox_label_id"))[hv_Index]) == 1)).TupleAnd(int(hv_BoxCutoff != 0))))
			{
				//Compute the sigma of an unnormalized normal distribution, such that
				//a certain threshold value is reached at the interval of a certain size.
				//Here: the Gaussian falls to BoxCutoff at the box half-lengths.
				hv_Sigma1 = HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_Index])*((-0.5 / (hv_BoxCutoff.TupleLog())).TupleSqrt());
				hv_Sigma2 = HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_Index])*((-0.5 / (hv_BoxCutoff.TupleLog())).TupleSqrt());
				if (0 != (HTuple(HTuple(int(hv_Sigma1 != 0)).TupleAnd(int(hv_Sigma2 != 0))).TupleAnd(int(hv_RenderCutoff != 0))))
				{
					//Compute the radius of an unnormalized normal distribution,
					//where a certain threshold value is reached at the end.
					//The extended rectangle bounds the area where the Gaussian
					//still exceeds RenderCutoff, i.e. the region worth rendering.
					hv_ExtendedLength1 = hv_Sigma1 * ((-2 * (hv_RenderCutoff.TupleLog())).TupleSqrt());
					hv_ExtendedLength2 = hv_Sigma2 * ((-2 * (hv_RenderCutoff.TupleLog())).TupleSqrt());
					GenRectangle2(&ho_ExtendedRectangle, HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_Index]),
						HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_Index]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_Index]),
						hv_ExtendedLength1, hv_ExtendedLength2);
					ClipRegion(ho_ExtendedRectangle, &ho_ExtendedRectangle, 0, 0, hv_ImageHeight - 1,
						hv_ImageWidth - 1);
					GetRegionPoints(ho_ExtendedRectangle, &hv_Rows, &hv_Columns);
					//Verify that the bounding box has an area to plot a gaussian
					AreaCenter(ho_ExtendedRectangle, &hv_Area, &hv_Row, &hv_Column);
					if (0 != (int(hv_Area > 1)))
					{
						//Transform the region points into box-local coordinates
						//(translate to the box center, rotate by -phi).
						HomMat2dIdentity(&hv_HomMat2D);
						HomMat2dTranslate(hv_HomMat2D, -HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_Index]),
							-HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_Index]), &hv_HomMat2D);
						HomMat2dRotate(hv_HomMat2D, -HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_Index]),
							0, 0, &hv_HomMat2D);
						AffineTransPoint2d(hv_HomMat2D, hv_Rows, hv_Columns, &hv_DistRow, &hv_DistCol);
						//Unnormalized 2D Gaussian evaluated at each region point.
						hv_ScaledGaussian = (-0.5*(((hv_DistCol*hv_DistCol) / (hv_Sigma1*hv_Sigma1)) + ((hv_DistRow*hv_DistRow) / (hv_Sigma2*hv_Sigma2)))).TupleExp();
						//Merge with already-rendered blobs via per-pixel maximum.
						GetGrayval((*ho_TargetText), hv_Rows, hv_Columns, &hv_Grayval);
						SetGrayval((*ho_TargetText), hv_Rows, hv_Columns, hv_ScaledGaussian.TupleMax2(hv_Grayval));
					}
				}
			}
		}
	}
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Preprocess dl samples and generate targets and weights for ocr detection training. 
void gen_dl_ocr_detection_targets(HTuple hv_DLSampleOriginal, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_TargetText, ho_TargetLink, ho_TargetOrientation;
	HObject  ho_TargetWeightText, ho_TargetWeightLink, ho_WeightedCharScore;
	HObject  ho_TargetWeightOrientation, ho_OriginalDomain, ho_Image;
	HObject  ho_DomainWeight, ho_Domain, ho_TargetOrientationOut;
	HObject  ho_TargetWeightOrientationOut, ho_TargetOrientationChannel;
	HObject  ho_TargetWeightOrientationChannel;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_Stride;
	HTuple  hv_ScaleHeight, hv_ScaleWidth, hv_BoxCutoff, hv_RenderCutoff;
	HTuple  hv_Alpha, hv_WSWeightRenderThreshold, hv_LinkZeroWeightRadius;
	HTuple  hv_Confidence, hv_ScoreMapsWidth, hv_ScoreMapsHeight;
	HTuple  hv_DLSample, hv_HomMat2DIdentity, hv_HomMat2DScale;
	HTuple  hv_DLSampleTargets, hv_OriginalDomainArea, hv__;
	HTuple  hv_OriginalWidth, hv_OriginalHeight, hv_IsOriginalDomainFull;
	HTuple  hv_ChannelIdx, hv___Tmp_Ctrl_0, hv___Tmp_Ctrl_1;
	HTupleVector  hvec_WordsCharsMapping(1);

	//Generate all training targets and weights for OCR detection and store
	//them in DLSampleOriginal under the keys 'target_text', 'target_link',
	//'target_orientation', 'target_weight_text', 'target_weight_link' and
	//'target_weight_orientation'. The score maps have 1/Stride of the
	//preprocessed image resolution.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	hv_Stride = 2;
	//Parameters used in the fallback weak supervision case.
	//They make the the uniformly sized char boxes a bit smaller, as we can expect a spacing between the characters.
	hv_ScaleHeight = 0.9;
	hv_ScaleWidth = 0.8;
	//Parameters relevant to plot the gaussian blobs in the score map.
	hv_BoxCutoff = 0.3;
	hv_RenderCutoff = 0.01;
	//Parameter used to determine the dilation of lines in link map.
	hv_Alpha = 0.1;
	//Parameter used to determine the dilation radius of word boxes in the weight score map.
	hv_WSWeightRenderThreshold = 0.05;
	//Parameter represents the dilation radius of word lines in the weight link map.
	hv_LinkZeroWeightRadius = 2.5;
	//Confidence is here only a place holder for the fallback weak supervision case.
	hv_Confidence = 1.0;
	if (0 != (int(hv_Stride == 0)))
	{
		throw HException("Stride must be greater than 0.");
	}
	//Calculate the size of score maps.
	hv_ScoreMapsWidth = hv_ImageWidth / hv_Stride;
	hv_ScoreMapsHeight = hv_ImageHeight / hv_Stride;
	//Copy DLSample to maintain the original bounding boxes dimensions.
	CopyDict(hv_DLSampleOriginal, HTuple(), HTuple(), &hv_DLSample);
	//Preprocess bounding boxes to match targets dimensions:
	//scale centers and half-lengths by 1/Stride into score-map coordinates.
	HomMat2dIdentity(&hv_HomMat2DIdentity);
	HomMat2dScale(hv_HomMat2DIdentity, 1.0 / hv_Stride, 1.0 / hv_Stride, 0, 0, &hv_HomMat2DScale);
	AffineTransPoint2d(hv_HomMat2DScale, hv_DLSample.TupleGetDictTuple("bbox_col"),
		hv_DLSample.TupleGetDictTuple("bbox_row"), &hv___Tmp_Ctrl_0, &hv___Tmp_Ctrl_1);
	SetDictTuple(hv_DLSample, "bbox_row", hv___Tmp_Ctrl_1);
	SetDictTuple(hv_DLSample, "bbox_col", hv___Tmp_Ctrl_0);
	SetDictTuple(hv_DLSample, "bbox_length1", (hv_DLSample.TupleGetDictTuple("bbox_length1")) / hv_Stride);
	SetDictTuple(hv_DLSample, "bbox_length2", (hv_DLSample.TupleGetDictTuple("bbox_length2")) / hv_Stride);
	//DLSampleTargets additionally receives generated char boxes (see below).
	CopyDict(hv_DLSample, HTuple(), HTuple(), &hv_DLSampleTargets);
	//Ensure character ground truth exists (weak supervision fallback).
	gen_dl_ocr_detection_gt_chars(hv_DLSampleTargets, hv_DLSample, hv_ScaleWidth, hv_ScaleHeight,
		&hvec_WordsCharsMapping);
	//Generate target maps from WordRegions and CharBoxes.
	gen_dl_ocr_detection_gt_score_map(&ho_TargetText, hv_DLSampleTargets, hv_BoxCutoff,
		hv_RenderCutoff, hv_ScoreMapsWidth, hv_ScoreMapsHeight);
	gen_dl_ocr_detection_gt_link_map(&ho_TargetLink, hv_ScoreMapsWidth, hv_ScoreMapsHeight,
		hv_DLSampleTargets, hvec_WordsCharsMapping, hv_Alpha);
	gen_dl_ocr_detection_gt_orientation_map(&ho_TargetOrientation, hv_ScoreMapsWidth,
		hv_ScoreMapsHeight, hv_DLSampleTargets);
	//Generate weight maps from WordRegions and CharBoxes.
	gen_dl_ocr_detection_weight_score_map(&ho_TargetWeightText, hv_ScoreMapsWidth,
		hv_ScoreMapsHeight, hv_DLSampleTargets, hv_BoxCutoff, hv_WSWeightRenderThreshold,
		hv_Confidence);
	gen_dl_ocr_detection_weight_link_map(ho_TargetLink, ho_TargetWeightText, &ho_TargetWeightLink,
		hv_LinkZeroWeightRadius);
	MultImage(ho_TargetText, ho_TargetWeightText, &ho_WeightedCharScore, 1, 0);
	gen_dl_ocr_detection_weight_orientation_map(ho_WeightedCharScore, &ho_TargetWeightOrientation,
		hv_DLSampleTargets);
	//Take account of the image domain in DLSampleOriginal:
	//if the domain is reduced, zero all targets/weights outside of it.
	GetDomain(hv_DLSampleOriginal.TupleGetDictObject("image"), &ho_OriginalDomain);
	AreaCenter(ho_OriginalDomain, &hv_OriginalDomainArea, &hv__, &hv__);
	GetImageSize(hv_DLSampleOriginal.TupleGetDictObject("image"), &hv_OriginalWidth,
		&hv_OriginalHeight);
	hv_IsOriginalDomainFull = int(hv_OriginalDomainArea == (hv_OriginalWidth*hv_OriginalHeight));
	if (0 != (hv_IsOriginalDomainFull.TupleNot()))
	{
		//Calculate the domain weight: a score-map-sized binary image that is
		//1.0 inside the (zoomed) domain and 0.0 outside.
		GenImageConst(&ho_Image, "real", hv_OriginalWidth, hv_OriginalHeight);
		ChangeDomain(ho_Image, ho_OriginalDomain, &ho_Image);
		ZoomImageSize(ho_Image, &ho_DomainWeight, hv_ScoreMapsWidth, hv_ScoreMapsHeight,
			"constant");
		GetDomain(ho_DomainWeight, &ho_Domain);
		FullDomain(ho_DomainWeight, &ho_DomainWeight);
		OverpaintRegion(ho_DomainWeight, ho_DomainWeight, 0.0, "fill");
		OverpaintRegion(ho_DomainWeight, ho_Domain, 1.0, "fill");
		//Apply the domain weight.
		MultImage(ho_DomainWeight, ho_TargetText, &ho_TargetText, 1, 0);
		MultImage(ho_DomainWeight, ho_TargetLink, &ho_TargetLink, 1, 0);
		MultImage(ho_DomainWeight, ho_TargetWeightText, &ho_TargetWeightText, 1, 0);
		MultImage(ho_DomainWeight, ho_TargetWeightLink, &ho_TargetWeightLink, 1, 0);
		GenEmptyObj(&ho_TargetOrientationOut);
		GenEmptyObj(&ho_TargetWeightOrientationOut);
		//Orientation maps have two channels (sin/cos); weight each channel.
		for (hv_ChannelIdx = 1; hv_ChannelIdx <= 2; hv_ChannelIdx += 1)
		{
			AccessChannel(ho_TargetOrientation, &ho_TargetOrientationChannel, hv_ChannelIdx);
			AccessChannel(ho_TargetWeightOrientation, &ho_TargetWeightOrientationChannel,
				hv_ChannelIdx);
			MultImage(ho_DomainWeight, ho_TargetOrientationChannel, &ho_TargetOrientationChannel,
				1, 0);
			MultImage(ho_DomainWeight, ho_TargetWeightOrientationChannel, &ho_TargetWeightOrientationChannel,
				1, 0);
			AppendChannel(ho_TargetOrientationOut, ho_TargetOrientationChannel, &ho_TargetOrientationOut
			);
			AppendChannel(ho_TargetWeightOrientationOut, ho_TargetWeightOrientationChannel,
				&ho_TargetWeightOrientationOut);
		}
		ho_TargetOrientation = ho_TargetOrientationOut;
		ho_TargetWeightOrientation = ho_TargetWeightOrientationOut;
	}
	//Set targets in output sample.
	SetDictObject(ho_TargetText, hv_DLSampleOriginal, "target_text");
	SetDictObject(ho_TargetLink, hv_DLSampleOriginal, "target_link");
	SetDictObject(ho_TargetOrientation, hv_DLSampleOriginal, "target_orientation");
	SetDictObject(ho_TargetWeightText, hv_DLSampleOriginal, "target_weight_text");
	SetDictObject(ho_TargetWeightLink, hv_DLSampleOriginal, "target_weight_link");
	SetDictObject(ho_TargetWeightOrientation, hv_DLSampleOriginal, "target_weight_orientation");
}

// Chapter: OCR / Deep OCR
// Short Description: Generate link score map weight for ocr detection training. 
void gen_dl_ocr_detection_weight_link_map(HObject ho_LinkMap, HObject ho_TargetWeight,
	HObject *ho_TargetWeightLink, HTuple hv_LinkZeroWeightRadius)
{

	// Local iconic variables
	HObject  ho_Links, ho_DilatedLinks, ho_Outside;
	HObject  ho_KeepRegion, ho_ZeroBand;

	// Local control variables
	HTuple  hv_MapWidth, hv_MapHeight;

	//Weight map for the link score: equals TargetWeight except that a band
	//of LinkZeroWeightRadius pixels around (but not on) the link regions is
	//set to weight 0.
	if (0 != (int(hv_LinkZeroWeightRadius > 0)))
	{
		//Link pixels are those with a positive link score.
		Threshold(ho_LinkMap, &ho_Links, 0.01, "max");
		DilationCircle(ho_Links, &ho_DilatedLinks, hv_LinkZeroWeightRadius);
		Complement(ho_DilatedLinks, &ho_Outside);
		GetImageSize(ho_TargetWeight, &hv_MapWidth, &hv_MapHeight);
		ClipRegion(ho_Outside, &ho_Outside, 0, 0, hv_MapHeight - 1, hv_MapWidth - 1);
		//The zero band is everything that is neither a link pixel nor
		//outside the dilated neighborhood.
		Union2(ho_Links, ho_Outside, &ho_KeepRegion);
		Complement(ho_KeepRegion, &ho_ZeroBand);
		PaintRegion(ho_ZeroBand, ho_TargetWeight, ho_TargetWeightLink, 0, "fill");
	}
	else
	{
		//Radius 0 disables the band: pass the weight map through unchanged.
		CopyObj(ho_TargetWeight, ho_TargetWeightLink, 1, 1);
	}
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate orientation score map weight for ocr detection training. 
void gen_dl_ocr_detection_weight_orientation_map(HObject ho_InitialWeight, HObject *ho_OrientationTargetWeight,
	HTuple hv_DLSample)
{

	// Local iconic variables
	HObject  ho_CharBoxes, ho_CharUnion, ho_Background;

	// Local control variables
	HTuple  hv_CharIdx;

	//Orientation weight map: start from the initial weight, zero everything
	//outside the character boxes, then duplicate the result into a
	//two-channel image (one channel per sin/cos orientation target).
	CopyImage(ho_InitialWeight, ho_OrientationTargetWeight);
	FullDomain((*ho_OrientationTargetWeight), ho_OrientationTargetWeight);
	if (0 != (int(((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) > 0)))
	{
		//Character boxes carry label id 1.
		TupleFind(hv_DLSample.TupleGetDictTuple("bbox_label_id"), 1, &hv_CharIdx);
		if (0 != (int(hv_CharIdx != -1)))
		{
			GenRectangle2(&ho_CharBoxes, HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_CharIdx]),
				HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_CharIdx]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_CharIdx]),
				HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_CharIdx]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_CharIdx]));
			//Zero the weight everywhere outside the union of char boxes.
			Union1(ho_CharBoxes, &ho_CharUnion);
			Complement(ho_CharUnion, &ho_Background);
			OverpaintRegion((*ho_OrientationTargetWeight), ho_Background, 0, "fill");
		}
	}
	Compose2((*ho_OrientationTargetWeight), (*ho_OrientationTargetWeight), ho_OrientationTargetWeight
	);
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate text score map weight for ocr detection training. 
void gen_dl_ocr_detection_weight_score_map(HObject *ho_TargetWeightText, HTuple hv_ImageWidth,
	HTuple hv_ImageHeight, HTuple hv_DLSample, HTuple hv_BoxCutoff, HTuple hv_WSWeightRenderThreshold,
	HTuple hv_Confidence)
{
	//Generate the per-pixel weight image for the text score map used in OCR
	//detection training:
	//  - every pixel starts with weight 1.0,
	//  - pixels inside "ignore" boxes (bbox_label_id == 2) get weight 0.0,
	//  - pixels inside word boxes (bbox_label_id == 0) get weight Confidence;
	//    the word regions may be dilated so that the rendered score tails
	//    (controlled by BoxCutoff / WSWeightRenderThreshold) are covered too.

	// Local iconic variables
	HObject  ho_IgnoreRegion, ho_WordRegion, ho_WordRegionDilated;

	// Local control variables
	HTuple  hv_Indices, hv_WordIndex, hv_SigmaL2;
	HTuple  hv_WordLength2Ext, hv_DilationRadius;

	//Initialize the weight image with 1.0 everywhere.
	GenImageConst(&(*ho_TargetWeightText), "real", hv_ImageWidth, hv_ImageHeight);
	OverpaintRegion((*ho_TargetWeightText), (*ho_TargetWeightText), 1.0, "fill");
	if (0 != (int(((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) > 0)))
	{
		//Process ignore boxes (label id 2): their pixels get weight 0.0.
		TupleFind(hv_DLSample.TupleGetDictTuple("bbox_label_id"), 2, &hv_Indices);
		if (0 != (int(hv_Indices != -1)))
		{
			GenRectangle2(&ho_IgnoreRegion, HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_Indices]),
				HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_Indices]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_Indices]),
				HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_Indices]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_Indices]));
			OverpaintRegion((*ho_TargetWeightText), ho_IgnoreRegion, 0.0, "fill");
		}
		{
			HTuple end_val9 = ((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1;
			HTuple step_val9 = 1;
			for (hv_WordIndex = 0; hv_WordIndex.Continue(end_val9, step_val9); hv_WordIndex += step_val9)
			{
				//For each word box (label id 0).
				if (0 != (int(HTuple((hv_DLSample.TupleGetDictTuple("bbox_label_id"))[hv_WordIndex]) == 0)))
				{
					if (0 != (HTuple(HTuple(int(hv_BoxCutoff == 0)).TupleOr(int(hv_WSWeightRenderThreshold == 0))).TupleNot()))
					{
						//Sigma of a Gaussian that drops to BoxCutoff at distance
						//bbox_length2 from the box center:
						//  sigma = length2 * sqrt(-0.5 / log(BoxCutoff)).
						hv_SigmaL2 = HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_WordIndex])*((-0.5 / (hv_BoxCutoff.TupleLog())).TupleSqrt());
						//Distance at which that Gaussian falls below
						//WSWeightRenderThreshold; the dilation radius is the
						//overshoot beyond the box half-length.
						hv_WordLength2Ext = hv_SigmaL2 * ((-2 * (hv_WSWeightRenderThreshold.TupleLog())).TupleSqrt());
						hv_DilationRadius = hv_WordLength2Ext - HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_WordIndex]);
					}
					else
					{
						//Cutoff or render threshold disabled: no dilation.
						hv_DilationRadius = 0;
					}
					GenRectangle2(&ho_WordRegion, HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_WordIndex]),
						HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_WordIndex]), HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_WordIndex]),
						HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_WordIndex]),
						HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_WordIndex]));
					//Slightly enlarge the weight region to suppress halos at the box borders.
					if (0 != (int(hv_DilationRadius >= 0.5)))
					{
						DilationCircle(ho_WordRegion, &ho_WordRegionDilated, hv_DilationRadius);
					}
					else
					{
						ho_WordRegionDilated = ho_WordRegion;
					}
					//Set the confidence as weight for the word region.
					OverpaintRegion((*ho_TargetWeightText), ho_WordRegionDilated, hv_Confidence,
						"fill");
				}
			}
		}
	}
	return;
}

// Chapter: Deep Learning / Model
// Short Description: Store the given images in a tuple of dictionaries DLSamples. 
void gen_dl_samples_from_images(HObject ho_Images, HTuple *hv_DLSampleBatch)
{
	//Wrap every image of Images into its own DLSample dictionary (stored
	//under the key "image") and return the dictionaries as DLSampleBatch.

	// Local iconic variables
	HObject  ho_CurrentImage;

	// Local control variables
	HTuple  hv_ImageCount, hv_Idx, hv_SampleDict;

	//Pre-size the output tuple, one slot per input image.
	CountObj(ho_Images, &hv_ImageCount);
	(*hv_DLSampleBatch) = HTuple(hv_ImageCount, -1);
	//
	//Build one sample dictionary per image.
	{
		HTuple end_idx = hv_ImageCount - 1;
		HTuple step_idx = 1;
		for (hv_Idx = 0; hv_Idx.Continue(end_idx, step_idx); hv_Idx += step_idx)
		{
			//HALCON object tuples are 1-based, hence the +1.
			SelectObj(ho_Images, &ho_CurrentImage, hv_Idx + 1);
			CreateDict(&hv_SampleDict);
			SetDictObject(ho_CurrentImage, hv_SampleDict, "image");
			(*hv_DLSampleBatch)[hv_Idx] = hv_SampleDict;
		}
	}
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Generate a word to characters mapping. 
void gen_words_chars_mapping(HTuple hv_DLSample, HTupleVector/*{eTupleVector,Dim=1}*/ *hvec_WordsCharsMapping)
{
	//Build a mapping from each word bounding box of DLSample to the character
	//bounding boxes it contains: WordsCharsMapping[wordBoxIndex] receives the
	//indices of all character boxes whose rectangle overlaps the word box by
	//more than the area threshold below. Word boxes have bbox_label_id == 0,
	//character boxes have bbox_label_id == 1.
	//Throws if any word box has an empty ground-truth string.

	// Local iconic variables

	// Local control variables
	HTuple  hv_WordsIndices, hv_CharsIndices, hv_WordLengths;
	HTuple  hv_WordArea, hv_CharArea, hv_CharAreaThreshold;
	HTuple  hv_WordIndex, hv_AreaIntersection, hv_CIsInsideW;
	HTuple  hv_CIndex;

	//Procedure to generate the mapping: gen_words_chars_mapping
	if (0 != (int(((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) > 0)))
	{
		TupleFind(hv_DLSample.TupleGetDictTuple("bbox_label_id"), 0, &hv_WordsIndices);
		TupleFind(hv_DLSample.TupleGetDictTuple("bbox_label_id"), 1, &hv_CharsIndices);
		if (0 != (HTuple(int(hv_CharsIndices != -1)).TupleAnd(int(hv_WordsIndices != -1))))
		{
			hv_WordLengths = HTuple((hv_DLSample.TupleGetDictTuple("word"))[hv_WordsIndices]).TupleStrlen();
			//Init vector: size it up to the last bounding box index.
			(*hvec_WordsCharsMapping)[((hv_DLSample.TupleGetDictTuple("bbox_label_id")).TupleLength()) - 1] = HTupleVector(HTuple());
			//Box areas: a rectangle2 spans 2*length1 x 2*length2 pixels.
			//NOTE(review): hv_WordArea is computed but never used below.
			hv_WordArea = (4 * HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_WordsIndices]))*HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_WordsIndices]);
			hv_CharArea = (4 * HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_CharsIndices]))*HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_CharsIndices]);
			//A char counts as inside a word when more than 80% of its area
			//intersects the word box.
			//TODO: This threshold is quite arbitrary and not stable.
			hv_CharAreaThreshold = hv_CharArea * 0.8;
			{
				HTuple end_val12 = (hv_WordsIndices.TupleLength()) - 1;
				HTuple step_val12 = 1;
				for (hv_WordIndex = 0; hv_WordIndex.Continue(end_val12, step_val12); hv_WordIndex += step_val12)
				{
					if (0 != (int(HTuple(hv_WordLengths[hv_WordIndex]) != 0)))
					{
						//Intersection area of this word box with every char box.
						AreaIntersectionRectangle2(HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[HTuple(hv_WordsIndices[hv_WordIndex])]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[HTuple(hv_WordsIndices[hv_WordIndex])]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[HTuple(hv_WordsIndices[hv_WordIndex])]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[HTuple(hv_WordsIndices[hv_WordIndex])]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[HTuple(hv_WordsIndices[hv_WordIndex])]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_row"))[hv_CharsIndices]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_col"))[hv_CharsIndices]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_phi"))[hv_CharsIndices]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_length1"))[hv_CharsIndices]),
							HTuple((hv_DLSample.TupleGetDictTuple("bbox_length2"))[hv_CharsIndices]),
							&hv_AreaIntersection);
						hv_CIsInsideW = hv_AreaIntersection.TupleGreaterElem(hv_CharAreaThreshold);
						hv_CIndex = hv_CIsInsideW.TupleFind(1);
						if (0 != (int(hv_CIndex != -1)))
						{
							//Store the char box indices belonging to this word box.
							(*hvec_WordsCharsMapping)[HTuple(hv_WordsIndices[hv_WordIndex])] = HTupleVector(HTuple(hv_CharsIndices[hv_CIndex]));
						}
					}
					else
					{
						throw HException(((("Sample with image id " + (hv_DLSample.TupleGetDictTuple("image_id"))) + " is not valid. The word bounding box at index ") + hv_WordIndex) + " has an empty string as the ground truth. This is not allowed. Please assign a word label to every word bounding box.");
					}
				}
			}
		}
	}
	return;
}

//#include <thread>
//#include <mutex>
//std::mutex mtx;
void inferenceDL(HObject ho_image, HObject *ho_region_segment, HTuple hv_model,
	HTuple hv_preprocess, HTuple *hv_name_segment, HTuple *hv_area_segment, HTuple *hv_row_segment,
	HTuple *hv_column_segment, HTuple *hv_ret)
{
	//Run the segmentation DL model on one image and return the connected
	//defect regions together with parallel tuples describing each region.
	//
	//Parameters:
	//  ho_image           input image
	//  ho_region_segment  out: connected components of all defect regions
	//  hv_model           DL model handle
	//  hv_preprocess      DL preprocessing parameter dictionary
	//  hv_name_segment    out: class name per region
	//  hv_area_segment    out: area per region
	//  hv_row_segment     out: center row per region
	//  hv_column_segment  out: center column per region
	//  hv_ret             out: number of segmented regions

	// Local iconic variables
	HObject  ho_Object, ho_ImageZoom, ho_ClassRegions;
	HObject  ho_ClassRegion, ho_ConnectedRegions, ho_CurrentRegion;

	// Local control variables
	HTuple  hv_ClassNames, hv_ClassIDs, hv_DLSampleBatch;
	HTuple  hv_DLResultBatch, hv_ImageWidth, hv_ImageHeight;
	HTuple  hv_Areas, hv_ClassIndex, hv_Area, hv_Row, hv_Column;
	HTuple  hv_ConnectIndex, hv_AreaNow, hv_RowNow, hv_ColumnNow;

	GetDlModelParam(hv_model, "class_names", &hv_ClassNames);
	GetDlModelParam(hv_model, "class_ids", &hv_ClassIDs);

	//Generate the DLSampleBatch.
	gen_dl_samples_from_images(ho_image, &hv_DLSampleBatch);
	//
	//Preprocess the DLSampleBatch.
	preprocess_dl_samples(hv_DLSampleBatch, hv_preprocess);
	//(Removed: visualization parameter dictionaries that were created here
	//but never used anywhere in this function.)
	//
	//Apply the DL model on the DLSampleBatch and log the inference time.
	int64 t1 = cv::getTickCount();
	ApplyDlModel(hv_model, hv_DLSampleBatch, (HTuple("segmentation_image").Append("segmentation_confidence")),
		&hv_DLResultBatch);
	int64 t2 = cv::getTickCount();
	//Qualify getTickFrequency explicitly (matches cv::getTickCount above and
	//avoids relying on a using-directive).
	double m_time_use = (t2 - t1) * 1000 / cv::getTickFrequency();
	logger->info("ApplyDlModel  " + std::to_string(m_time_use));

	//Resize the segmentation result back to the original image size.
	GetDictObject(&ho_Object, hv_DLResultBatch, "segmentation_image");
	GetImageSize(ho_image, &hv_ImageWidth, &hv_ImageHeight);
	ZoomImageSize(ho_Object, &ho_ImageZoom, hv_ImageWidth, hv_ImageHeight, "constant");

	//
	//Postprocessing: Get segmented regions for each class.
	Threshold(ho_ImageZoom, &ho_ClassRegions, hv_ClassIDs, hv_ClassIDs);
	//
	//Get area of class regions.
	RegionFeatures(ho_ClassRegions, "area", &hv_Areas);
	//
	//The first class (index 0, the 'good' class) is skipped; only defect
	//regions are collected.
	GenEmptyObj(&(*ho_region_segment));
	(*hv_name_segment) = HTuple();
	(*hv_area_segment) = HTuple();
	(*hv_row_segment) = HTuple();
	(*hv_column_segment) = HTuple();

	{
		HTuple end_val44 = (hv_Areas.TupleLength()) - 1;
		HTuple step_val44 = 1;
		for (hv_ClassIndex = 1; hv_ClassIndex.Continue(end_val44, step_val44); hv_ClassIndex += step_val44)
		{
			if (0 != (int(HTuple(hv_Areas[hv_ClassIndex]) > 0)))
			{
				SelectObj(ho_ClassRegions, &ho_ClassRegion, hv_ClassIndex + 1);
				//Get connected components of the segmented class region.
				Connection(ho_ClassRegion, &ho_ConnectedRegions);
				AreaCenter(ho_ConnectedRegions, &hv_Area, &hv_Row, &hv_Column);
				{
					HTuple end_val50 = (hv_Area.TupleLength()) - 1;
					HTuple step_val50 = 1;
					for (hv_ConnectIndex = 0; hv_ConnectIndex.Continue(end_val50, step_val50); hv_ConnectIndex += step_val50)
					{
						//Collect each component and its features in parallel tuples.
						SelectObj(ho_ConnectedRegions, &ho_CurrentRegion, hv_ConnectIndex + 1);
						AreaCenter(ho_CurrentRegion, &hv_AreaNow, &hv_RowNow, &hv_ColumnNow);
						ConcatObj((*ho_region_segment), ho_CurrentRegion, &(*ho_region_segment));
						TupleConcat((*hv_name_segment), HTuple(hv_ClassNames[hv_ClassIndex]), &(*hv_name_segment));
						TupleConcat((*hv_area_segment), hv_AreaNow, &(*hv_area_segment));
						TupleConcat((*hv_row_segment), hv_RowNow, &(*hv_row_segment));
						TupleConcat((*hv_column_segment), hv_ColumnNow, &(*hv_column_segment));
					}
				}
			}
		}
	}
	//NOTE(review): this Connection call may re-split/reorder the collected
	//regions and desynchronize them from the parallel name/area/row/column
	//tuples built above — confirm whether it is actually required.
	Connection((*ho_region_segment), &(*ho_region_segment));
	TupleLength((*hv_name_segment), &(*hv_ret));

	return;
}
void initInferenceDL (HTuple hv_RetrainedModelFileName, HTuple hv_PreprocessParamFileName,
    HTuple hv_index, HTuple *hv_Ret, HTuple *hv_DLModelHandle, HTuple *hv_DLPreprocessParam)
{
  //Load a retrained DL model, bind it to the first DL device found by
  //initHalconGpu() (global hv_DLDeviceHandles), and read the associated
  //preprocessing parameter dictionary.
  //
  //Parameters:
  //  hv_RetrainedModelFileName  path of the retrained model file
  //  hv_PreprocessParamFileName path of the preprocessing parameter dict file
  //  hv_index                   model index; currently unused — kept for
  //                             interface compatibility (former multi-GPU
  //                             dispatch, see commented-out variant below)
  //  hv_Ret                     out: 1 on success, 0 if no DL device is available
  //  hv_DLModelHandle           out: handle of the loaded model
  //  hv_DLPreprocessParam       out: preprocessing parameter dictionary

  // Local control variables
  HTuple  hv_Length;

  //Read in the retrained model.
  ReadDlModel(hv_RetrainedModelFileName, &(*hv_DLModelHandle));

  //Fail early if device discovery (initHalconGpu) found no DL device.
  TupleLength(hv_DLDeviceHandles, &hv_Length);
  if (0 != (int(hv_Length < 1)))
  {
    (*hv_Ret) = 0;
    return;
  }

  //Initialize the model for inference on the first available device.
  SetDlModelParam((*hv_DLModelHandle), "device", HTuple(hv_DLDeviceHandles[0]));

  //
  //Get the parameters used for preprocessing.
  ReadDict(hv_PreprocessParamFileName, HTuple(), HTuple(), &(*hv_DLPreprocessParam));
  (*hv_Ret) = 1;
  return;
}

//void initInferenceDL (HTuple hv_RetrainedModelFileName, HTuple hv_PreprocessParamFileName,
//    HTuple hv_index, HTuple *hv_Ret, HTuple *hv_DLModelHandle, HTuple *hv_DLPreprocessParam)
//{

//  // Local iconic variables

//  // Local control variables
//  HTuple  hv_BatchSizeInference, hv_DLDeviceHandles;
//  HTuple  hv_Length;

//  //Read in the retrained model.
//  ReadDlModel(hv_RetrainedModelFileName, &(*hv_DLModelHandle));
//  //Batch Size used during inference.
//  hv_BatchSizeInference = 1;
//  //
//  //Set the batch size.
//  SetDlModelParam((*hv_DLModelHandle), "batch_size", hv_BatchSizeInference);
//  //
//  QueryAvailableDlDevices("runtime", "gpu", &hv_DLDeviceHandles);

//  TupleLength(hv_DLDeviceHandles, &hv_Length);
//  if (0 != (int(hv_Length<1)))
//  {
//    (*hv_Ret) = 0;
//    return;
//  }

//  if (0 != (int(hv_Length==2)))
//  {
//    //Initialize the model for inference.
//    if (0 != (int((hv_index%2)==0)))
//    {
//        logger->info("gpu0 load model");
//      SetDlModelParam((*hv_DLModelHandle), "device", HTuple(hv_DLDeviceHandles[0]));
//    }
//    else
//    {
//        logger->info("gpu1 load model");
//      SetDlModelParam((*hv_DLModelHandle), "device", HTuple(hv_DLDeviceHandles[1]));
//    }
//  }
//  if (0 != (int(hv_Length==1)))
//  {
//       logger->info("gpu0 load model");
//    //Initialize the model for inference.
//    SetDlModelParam((*hv_DLModelHandle), "device", HTuple(hv_DLDeviceHandles[0]));
//  }

//  //
//  //Get the parameters used for preprocessing.
//  ReadDict(hv_PreprocessParamFileName, HTuple(), HTuple(), &(*hv_DLPreprocessParam));
//  (*hv_Ret) = 1;
//  return;
//}
// Chapter: Deep Learning / Model
// Short Description: Preprocess 3D data for deep-learning-based training and inference. 
void preprocess_dl_model_3d_data(HTuple hv_DLSample, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_ImageZ, ho_Domain, ho_Region, ho_ImageReduced;
	HObject  ho_DomainComplement, ho_ImageX, ho_ImageY, ho_ImageXYZ;
	HObject  ho_NXImage, ho_NYImage, ho_NZImage, ho_MultiChannelImage;
	HObject  ho___Tmp_Obj_0;

	// Local control variables
	HTuple  hv_HasNormals, hv_XYZKeys, hv_HasXYZ;
	HTuple  hv_HasX, hv_HasY, hv_HasZ, hv_HasFullXYZ, hv_NumChannels;
	HTuple  hv_Type, hv_Index, hv_Key, hv_ZMinMaxExist, hv_GrayvalOutsideInit;
	HTuple  hv_NormalSizeExists, hv_NormalWidth, hv_NormalHeight;
	HTuple  hv_WidthZ, hv_HeightZ, hv_ZoomNormals, hv_Width;
	HTuple  hv_Height, hv_ScaleWidth, hv_ScaleHeight, hv_XIndex;
	HTuple  hv_YIndex;

	//
	//This procedure preprocesses 3D data of a DLSample:
	//it validates the 'x'/'y'/'z'/'normals' entries, restricts the domain to
	//the configured z-range, computes normals from x/y/z when none are given,
	//zooms everything to the model input size and writes the preprocessed
	//'normals' (3-channel) plus 'x'/'y'/'z' images back into DLSample.
	//
	//Check presence of inputs in DLSample.
	//
	GetDictParam(hv_DLSample, "key_exists", "normals", &hv_HasNormals);
	hv_XYZKeys.Clear();
	hv_XYZKeys[0] = "x";
	hv_XYZKeys[1] = "y";
	hv_XYZKeys[2] = "z";
	GetDictParam(hv_DLSample, "key_exists", hv_XYZKeys, &hv_HasXYZ);
	hv_HasX = ((const HTuple&)hv_HasXYZ)[0];
	hv_HasY = ((const HTuple&)hv_HasXYZ)[1];
	hv_HasZ = ((const HTuple&)hv_HasXYZ)[2];
	TupleMin(hv_HasXYZ, &hv_HasFullXYZ);
	if (0 != (hv_HasNormals.TupleNot()))
	{
		//XYZ are required because normals would need to be computed.
		if (0 != (hv_HasFullXYZ.TupleNot()))
		{
			throw HException(HTuple("The given input DLSample does not contain necessary images 'x','y' and 'z'. This is required if no normals are provided."));
		}
	}
	else
	{
		//At least Z is required if normals are given.
		if (0 != (hv_HasZ.TupleNot()))
		{
			throw HException(HTuple("The given input DLSample does not contain at least the depth image 'z'. This is required because normals are provided. Optionally, 'x' and 'y' images might be provided additionally."));
		}
		//Provided normals must be a 3-channel 'real' image.
		CountChannels(hv_DLSample.TupleGetDictObject("normals"), &hv_NumChannels);
		if (0 != (int(hv_NumChannels != 3)))
		{
			throw HException("The given input DLSample.normals has to have three channels.");
		}
		GetImageType(hv_DLSample.TupleGetDictObject("normals"), &hv_Type);
		if (0 != (int(hv_Type != HTuple("real"))))
		{
			throw HException("The given input DLSample.normals is not a real image.");
		}
	}
	//Each provided x/y/z image must be a single-channel 'real' image.
	{
		HTuple end_val31 = (hv_HasXYZ.TupleLength()) - 1;
		HTuple step_val31 = 1;
		for (hv_Index = 0; hv_Index.Continue(end_val31, step_val31); hv_Index += step_val31)
		{
			if (0 != (HTuple(hv_HasXYZ[hv_Index])))
			{
				hv_Key = HTuple(hv_XYZKeys[hv_Index]);
				CountChannels(hv_DLSample.TupleGetDictObject(hv_Key), &hv_NumChannels);
				if (0 != (int(hv_NumChannels != 1)))
				{
					throw HException(("The given input DLSample." + hv_Key) + " needs to have a single channel.");
				}
				GetImageType(hv_DLSample.TupleGetDictObject(hv_Key), &hv_Type);
				if (0 != (int(hv_Type != HTuple("real"))))
				{
					throw HException(("The given input DLSample." + hv_Key) + " is not a real image.");
				}
			}
		}
	}
	//
	ho_ImageZ = hv_DLSample.TupleGetDictObject("z");
	GetDomain(ho_ImageZ, &ho_Domain);
	//Reduce Z domain to user-defined min/max values for Z.
	GetDictParam(hv_DLPreprocessParam, "key_exists", (HTuple("min_z").Append("max_z")),
		&hv_ZMinMaxExist);
	if (0 != (HTuple(hv_ZMinMaxExist[0])))
	{
		//Remove pixels below min_z from the domain.
		Threshold(ho_ImageZ, &ho_Region, "min", hv_DLPreprocessParam.TupleGetDictTuple("min_z"));
		Difference(ho_Domain, ho_Region, &ho_Domain);
	}
	if (0 != (HTuple(hv_ZMinMaxExist[1])))
	{
		//Remove pixels above max_z from the domain.
		Threshold(ho_ImageZ, &ho_Region, hv_DLPreprocessParam.TupleGetDictTuple("max_z"),
			"max");
		Difference(ho_Domain, ho_Region, &ho_Domain);
	}
	//Reduce domain because it might have changed
	//NOTE(review): ho_ImageReduced is never used afterwards — this
	//ReduceDomain call looks like dead code; confirm before removing.
	if (0 != (hv_ZMinMaxExist.TupleMax()))
	{
		ReduceDomain(ho_ImageZ, ho_Domain, &ho_ImageReduced);
	}
	Complement(ho_Domain, &ho_DomainComplement);
	//
	//Before we zoom any 3D images we want to set all pixels outside of the domain to
	//an invalid value.
	hv_GrayvalOutsideInit = 0;

	if (0 != hv_HasFullXYZ)
	{
		//All of x/y/z exist: mark out-of-domain pixels as invalid in each image.
		ho_ImageX = hv_DLSample.TupleGetDictObject("x");
		ho_ImageY = hv_DLSample.TupleGetDictObject("y");
		ho_ImageZ = hv_DLSample.TupleGetDictObject("z");

		FullDomain(ho_ImageX, &ho_ImageX);
		FullDomain(ho_ImageY, &ho_ImageY);
		FullDomain(ho_ImageZ, &ho_ImageZ);

		OverpaintRegion(ho_ImageX, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");
		OverpaintRegion(ho_ImageY, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");
		OverpaintRegion(ho_ImageZ, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");

		ReduceDomain(ho_ImageX, ho_Domain, &ho_ImageX);
		ReduceDomain(ho_ImageY, ho_Domain, &ho_ImageY);
		ReduceDomain(ho_ImageZ, ho_Domain, &ho_ImageZ);

		if (0 != (hv_HasNormals.TupleNot()))
		{
			//Get optional user-defined resolution of normal computation.
			//Default: 1.5 times the model input size.
			GetDictParam(hv_DLPreprocessParam, "key_exists", (HTuple("normal_image_width").Append("normal_image_height")),
				&hv_NormalSizeExists);
			if (0 != (HTuple(hv_NormalSizeExists[0]).TupleNot()))
			{
				hv_NormalWidth = ((hv_DLPreprocessParam.TupleGetDictTuple("image_width"))*1.5).TupleInt();
			}
			else
			{
				hv_NormalWidth = hv_DLPreprocessParam.TupleGetDictTuple("normal_image_width");
			}
			if (0 != (HTuple(hv_NormalSizeExists[1]).TupleNot()))
			{
				hv_NormalHeight = ((hv_DLPreprocessParam.TupleGetDictTuple("image_height"))*1.5).TupleInt();
			}
			else
			{
				hv_NormalHeight = hv_DLPreprocessParam.TupleGetDictTuple("normal_image_height");
			}

			//Zoom x/y/z only if the normal-computation size differs from z.
			GetImageSize(ho_ImageZ, &hv_WidthZ, &hv_HeightZ);
			hv_ZoomNormals = HTuple(int(hv_NormalWidth != hv_WidthZ)).TupleOr(int(hv_NormalHeight != hv_HeightZ));

			if (0 != hv_ZoomNormals)
			{
				Compose3(ho_ImageX, ho_ImageY, ho_ImageZ, &ho_ImageXYZ);
				GetImageSize(ho_ImageXYZ, &hv_Width, &hv_Height);
				ZoomImageSize(ho_ImageXYZ, &ho_ImageXYZ, hv_NormalWidth, hv_NormalHeight,
					"nearest_neighbor");
				AccessChannel(ho_ImageXYZ, &ho_ImageX, 1);
				AccessChannel(ho_ImageXYZ, &ho_ImageY, 2);
				AccessChannel(ho_ImageXYZ, &ho_ImageZ, 3);
				//Zoom the domain along with the images and drop pixels that
				//became invalid through the zooming.
				hv_ScaleWidth = hv_NormalWidth / (hv_Width.TupleReal());
				hv_ScaleHeight = hv_NormalHeight / (hv_Height.TupleReal());
				ZoomRegion(ho_Domain, &ho_Domain, hv_ScaleWidth, hv_ScaleHeight);
				remove_invalid_3d_pixels(ho_ImageX, ho_ImageY, ho_ImageZ, ho_Domain, &ho_Domain,
					hv_GrayvalOutsideInit);
				Complement(ho_Domain, &ho_DomainComplement);
			}

			//Compute the normals from the (possibly zoomed) x/y/z images.
			compute_normals_xyz(ho_ImageX, ho_ImageY, ho_ImageZ, &ho_NXImage, &ho_NYImage,
				&ho_NZImage, 1);
		}
		else
		{
			//Normals were provided: split them into their three channels.
			AccessChannel(hv_DLSample.TupleGetDictObject("normals"), &ho_NXImage, 1);
			AccessChannel(hv_DLSample.TupleGetDictObject("normals"), &ho_NYImage, 2);
			AccessChannel(hv_DLSample.TupleGetDictObject("normals"), &ho_NZImage, 3);
		}
	}
	else
	{
		//No full x/y/z available: use the provided normals; x/y stay empty.
		GenEmptyObj(&ho_ImageX);
		GenEmptyObj(&ho_ImageY);

		AccessChannel(hv_DLSample.TupleGetDictObject("normals"), &ho_NXImage, 1);
		AccessChannel(hv_DLSample.TupleGetDictObject("normals"), &ho_NYImage, 2);
		AccessChannel(hv_DLSample.TupleGetDictObject("normals"), &ho_NZImage, 3);
	}

	FullDomain(ho_ImageZ, &ho_ImageZ);

	FullDomain(ho_NXImage, &ho_NXImage);
	FullDomain(ho_NYImage, &ho_NYImage);
	FullDomain(ho_NZImage, &ho_NZImage);

	//full_domain does not change the pixels outside of the existing domain.
	//Hence we have to set a specific value
	OverpaintRegion(ho_NXImage, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");
	OverpaintRegion(ho_NYImage, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");
	OverpaintRegion(ho_NZImage, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");
	OverpaintRegion(ho_ImageZ, ho_DomainComplement, hv_GrayvalOutsideInit, "fill");

	//Bundle nx/ny/nz/z so a single zoom handles all of them consistently.
	Compose4(ho_NXImage, ho_NYImage, ho_NZImage, ho_ImageZ, &ho_MultiChannelImage);

	//Append x and y (if present) as extra channels; remember their channel
	//indices so they can be extracted again after zooming.
	//Note: hv_HasX/hv_HasY are reused here as object counts (0 or 1).
	CountObj(ho_ImageX, &hv_HasX);
	if (0 != hv_HasX)
	{
		FullDomain(ho_ImageX, &ho_ImageX);
		AppendChannel(ho_MultiChannelImage, ho_ImageX, &ho_MultiChannelImage);
		CountChannels(ho_MultiChannelImage, &hv_XIndex);
	}
	CountObj(ho_ImageY, &hv_HasY);
	if (0 != hv_HasY)
	{
		FullDomain(ho_ImageY, &ho_ImageY);
		AppendChannel(ho_MultiChannelImage, ho_ImageY, &ho_MultiChannelImage);
		CountChannels(ho_MultiChannelImage, &hv_YIndex);
	}
	//Zoom all channels to the model input size.
	GetImageSize(ho_MultiChannelImage, &hv_Width, &hv_Height);
	ZoomImageSize(ho_MultiChannelImage, &ho_MultiChannelImage, hv_DLPreprocessParam.TupleGetDictTuple("image_width"),
		hv_DLPreprocessParam.TupleGetDictTuple("image_height"), "nearest_neighbor");

	//Split the zoomed channels back into the individual images.
	Decompose4(ho_MultiChannelImage, &ho_NXImage, &ho_NYImage, &ho_NZImage, &ho_ImageZ
	);
	if (0 != hv_HasX)
	{
		AccessChannel(ho_MultiChannelImage, &ho_ImageX, hv_XIndex);
	}
	if (0 != hv_HasY)
	{
		AccessChannel(ho_MultiChannelImage, &ho_ImageY, hv_YIndex);
	}


	//Zoom the domain
	hv_ScaleWidth = (hv_DLPreprocessParam.TupleGetDictTuple("image_width")) / (hv_Width.TupleReal());
	hv_ScaleHeight = (hv_DLPreprocessParam.TupleGetDictTuple("image_height")) / (hv_Height.TupleReal());
	ZoomRegion(ho_Domain, &ho_Domain, hv_ScaleWidth, hv_ScaleHeight);
	remove_invalid_3d_pixels(ho_NXImage, ho_NYImage, ho_NZImage, ho_Domain, &ho_Domain,
		hv_GrayvalOutsideInit);

	//Restrict the outputs to the valid domain and store the recomposed
	//3-channel normals image in the sample.
	ReduceDomain(ho_ImageX, ho_Domain, &ho_ImageX);
	ReduceDomain(ho_ImageY, ho_Domain, &ho_ImageY);
	ReduceDomain(ho_ImageZ, ho_Domain, &ho_ImageZ);
	Compose3(ho_NXImage, ho_NYImage, ho_NZImage, &ho___Tmp_Obj_0);
	SetDictObject(ho___Tmp_Obj_0, hv_DLSample, "normals");
	ReduceDomain(hv_DLSample.TupleGetDictObject("normals"), ho_Domain, &ho___Tmp_Obj_0
	);
	SetDictObject(ho___Tmp_Obj_0, hv_DLSample, "normals");

	//Overwrite preprocessed 3D data
	if (0 != hv_HasX)
	{
		SetDictObject(ho_ImageX, hv_DLSample, "x");
	}
	if (0 != hv_HasY)
	{
		SetDictObject(ho_ImageY, hv_DLSample, "y");
	}
	if (0 != hv_HasZ)
	{
		SetDictObject(ho_ImageZ, hv_DLSample, "z");
	}

	return;
}

// Chapter: Deep Learning / Model
// Short Description: Preprocess anomaly images for evaluation and visualization of deep-learning-based anomaly detection or Global Context Anomaly Detection. 
void preprocess_dl_model_anomaly(HObject ho_AnomalyImages, HObject *ho_AnomalyImagesPreprocessed,
	HTuple hv_DLPreprocessParam)
{
	//Preprocess AnomalyImages for evaluation/visualization of deep-learning
	//based anomaly detection (or Global Context Anomaly Detection) according
	//to the dictionary DLPreprocessParam: adjust the image domain, resize to
	//the target size, convert 'byte' images to 'real' and verify that every
	//image has exactly one channel.
	//Note that depending on the images, additional preprocessing steps might
	//be beneficial.

	// Local control variables
	HTuple  hv_TargetWidth, hv_TargetHeight, hv_RangeMin;
	HTuple  hv_RangeMax, hv_DomainMode, hv_ModelKind;
	HTuple  hv_RequiredChannels, hv_GrayMin, hv_GrayMax, hv_GrayRange;
	HTuple  hv_InputWidths, hv_InputHeights, hv_SameWidth, hv_SameHeight;
	HTuple  hv_PixelTypes, hv_MatchCount, hv_ImageCount, hv_IsByte;
	HTuple  hv_ChannelsPerImage, hv_ExpectedChannels, hv_BadChannelIndices;

	//Validate the preprocessing parameters before using them.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Fetch the preprocessing parameters from the dictionary.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_TargetWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_TargetHeight);
	GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_RangeMin);
	GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_RangeMax);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainMode);
	GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_ModelKind);
	//
	//Anomaly images must be single-channel.
	hv_RequiredChannels = 1;
	//
	//Apply the requested domain handling.
	if (0 != (int(hv_DomainMode == HTuple("full_domain"))))
	{
		FullDomain(ho_AnomalyImages, &ho_AnomalyImages);
	}
	else if (0 != (int(hv_DomainMode == HTuple("crop_domain"))))
	{
		CropDomain(ho_AnomalyImages, &ho_AnomalyImages);
	}
	else if (0 != (HTuple(int(hv_DomainMode == HTuple("keep_domain"))).TupleAnd(int(hv_ModelKind == HTuple("anomaly_detection")))))
	{
		//'keep_domain' is only supported for models of 'type' =
		//'anomaly_detection'; nothing to do in this case.
	}
	else
	{
		throw HException("Unsupported parameter value for 'domain_handling'");
	}
	//
	//Gray values below zero are invalid for anomaly images.
	MinMaxGray(ho_AnomalyImages, ho_AnomalyImages, 0, &hv_GrayMin, &hv_GrayMax, &hv_GrayRange);
	if (0 != (int(hv_GrayMin < 0.0)))
	{
		throw HException("Values of anomaly image must not be smaller than 0.0.");
	}
	//
	//Resize only if at least one image differs from the target size.
	GetImageSize(ho_AnomalyImages, &hv_InputWidths, &hv_InputHeights);
	hv_SameWidth = hv_TargetWidth.TupleEqualElem(hv_InputWidths);
	hv_SameHeight = hv_TargetHeight.TupleEqualElem(hv_InputHeights);
	if (0 != (HTuple(int((hv_SameWidth.TupleMin()) == 0)).TupleOr(int((hv_SameHeight.TupleMin()) == 0))))
	{
		ZoomImageSize(ho_AnomalyImages, &ho_AnomalyImages, hv_TargetWidth, hv_TargetHeight,
			"nearest_neighbor");
	}
	//
	//Only 'byte' or 'real' input images are accepted.
	GetImageType(ho_AnomalyImages, &hv_PixelTypes);
	TupleRegexpTest(hv_PixelTypes, "byte|real", &hv_MatchCount);
	CountObj(ho_AnomalyImages, &hv_ImageCount);
	if (0 != (int(hv_MatchCount != hv_ImageCount)))
	{
		throw HException("Please provide only images of type 'byte' or 'real'.");
	}
	//
	//'byte' images are converted to 'real' because the gray value scaling
	//cannot operate on 'byte' images; for 'real' images the range is assumed
	//to be correct already. Mixing both types is rejected.
	hv_IsByte = hv_PixelTypes.TupleEqualElem("byte");
	if (0 != (int((hv_IsByte.TupleMax()) == 1)))
	{
		if (0 != (int((hv_IsByte.TupleMin()) == 0)))
		{
			throw HException("Passing mixed type images is not supported.");
		}
		//The model expects 'real' images.
		ConvertImageType(ho_AnomalyImages, &ho_AnomalyImages, "real");
	}
	//
	//Verify that every image has exactly the required single channel.
	CountObj(ho_AnomalyImages, &hv_ImageCount);
	CountChannels(ho_AnomalyImages, &hv_ChannelsPerImage);
	TupleGenConst(hv_ImageCount, hv_RequiredChannels, &hv_ExpectedChannels);
	TupleFind(hv_ChannelsPerImage.TupleNotEqualElem(hv_ExpectedChannels),
		1, &hv_BadChannelIndices);
	if (0 != (int(hv_BadChannelIndices != -1)))
	{
		throw HException("Number of channels in anomaly image is not supported. Please check for anomaly images with a number of channels different from 1.");
	}
	//
	//Hand back the preprocessed images.
	(*ho_AnomalyImagesPreprocessed) = ho_AnomalyImages;
	//
	return;
}

// Chapter: Deep Learning / Model
// Short Description: Preprocess the provided DLSample image for augmentation purposes. 
//
// Purpose: Prepare high-resolution augmentation data for the image stored in
// DLSample. This is only performed for OCR models ('ocr_recognition' or
// 'ocr_detection') and only if augmentation is requested via
// DLPreprocessParam; in all other cases the procedure returns without
// modifying the sample.
//
// Parameters:
//   hv_DLSample          - Sample dictionary. Must contain exactly one
//                          'image'. On success an 'augmentation_data'
//                          sub-dictionary is attached in-place.
//   hv_DLPreprocessParam - Preprocessing parameters (validated by
//                          check_dl_preprocess_param below); must provide
//                          'image_width', 'image_height',
//                          'image_num_channels' and 'model_type'.
//
// Throws: HException if the sample contains no image or more than one image,
//         or if the image type / channel count is unsupported for the model.
//
void preprocess_dl_model_augmentation_data(HTuple hv_DLSample, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_InputImage, ho_ImageHighRes;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_ImageNumChannels;
	HTuple  hv_ModelType, hv_AugmentationKeyExists, hv_ImageKeyExists;
	HTuple  hv_NumImages, hv_NumChannels, hv_ImageType, hv_InputImageWidth;
	HTuple  hv_InputImageHeight, hv_InputImageWidthHeightRatio;
	HTuple  hv_ZoomHeight, hv_ZoomWidth, hv_HasPadding, hv_ZoomFactorWidth;
	HTuple  hv_ZoomFactorHeight, hv_UseZoomImage, hv_DLSampleHighRes;
	HTuple  hv_DLPreprocessParamHighRes, hv___Tmp_Ctrl_Dict_Init_0;
	HTuple  hv___Tmp_Ctrl_Dict_Init_1, hv___Tmp_Ctrl_Dict_Init_2;

	//This procedure preprocesses the provided DLSample image for augmentation purposes.
	//
	//Check the validity of the preprocessing parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get the required preprocessing parameters.
	hv_ImageWidth = hv_DLPreprocessParam.TupleGetDictTuple("image_width");
	hv_ImageHeight = hv_DLPreprocessParam.TupleGetDictTuple("image_height");
	hv_ImageNumChannels = hv_DLPreprocessParam.TupleGetDictTuple("image_num_channels");
	hv_ModelType = hv_DLPreprocessParam.TupleGetDictTuple("model_type");
	//
	//Determine whether the preprocessing is required or not.
	GetDictParam(hv_DLPreprocessParam, "key_exists", "augmentation", &hv_AugmentationKeyExists);
	if (0 != (hv_AugmentationKeyExists.TupleNot()))
	{
		return;
	}
	//NOTE(review): HDevelop-exported comparison idiom. The temporary dict
	//carries the comparison value "true" under the key "comp"; the test
	//returns early unless the 'augmentation' entry of DLPreprocessParam
	//equals that value. Confirm against the original HDevelop script before
	//changing this pattern.
	CreateDict(&hv___Tmp_Ctrl_Dict_Init_0);
	SetDictTuple(hv___Tmp_Ctrl_Dict_Init_0, "comp", "true");
	if (0 != (((hv_DLPreprocessParam.TupleConcat(hv___Tmp_Ctrl_Dict_Init_0)).TupleTestEqualDictItem("augmentation", "comp")).TupleNot()))
	{
		return;
	}
	hv___Tmp_Ctrl_Dict_Init_0 = HTuple::TupleConstant("HNULL");
	//Only OCR model types require this augmentation preprocessing.
	if (0 != (HTuple(int(hv_ModelType != HTuple("ocr_detection"))).TupleAnd(int(hv_ModelType != HTuple("ocr_recognition")))))
	{
		return;
	}
	//
	//Get the input image and its properties.
	GetDictParam(hv_DLSample, "key_exists", "image", &hv_ImageKeyExists);
	if (0 != (hv_ImageKeyExists.TupleNot()))
	{
		throw HException("The sample to process needs to include an image.");
	}
	ho_InputImage = hv_DLSample.TupleGetDictObject("image");
	CountObj(ho_InputImage, &hv_NumImages);
	if (0 != (int(hv_NumImages != 1)))
	{
		throw HException("The sample to process needs to include exactly 1 image.");
	}
	CountChannels(ho_InputImage, &hv_NumChannels);
	GetImageType(ho_InputImage, &hv_ImageType);
	GetImageSize(ho_InputImage, &hv_InputImageWidth, &hv_InputImageHeight);
	//
	//Execute model specific preprocessing.
	if (0 != (int(hv_ModelType == HTuple("ocr_recognition"))))
	{
		if (0 != (int(hv_ImageNumChannels != 1)))
		{
			throw HException("The only 'image_num_channels' value supported for ocr_recognition models is 1.");
		}
		if (0 != (int((hv_ImageType.TupleRegexpTest("byte|real")) != 1)))
		{
			throw HException("Please provide only images of type 'byte' or 'real' for ocr_recognition models.");
		}
		if (0 != (int((HTuple((hv_NumChannels.TupleEqualElem(1)).TupleOr(hv_NumChannels.TupleEqualElem(3))).TupleSum()) != 1)))
		{
			throw HException("Please provide only 1- or 3-channels images for ocr_recognition models.");
		}
		//
		FullDomain(ho_InputImage, &ho_ImageHighRes);
		if (0 != (int(hv_NumChannels == 3)))
		{
			Rgb1ToGray(ho_ImageHighRes, &ho_ImageHighRes);
		}
		//Cap the high-resolution height at twice the target height and derive
		//the width from the input aspect ratio.
		hv_InputImageWidthHeightRatio = hv_InputImageWidth / (hv_InputImageHeight.TupleReal());
		hv_ZoomHeight = hv_InputImageHeight.TupleMin2(2 * hv_ImageHeight);
		hv_ZoomWidth = (hv_ZoomHeight*hv_InputImageWidthHeightRatio).TupleInt();
		//HasPadding: scaling to the target height while keeping the aspect
		//ratio would leave the image narrower than the target width.
		hv_HasPadding = int(((hv_ImageHeight*hv_InputImageWidthHeightRatio).TupleInt()) < hv_ImageWidth);
		//Keep a high-resolution copy only if it adds resolution over the
		//target size or horizontal padding would occur.
		if (0 != (HTuple(int(hv_ZoomHeight > hv_ImageHeight)).TupleOr(hv_HasPadding)))
		{
			ZoomImageSize(ho_ImageHighRes, &ho_ImageHighRes, hv_ZoomWidth, hv_ZoomHeight,
				"constant");
			CreateDict(&hv___Tmp_Ctrl_Dict_Init_1);
			SetDictTuple(hv_DLSample, "augmentation_data", hv___Tmp_Ctrl_Dict_Init_1);
			hv___Tmp_Ctrl_Dict_Init_1 = HTuple::TupleConstant("HNULL");
			SetDictObject(ho_ImageHighRes, hv_DLSample.TupleGetDictTuple("augmentation_data"),
				"image_high_res");
			SetDictTuple(hv_DLSample.TupleGetDictTuple("augmentation_data"), "preprocess_params",
				hv_DLPreprocessParam);
		}
	}
	else if (0 != (int(hv_ModelType == HTuple("ocr_detection"))))
	{
		if (0 != (int(hv_ImageNumChannels != 3)))
		{
			throw HException("The only 'image_num_channels' value supported for ocr_detection models is 3.");
		}
		if (0 != (int((hv_ImageType.TupleRegexpTest("byte|real")) != 1)))
		{
			throw HException("Please provide only images of type 'byte' or 'real' for ocr_detection models.");
		}
		if (0 != (int((HTuple((hv_NumChannels.TupleEqualElem(1)).TupleOr(hv_NumChannels.TupleEqualElem(3))).TupleSum()) != 1)))
		{
			throw HException("Please provide only 1- or 3-channels images for ocr_detection models.");
		}
		//
		//Calculate aspect-ratio preserving zoom dimensions for high resolution.
		calculate_dl_image_zoom_factors(hv_InputImageWidth, hv_InputImageHeight, 2 * hv_ImageWidth,
			2 * hv_ImageHeight, hv_DLPreprocessParam, &hv_ZoomFactorWidth, &hv_ZoomFactorHeight);
		hv_ZoomHeight = (hv_ZoomFactorHeight*hv_InputImageHeight).TupleRound();
		hv_ZoomWidth = (hv_ZoomFactorWidth*hv_InputImageWidth).TupleRound();
		//
		//Use the better size for high resolution: 2x resolution size of preprocess image or input image size.
		hv_UseZoomImage = HTuple(int(hv_ZoomWidth < hv_InputImageWidth)).TupleOr(int(hv_ZoomHeight < hv_InputImageHeight));
		CopyDict(hv_DLSample, HTuple(), HTuple(), &hv_DLSampleHighRes);
		CopyDict(hv_DLPreprocessParam, HTuple(), HTuple(), &hv_DLPreprocessParamHighRes);
		//
		FullDomain(ho_InputImage, &ho_ImageHighRes);
		//If the 2x target size is smaller than the input, preprocess the
		//copied sample at the zoomed size and zoom the image accordingly;
		//otherwise keep the input resolution.
		if (0 != hv_UseZoomImage)
		{
			SetDictTuple(hv_DLPreprocessParamHighRes, "image_width", hv_ZoomWidth);
			SetDictTuple(hv_DLPreprocessParamHighRes, "image_height", hv_ZoomHeight);
			preprocess_dl_model_bbox_rect2(ho_ImageHighRes, hv_DLSampleHighRes, hv_DLPreprocessParamHighRes);
			gen_dl_ocr_detection_targets(hv_DLSampleHighRes, hv_DLPreprocessParamHighRes);
			ZoomImageSize(ho_ImageHighRes, &ho_ImageHighRes, hv_ZoomWidth, hv_ZoomHeight,
				"constant");
		}
		else
		{
			SetDictTuple(hv_DLPreprocessParamHighRes, "image_width", hv_InputImageWidth);
			SetDictTuple(hv_DLPreprocessParamHighRes, "image_height", hv_InputImageHeight);
			gen_dl_ocr_detection_targets(hv_DLSampleHighRes, hv_DLPreprocessParamHighRes);
		}
		SetDictObject(ho_ImageHighRes, hv_DLSampleHighRes, "image");
		//
		CreateDict(&hv___Tmp_Ctrl_Dict_Init_2);
		SetDictTuple(hv_DLSample, "augmentation_data", hv___Tmp_Ctrl_Dict_Init_2);
		hv___Tmp_Ctrl_Dict_Init_2 = HTuple::TupleConstant("HNULL");
		SetDictTuple(hv_DLSample.TupleGetDictTuple("augmentation_data"), "sample_high_res",
			hv_DLSampleHighRes);
		SetDictTuple(hv_DLSample.TupleGetDictTuple("augmentation_data"), "preprocess_params",
			hv_DLPreprocessParam);
	}
	//
	return;
}

// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the bounding boxes of type 'rectangle1' for a given sample. 
//
// Purpose: Transform the 'rectangle1' (axis-aligned) bounding boxes stored in
// DLSample so that they correspond to the image after preprocessing, i.e.
// after optional domain cropping and rescaling to image_width x image_height.
// Boxes that fall outside the cropped domain or end up with zero area are
// removed, and the corresponding instance segmentation masks (if present)
// are filtered alongside. The bbox_* keys of DLSample are updated in-place.
//
// Parameters:
//   ho_ImageRaw          - Raw input image; provides the domain extent
//                          ('crop_domain') or the full size ('full_domain').
//   hv_DLSample          - Sample dictionary with keys bbox_row1, bbox_col1,
//                          bbox_row2, bbox_col2, bbox_label_id; modified
//                          in-place.
//   hv_DLPreprocessParam - Preprocessing parameters providing 'image_width',
//                          'image_height', and 'domain_handling'.
//
// Throws: HException if a bounding box key is missing, a box has zero area
//         (row1 >= row2 or col1 >= col2), or 'domain_handling' has an
//         unsupported value.
//
void preprocess_dl_model_bbox_rect1(HObject ho_ImageRaw, HTuple hv_DLSample, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_DomainRaw;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_DomainHandling;
	HTuple  hv_BBoxCol1, hv_BBoxCol2, hv_BBoxRow1, hv_BBoxRow2;
	HTuple  hv_BBoxLabel, hv_Exception, hv_ImageId, hv_ExceptionMessage;
	HTuple  hv_BoxesInvalid, hv_DomainRow1, hv_DomainColumn1;
	HTuple  hv_DomainRow2, hv_DomainColumn2, hv_WidthRaw, hv_HeightRaw;
	HTuple  hv_Row1, hv_Col1, hv_Row2, hv_Col2, hv_MaskDelete;
	HTuple  hv_MaskNewBbox, hv_BBoxCol1New, hv_BBoxCol2New;
	HTuple  hv_BBoxRow1New, hv_BBoxRow2New, hv_BBoxLabelNew;
	HTuple  hv_FactorResampleWidth, hv_FactorResampleHeight;

	//
	//This procedure preprocesses the bounding boxes of type 'rectangle1' for a given sample.
	//
	//Check the validity of the preprocessing parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get the preprocessing parameters.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	//
	//Get bounding box coordinates and labels.
	try
	{
		GetDictTuple(hv_DLSample, "bbox_col1", &hv_BBoxCol1);
		GetDictTuple(hv_DLSample, "bbox_col2", &hv_BBoxCol2);
		GetDictTuple(hv_DLSample, "bbox_row1", &hv_BBoxRow1);
		GetDictTuple(hv_DLSample, "bbox_row2", &hv_BBoxRow2);
		GetDictTuple(hv_DLSample, "bbox_label_id", &hv_BBoxLabel);
	}
	// catch (Exception) 
	catch (HException &HDevExpDefaultException)
	{
		HDevExpDefaultException.ToHTuple(&hv_Exception);
		GetDictTuple(hv_DLSample, "image_id", &hv_ImageId);
		//Error code 1302 means a dict key was not found.
		if (0 != (int(HTuple(hv_Exception[0]) == 1302)))
		{
			hv_ExceptionMessage = "A bounding box coordinate key is missing.";
		}
		else
		{
			hv_ExceptionMessage = ((const HTuple&)hv_Exception)[2];
		}
		throw HException((("An error has occurred during preprocessing image_id " + hv_ImageId) + " when getting bounding box coordinates : ") + hv_ExceptionMessage);
	}
	//
	//Check that there are no invalid boxes.
	if (0 != (int((hv_BBoxRow1.TupleLength()) > 0)))
	{
		hv_BoxesInvalid = (hv_BBoxRow1.TupleGreaterEqualElem(hv_BBoxRow2)).TupleOr(hv_BBoxCol1.TupleGreaterEqualElem(hv_BBoxCol2));
		if (0 != (int((hv_BoxesInvalid.TupleSum()) > 0)))
		{
			GetDictTuple(hv_DLSample, "image_id", &hv_ImageId);
			throw HException(("An error has occurred during preprocessing image_id " + hv_ImageId) + HTuple(": Sample contains at least one box with zero-area, i.e. bbox_col1 >= bbox_col2 or bbox_row1 >= bbox_row2."));
		}
	}
	else
	{
		//There are no bounding boxes, hence nothing to do.
		return;
	}
	//
	//If the domain is cropped, crop bounding boxes.
	if (0 != (int(hv_DomainHandling == HTuple("crop_domain"))))
	{
		//
		//Get domain.
		GetDomain(ho_ImageRaw, &ho_DomainRaw);
		//
		//Set the size of the raw image to the domain extensions.
		SmallestRectangle1(ho_DomainRaw, &hv_DomainRow1, &hv_DomainColumn1, &hv_DomainRow2,
			&hv_DomainColumn2);
		//The domain is always given as a pixel-precise region.
		hv_WidthRaw = (hv_DomainColumn2 - hv_DomainColumn1) + 1.0;
		hv_HeightRaw = (hv_DomainRow2 - hv_DomainRow1) + 1.0;
		//
		//Crop the bounding boxes.
		//Clip box edges to the domain extent (+-0.5 for subpixel-precise
		//box coordinates relative to the pixel-precise domain).
		hv_Row1 = hv_BBoxRow1.TupleMax2(hv_DomainRow1 - .5);
		hv_Col1 = hv_BBoxCol1.TupleMax2(hv_DomainColumn1 - .5);
		hv_Row2 = hv_BBoxRow2.TupleMin2(hv_DomainRow2 + .5);
		hv_Col2 = hv_BBoxCol2.TupleMin2(hv_DomainColumn2 + .5);
		hv_MaskDelete = (hv_Row1.TupleGreaterEqualElem(hv_Row2)).TupleOr(hv_Col1.TupleGreaterEqualElem(hv_Col2));
		//MaskNewBbox is 1 for boxes to keep and 0 for boxes to drop.
		hv_MaskNewBbox = 1 - hv_MaskDelete;
		//Store the preprocessed bounding box entries.
		hv_BBoxCol1New = (hv_Col1.TupleSelectMask(hv_MaskNewBbox)) - hv_DomainColumn1;
		hv_BBoxCol2New = (hv_Col2.TupleSelectMask(hv_MaskNewBbox)) - hv_DomainColumn1;
		hv_BBoxRow1New = (hv_Row1.TupleSelectMask(hv_MaskNewBbox)) - hv_DomainRow1;
		hv_BBoxRow2New = (hv_Row2.TupleSelectMask(hv_MaskNewBbox)) - hv_DomainRow1;
		hv_BBoxLabelNew = hv_BBoxLabel.TupleSelectMask(hv_MaskNewBbox);
		//
		//If we remove/select bounding boxes we also need to filter the corresponding
		//instance segmentation masks if they exist.
		filter_dl_sample_instance_segmentation_masks(hv_DLSample, hv_MaskNewBbox);
	}
	else if (0 != (int(hv_DomainHandling == HTuple("full_domain"))))
	{
		//If the entire image is used, set the variables accordingly.
		//Get the original size.
		GetImageSize(ho_ImageRaw, &hv_WidthRaw, &hv_HeightRaw);
		//Set new coordinates to input coordinates.
		hv_BBoxCol1New = hv_BBoxCol1;
		hv_BBoxCol2New = hv_BBoxCol2;
		hv_BBoxRow1New = hv_BBoxRow1;
		hv_BBoxRow2New = hv_BBoxRow2;
		hv_BBoxLabelNew = hv_BBoxLabel;
	}
	else
	{
		throw HException("Unsupported parameter value for 'domain_handling'");
	}
	//
	//Rescale the bounding boxes.
	//
	//Get required images width and height.
	//
	//Only rescale bounding boxes if the required image dimensions are not the raw dimensions.
	if (0 != (HTuple(int(hv_ImageHeight != hv_HeightRaw)).TupleOr(int(hv_ImageWidth != hv_WidthRaw))))
	{
		//Calculate rescaling factor.
		hv_FactorResampleWidth = (hv_ImageWidth.TupleReal()) / hv_WidthRaw;
		hv_FactorResampleHeight = (hv_ImageHeight.TupleReal()) / hv_HeightRaw;
		//Rescale the bounding box coordinates.
		//As we use XLD-coordinates we temporarily move the boxes by (.5,.5) for rescaling.
		//Doing so, the center of the XLD-coordinate system (-0.5,-0.5) is used
		//for scaling, hence the scaling is performed w.r.t. the pixel coordinate system.
		hv_BBoxCol1New = ((hv_BBoxCol1New + .5)*hv_FactorResampleWidth) - .5;
		hv_BBoxCol2New = ((hv_BBoxCol2New + .5)*hv_FactorResampleWidth) - .5;
		hv_BBoxRow1New = ((hv_BBoxRow1New + .5)*hv_FactorResampleHeight) - .5;
		hv_BBoxRow2New = ((hv_BBoxRow2New + .5)*hv_FactorResampleHeight) - .5;
		//
	}
	//
	//Make a final check and remove bounding boxes that have zero area.
	if (0 != (int((hv_BBoxRow1New.TupleLength()) > 0)))
	{
		hv_MaskDelete = (hv_BBoxRow1New.TupleGreaterEqualElem(hv_BBoxRow2New)).TupleOr(hv_BBoxCol1New.TupleGreaterEqualElem(hv_BBoxCol2New));
		hv_BBoxCol1New = hv_BBoxCol1New.TupleSelectMask(1 - hv_MaskDelete);
		hv_BBoxCol2New = hv_BBoxCol2New.TupleSelectMask(1 - hv_MaskDelete);
		hv_BBoxRow1New = hv_BBoxRow1New.TupleSelectMask(1 - hv_MaskDelete);
		hv_BBoxRow2New = hv_BBoxRow2New.TupleSelectMask(1 - hv_MaskDelete);
		hv_BBoxLabelNew = hv_BBoxLabelNew.TupleSelectMask(1 - hv_MaskDelete);
		//
		//If we remove/select bounding boxes we also need to filter the corresponding
		//instance segmentation masks if they exist.
		filter_dl_sample_instance_segmentation_masks(hv_DLSample, 1 - hv_MaskDelete);
	}
	//
	//Set new bounding box coordinates in the dictionary.
	SetDictTuple(hv_DLSample, "bbox_col1", hv_BBoxCol1New);
	SetDictTuple(hv_DLSample, "bbox_col2", hv_BBoxCol2New);
	SetDictTuple(hv_DLSample, "bbox_row1", hv_BBoxRow1New);
	SetDictTuple(hv_DLSample, "bbox_row2", hv_BBoxRow2New);
	SetDictTuple(hv_DLSample, "bbox_label_id", hv_BBoxLabelNew);
	//
	return;
}

// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the bounding boxes of type 'rectangle2' for a given sample. 
//
// Purpose: Transform the 'rectangle2' (oriented) bounding boxes stored in
// DLSample so that they correspond to the image after preprocessing, i.e.
// after optional domain cropping and rescaling to image_width x image_height.
// Boxes whose center lies outside the cropped domain are removed (and the
// matching instance segmentation masks are filtered alongside). For classes
// listed in the optional 'class_ids_no_orientation' parameter the boxes are
// replaced by their smallest enclosing axis-aligned boxes. After anisotropic
// rescaling, the box orientations are re-derived from the transformed corner
// points, and finally all angles are normalized into the valid range:
// (-180deg, 180deg] for 'ignore_direction'==false, (-90deg, 90deg] otherwise.
// The bbox_* keys of DLSample are updated in-place.
//
// Parameters:
//   ho_ImageRaw          - Raw input image; provides the domain extent
//                          ('crop_domain') or the full size ('full_domain').
//   hv_DLSample          - Sample dictionary with keys bbox_row, bbox_col,
//                          bbox_length1, bbox_length2, bbox_phi and
//                          bbox_label_id; modified in-place.
//   hv_DLPreprocessParam - Preprocessing parameters providing 'image_width',
//                          'image_height', 'domain_handling' and the optional
//                          keys 'ignore_direction' / 'class_ids_no_orientation'.
//
// Throws: HException if a bounding box key is missing, a box has zero area
//         (bbox_length1 == 0 or bbox_length2 == 0), or 'domain_handling' has
//         an unsupported value.
//
void preprocess_dl_model_bbox_rect2(HObject ho_ImageRaw, HTuple hv_DLSample, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_DomainRaw, ho_Rectangle2XLD, ho_Rectangle2XLDSheared;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_DomainHandling;
	HTuple  hv_IgnoreDirection, hv_ClassIDsNoOrientation, hv_KeyExists;
	HTuple  hv_BBoxRow, hv_BBoxCol, hv_BBoxLength1, hv_BBoxLength2;
	HTuple  hv_BBoxPhi, hv_BBoxLabel, hv_Exception, hv_ImageId;
	HTuple  hv_ExceptionMessage, hv_BoxesInvalid, hv_DomainRow1;
	HTuple  hv_DomainColumn1, hv_DomainRow2, hv_DomainColumn2;
	HTuple  hv_WidthRaw, hv_HeightRaw, hv_MaskDelete, hv_MaskNewBbox;
	HTuple  hv_BBoxRowNew, hv_BBoxColNew, hv_BBoxLength1New;
	HTuple  hv_BBoxLength2New, hv_BBoxPhiNew, hv_BBoxLabelNew;
	HTuple  hv_ClassIDsNoOrientationIndices, hv_Index, hv_ClassIDsNoOrientationIndicesTmp;
	HTuple  hv_DirectionLength1Row, hv_DirectionLength1Col;
	HTuple  hv_DirectionLength2Row, hv_DirectionLength2Col;
	HTuple  hv_Corner1Row, hv_Corner1Col, hv_Corner2Row, hv_Corner2Col;
	HTuple  hv_FactorResampleWidth, hv_FactorResampleHeight;
	HTuple  hv_BBoxRow1, hv_BBoxCol1, hv_BBoxRow2, hv_BBoxCol2;
	HTuple  hv_BBoxRow3, hv_BBoxCol3, hv_BBoxRow4, hv_BBoxCol4;
	HTuple  hv_BBoxCol1New, hv_BBoxCol2New, hv_BBoxCol3New;
	HTuple  hv_BBoxCol4New, hv_BBoxRow1New, hv_BBoxRow2New;
	HTuple  hv_BBoxRow3New, hv_BBoxRow4New, hv_HomMat2DIdentity;
	HTuple  hv_HomMat2DScale, hv__, hv_BBoxPhiTmp, hv_PhiDelta;
	HTuple  hv_PhiDeltaNegativeIndices, hv_IndicesRot90, hv_IndicesRot180;
	HTuple  hv_IndicesRot270, hv_SwapIndices, hv_Tmp, hv_BBoxPhiNewIndices;
	HTuple  hv_PhiThreshold, hv_PhiToCorrect, hv_NumCorrections;

	//This procedure preprocesses the bounding boxes of type 'rectangle2' for a given sample.
	//
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get preprocess parameters.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	//The keys 'ignore_direction' and 'class_ids_no_orientation' are optional.
	hv_IgnoreDirection = 0;
	hv_ClassIDsNoOrientation = HTuple();
	GetDictParam(hv_DLPreprocessParam, "key_exists", (HTuple("ignore_direction").Append("class_ids_no_orientation")),
		&hv_KeyExists);
	if (0 != (HTuple(hv_KeyExists[0])))
	{
		//'ignore_direction' may be stored as string "true"/"false"; normalize to 0/1.
		GetDictTuple(hv_DLPreprocessParam, "ignore_direction", &hv_IgnoreDirection);
		if (0 != (int(hv_IgnoreDirection == HTuple("true"))))
		{
			hv_IgnoreDirection = 1;
		}
		else if (0 != (int(hv_IgnoreDirection == HTuple("false"))))
		{
			hv_IgnoreDirection = 0;
		}
	}
	if (0 != (HTuple(hv_KeyExists[1])))
	{
		GetDictTuple(hv_DLPreprocessParam, "class_ids_no_orientation", &hv_ClassIDsNoOrientation);
	}
	//
	//Get bounding box coordinates and labels.
	try
	{
		GetDictTuple(hv_DLSample, "bbox_row", &hv_BBoxRow);
		GetDictTuple(hv_DLSample, "bbox_col", &hv_BBoxCol);
		GetDictTuple(hv_DLSample, "bbox_length1", &hv_BBoxLength1);
		GetDictTuple(hv_DLSample, "bbox_length2", &hv_BBoxLength2);
		GetDictTuple(hv_DLSample, "bbox_phi", &hv_BBoxPhi);
		GetDictTuple(hv_DLSample, "bbox_label_id", &hv_BBoxLabel);
	}
	// catch (Exception) 
	catch (HException &HDevExpDefaultException)
	{
		HDevExpDefaultException.ToHTuple(&hv_Exception);
		GetDictTuple(hv_DLSample, "image_id", &hv_ImageId);
		//Error code 1302 means a dict key was not found.
		if (0 != (int(HTuple(hv_Exception[0]) == 1302)))
		{
			hv_ExceptionMessage = "A bounding box coordinate key is missing.";
		}
		else
		{
			hv_ExceptionMessage = ((const HTuple&)hv_Exception)[2];
		}
		throw HException((("An error has occurred during preprocessing image_id " + hv_ImageId) + " when getting bounding box coordinates : ") + hv_ExceptionMessage);
	}
	//
	//Check that there are no invalid boxes.
	if (0 != (int((hv_BBoxRow.TupleLength()) > 0)))
	{
		hv_BoxesInvalid = ((hv_BBoxLength1.TupleEqualElem(0)).TupleSum()) + ((hv_BBoxLength2.TupleEqualElem(0)).TupleSum());
		if (0 != (int(hv_BoxesInvalid > 0)))
		{
			GetDictTuple(hv_DLSample, "image_id", &hv_ImageId);
			throw HException(("An error has occurred during preprocessing image_id " + hv_ImageId) + HTuple(": Sample contains at least one bounding box with zero-area, i.e. bbox_length1 == 0 or bbox_length2 == 0!"));
		}
	}
	else
	{
		//There are no bounding boxes, hence nothing to do.
		return;
	}
	//
	//If the domain is cropped, crop bounding boxes.
	if (0 != (int(hv_DomainHandling == HTuple("crop_domain"))))
	{
		//
		//Get domain.
		GetDomain(ho_ImageRaw, &ho_DomainRaw);
		//
		//Set the size of the raw image to the domain extensions.
		SmallestRectangle1(ho_DomainRaw, &hv_DomainRow1, &hv_DomainColumn1, &hv_DomainRow2,
			&hv_DomainColumn2);
		hv_WidthRaw = (hv_DomainColumn2 - hv_DomainColumn1) + 1;
		hv_HeightRaw = (hv_DomainRow2 - hv_DomainRow1) + 1;
		//
		//Crop the bounding boxes.
		//Remove the boxes with center outside of the domain.
		hv_MaskDelete = HTuple(HTuple((hv_BBoxRow.TupleLessElem(hv_DomainRow1)).TupleOr(hv_BBoxCol.TupleLessElem(hv_DomainColumn1))).TupleOr(hv_BBoxRow.TupleGreaterElem(hv_DomainRow2))).TupleOr(hv_BBoxCol.TupleGreaterElem(hv_DomainColumn2));
		//MaskNewBbox is 1 for boxes to keep and 0 for boxes to drop.
		hv_MaskNewBbox = 1 - hv_MaskDelete;
		//Store the preprocessed bounding box entries.
		hv_BBoxRowNew = (hv_BBoxRow.TupleSelectMask(hv_MaskNewBbox)) - hv_DomainRow1;
		hv_BBoxColNew = (hv_BBoxCol.TupleSelectMask(hv_MaskNewBbox)) - hv_DomainColumn1;
		hv_BBoxLength1New = hv_BBoxLength1.TupleSelectMask(hv_MaskNewBbox);
		hv_BBoxLength2New = hv_BBoxLength2.TupleSelectMask(hv_MaskNewBbox);
		hv_BBoxPhiNew = hv_BBoxPhi.TupleSelectMask(hv_MaskNewBbox);
		hv_BBoxLabelNew = hv_BBoxLabel.TupleSelectMask(hv_MaskNewBbox);
		//
		//If we remove/select bounding boxes we also need to filter the corresponding
		//instance segmentation masks if they exist.
		filter_dl_sample_instance_segmentation_masks(hv_DLSample, hv_MaskNewBbox);
		//
	}
	else if (0 != (int(hv_DomainHandling == HTuple("full_domain"))))
	{
		//If the entire image is used, set the variables accordingly.
		//Get the original size.
		GetImageSize(ho_ImageRaw, &hv_WidthRaw, &hv_HeightRaw);
		//Set new coordinates to input coordinates.
		hv_BBoxRowNew = hv_BBoxRow;
		hv_BBoxColNew = hv_BBoxCol;
		hv_BBoxLength1New = hv_BBoxLength1;
		hv_BBoxLength2New = hv_BBoxLength2;
		hv_BBoxPhiNew = hv_BBoxPhi;
		hv_BBoxLabelNew = hv_BBoxLabel;
	}
	else
	{
		throw HException("Unsupported parameter value for 'domain_handling'");
	}
	//
	//Generate smallest enclosing axis-aligned bounding box for classes in ClassIDsNoOrientation.
	//Collect the indices of all boxes whose class id is in ClassIDsNoOrientation.
	hv_ClassIDsNoOrientationIndices = HTuple();
	{
		HTuple end_val98 = (hv_ClassIDsNoOrientation.TupleLength()) - 1;
		HTuple step_val98 = 1;
		for (hv_Index = 0; hv_Index.Continue(end_val98, step_val98); hv_Index += step_val98)
		{
			hv_ClassIDsNoOrientationIndicesTmp = (hv_BBoxLabelNew.TupleEqualElem(HTuple(hv_ClassIDsNoOrientation[hv_Index]))).TupleFind(1);
			if (0 != (int(hv_ClassIDsNoOrientationIndicesTmp != -1)))
			{
				hv_ClassIDsNoOrientationIndices = hv_ClassIDsNoOrientationIndices.TupleConcat(hv_ClassIDsNoOrientationIndicesTmp);
			}
		}
	}
	if (0 != (int((hv_ClassIDsNoOrientationIndices.TupleLength()) > 0)))
	{
		//Calculate length1 and length2 using position of corners.
		//Project two opposite corners onto the axes to obtain the extents of
		//the enclosing axis-aligned box.
		hv_DirectionLength1Row = -(HTuple(hv_BBoxPhiNew[hv_ClassIDsNoOrientationIndices]).TupleSin());
		hv_DirectionLength1Col = HTuple(hv_BBoxPhiNew[hv_ClassIDsNoOrientationIndices]).TupleCos();
		hv_DirectionLength2Row = -hv_DirectionLength1Col;
		hv_DirectionLength2Col = hv_DirectionLength1Row;
		hv_Corner1Row = (HTuple(hv_BBoxLength1New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength1Row) + (HTuple(hv_BBoxLength2New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength2Row);
		hv_Corner1Col = (HTuple(hv_BBoxLength1New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength1Col) + (HTuple(hv_BBoxLength2New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength2Col);
		hv_Corner2Row = (HTuple(hv_BBoxLength1New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength1Row) - (HTuple(hv_BBoxLength2New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength2Row);
		hv_Corner2Col = (HTuple(hv_BBoxLength1New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength1Col) - (HTuple(hv_BBoxLength2New[hv_ClassIDsNoOrientationIndices])*hv_DirectionLength2Col);
		//
		hv_BBoxPhiNew[hv_ClassIDsNoOrientationIndices] = 0.0;
		hv_BBoxLength1New[hv_ClassIDsNoOrientationIndices] = (hv_Corner1Col.TupleAbs()).TupleMax2(hv_Corner2Col.TupleAbs());
		hv_BBoxLength2New[hv_ClassIDsNoOrientationIndices] = (hv_Corner1Row.TupleAbs()).TupleMax2(hv_Corner2Row.TupleAbs());
	}
	//
	//Rescale bounding boxes.
	//
	//Get required images width and height.
	//
	//Only rescale bounding boxes if the required image dimensions are not the raw dimensions.
	if (0 != (HTuple(int(hv_ImageHeight != hv_HeightRaw)).TupleOr(int(hv_ImageWidth != hv_WidthRaw))))
	{
		//
		//Calculate rescaling factor.
		calculate_dl_image_zoom_factors(hv_WidthRaw, hv_HeightRaw, hv_ImageWidth, hv_ImageHeight,
			hv_DLPreprocessParam, &hv_FactorResampleWidth, &hv_FactorResampleHeight);
		//
		//Anisotropic scaling (different width/height factors) shears oriented
		//rectangles, so the corners are transformed individually and a new
		//enclosing rectangle2 is fitted afterwards.
		if (0 != (HTuple(int(hv_FactorResampleHeight != hv_FactorResampleWidth)).TupleAnd(int((hv_BBoxRowNew.TupleLength()) > 0))))
		{
			//In order to preserve the correct orientation we have to transform the points individually.
			//Get the coordinates of the four corner points.
			convert_rect2_5to8param(hv_BBoxRowNew, hv_BBoxColNew, hv_BBoxLength1New, hv_BBoxLength2New,
				hv_BBoxPhiNew, &hv_BBoxRow1, &hv_BBoxCol1, &hv_BBoxRow2, &hv_BBoxCol2,
				&hv_BBoxRow3, &hv_BBoxCol3, &hv_BBoxRow4, &hv_BBoxCol4);
			//
			//Rescale the coordinates.
			hv_BBoxCol1New = hv_BBoxCol1 * hv_FactorResampleWidth;
			hv_BBoxCol2New = hv_BBoxCol2 * hv_FactorResampleWidth;
			hv_BBoxCol3New = hv_BBoxCol3 * hv_FactorResampleWidth;
			hv_BBoxCol4New = hv_BBoxCol4 * hv_FactorResampleWidth;
			hv_BBoxRow1New = hv_BBoxRow1 * hv_FactorResampleHeight;
			hv_BBoxRow2New = hv_BBoxRow2 * hv_FactorResampleHeight;
			hv_BBoxRow3New = hv_BBoxRow3 * hv_FactorResampleHeight;
			hv_BBoxRow4New = hv_BBoxRow4 * hv_FactorResampleHeight;
			//
			//The rectangles will get sheared, that is why new rectangles have to be found.
			//Generate homography to scale rectangles.
			HomMat2dIdentity(&hv_HomMat2DIdentity);
			HomMat2dScale(hv_HomMat2DIdentity, hv_FactorResampleHeight, hv_FactorResampleWidth,
				0, 0, &hv_HomMat2DScale);
			//Generate XLD contours for the rectangles.
			GenRectangle2ContourXld(&ho_Rectangle2XLD, hv_BBoxRowNew, hv_BBoxColNew, hv_BBoxPhiNew,
				hv_BBoxLength1New, hv_BBoxLength2New);
			//Scale the XLD contours --> results in sheared regions.
			AffineTransContourXld(ho_Rectangle2XLD, &ho_Rectangle2XLDSheared, hv_HomMat2DScale);
			SmallestRectangle2Xld(ho_Rectangle2XLDSheared, &hv_BBoxRowNew, &hv_BBoxColNew,
				&hv_BBoxPhiNew, &hv_BBoxLength1New, &hv_BBoxLength2New);
			//
			//smallest_rectangle2_xld might change the orientation of the bounding box.
			//Hence, take the orientation that is closest to the one obtained out of the 4 corner points.
			convert_rect2_8to5param(hv_BBoxRow1New, hv_BBoxCol1New, hv_BBoxRow2New, hv_BBoxCol2New,
				hv_BBoxRow3New, hv_BBoxCol3New, hv_BBoxRow4New, hv_BBoxCol4New, hv_IgnoreDirection,
				&hv__, &hv__, &hv__, &hv__, &hv_BBoxPhiTmp);
			//PhiDelta is the angle difference between the corner-derived
			//orientation and the fitted rectangle, normalized to [0, 360) deg.
			hv_PhiDelta = (hv_BBoxPhiTmp - hv_BBoxPhiNew).TupleFmod(HTuple(360).TupleRad());
			//Guarantee that angles are positive.
			hv_PhiDeltaNegativeIndices = (hv_PhiDelta.TupleLessElem(0.0)).TupleFind(1);
			if (0 != (int(hv_PhiDeltaNegativeIndices != -1)))
			{
				hv_PhiDelta[hv_PhiDeltaNegativeIndices] = HTuple(hv_PhiDelta[hv_PhiDeltaNegativeIndices]) + (HTuple(360).TupleRad());
			}
			//Snap the fitted orientation to the nearest multiple of 90 deg of
			//the corner-derived orientation; 90/270 deg rotations also swap
			//length1 and length2.
			hv_IndicesRot90 = HTuple((hv_PhiDelta.TupleGreaterElem(HTuple(45).TupleRad())).TupleAnd(hv_PhiDelta.TupleLessEqualElem(HTuple(135).TupleRad()))).TupleFind(1);
			hv_IndicesRot180 = HTuple((hv_PhiDelta.TupleGreaterElem(HTuple(135).TupleRad())).TupleAnd(hv_PhiDelta.TupleLessEqualElem(HTuple(225).TupleRad()))).TupleFind(1);
			hv_IndicesRot270 = HTuple((hv_PhiDelta.TupleGreaterElem(HTuple(225).TupleRad())).TupleAnd(hv_PhiDelta.TupleLessEqualElem(HTuple(315).TupleRad()))).TupleFind(1);
			hv_SwapIndices = HTuple();
			if (0 != (int(hv_IndicesRot90 != -1)))
			{
				hv_BBoxPhiNew[hv_IndicesRot90] = HTuple(hv_BBoxPhiNew[hv_IndicesRot90]) + (HTuple(90).TupleRad());
				hv_SwapIndices = hv_SwapIndices.TupleConcat(hv_IndicesRot90);
			}
			if (0 != (int(hv_IndicesRot180 != -1)))
			{
				hv_BBoxPhiNew[hv_IndicesRot180] = HTuple(hv_BBoxPhiNew[hv_IndicesRot180]) + (HTuple(180).TupleRad());
			}
			if (0 != (int(hv_IndicesRot270 != -1)))
			{
				hv_BBoxPhiNew[hv_IndicesRot270] = HTuple(hv_BBoxPhiNew[hv_IndicesRot270]) + (HTuple(270).TupleRad());
				hv_SwapIndices = hv_SwapIndices.TupleConcat(hv_IndicesRot270);
			}
			if (0 != (int(hv_SwapIndices != HTuple())))
			{
				hv_Tmp = HTuple(hv_BBoxLength1New[hv_SwapIndices]);
				hv_BBoxLength1New[hv_SwapIndices] = HTuple(hv_BBoxLength2New[hv_SwapIndices]);
				hv_BBoxLength2New[hv_SwapIndices] = hv_Tmp;
			}
			//Change angles such that they lie in the range (-180deg, 180deg].
			hv_BBoxPhiNewIndices = (hv_BBoxPhiNew.TupleGreaterElem(HTuple(180).TupleRad())).TupleFind(1);
			if (0 != (int(hv_BBoxPhiNewIndices != -1)))
			{
				hv_BBoxPhiNew[hv_BBoxPhiNewIndices] = HTuple(hv_BBoxPhiNew[hv_BBoxPhiNewIndices]) - (HTuple(360).TupleRad());
			}
			//
		}
		else
		{
			//This branch is only reached when the width and height resample
			//factors are equal (isotropic scaling) or there are no boxes, so
			//applying the width factor to all coordinates is correct.
			hv_BBoxColNew = hv_BBoxColNew * hv_FactorResampleWidth;
			hv_BBoxRowNew = hv_BBoxRowNew * hv_FactorResampleWidth;
			hv_BBoxLength1New = hv_BBoxLength1New * hv_FactorResampleWidth;
			hv_BBoxLength2New = hv_BBoxLength2New * hv_FactorResampleWidth;
			//Phi stays the same.
		}
		//
	}
	//
	//Adapt the bounding box angles such that they are within the correct range,
	//which is (-180deg,180deg] for 'ignore_direction'==false and (-90deg,90deg] else.
	hv_PhiThreshold = (HTuple(180).TupleRad()) - (hv_IgnoreDirection*(HTuple(90).TupleRad()));
	hv_PhiDelta = 2 * hv_PhiThreshold;
	//Correct angles that are too large.
	hv_PhiToCorrect = (hv_BBoxPhiNew.TupleGreaterElem(hv_PhiThreshold)).TupleFind(1);
	if (0 != (HTuple(int(hv_PhiToCorrect != -1)).TupleAnd(int(hv_PhiToCorrect != HTuple()))))
	{
		hv_NumCorrections = (((HTuple(hv_BBoxPhiNew[hv_PhiToCorrect]) - hv_PhiThreshold) / hv_PhiDelta).TupleInt()) + 1;
		hv_BBoxPhiNew[hv_PhiToCorrect] = HTuple(hv_BBoxPhiNew[hv_PhiToCorrect]) - (hv_NumCorrections*hv_PhiDelta);
	}
	//Correct angles that are too small.
	hv_PhiToCorrect = (hv_BBoxPhiNew.TupleLessEqualElem(-hv_PhiThreshold)).TupleFind(1);
	if (0 != (HTuple(int(hv_PhiToCorrect != -1)).TupleAnd(int(hv_PhiToCorrect != HTuple()))))
	{
		hv_NumCorrections = ((((HTuple(hv_BBoxPhiNew[hv_PhiToCorrect]) + hv_PhiThreshold).TupleAbs()) / hv_PhiDelta).TupleInt()) + 1;
		hv_BBoxPhiNew[hv_PhiToCorrect] = HTuple(hv_BBoxPhiNew[hv_PhiToCorrect]) + (hv_NumCorrections*hv_PhiDelta);
	}
	//
	//Check that there are no invalid boxes.
	if (0 != (int((hv_BBoxRowNew.TupleLength()) > 0)))
	{
		hv_BoxesInvalid = ((hv_BBoxLength1New.TupleEqualElem(0)).TupleSum()) + ((hv_BBoxLength2New.TupleEqualElem(0)).TupleSum());
		if (0 != (int(hv_BoxesInvalid > 0)))
		{
			GetDictTuple(hv_DLSample, "image_id", &hv_ImageId);
			throw HException(("An error has occurred during preprocessing image_id " + hv_ImageId) + HTuple(": Sample contains at least one box with zero-area, i.e. bbox_length1 == 0 or bbox_length2 == 0!"));
		}
	}
	//Write the transformed boxes back into the sample dictionary.
	SetDictTuple(hv_DLSample, "bbox_row", hv_BBoxRowNew);
	SetDictTuple(hv_DLSample, "bbox_col", hv_BBoxColNew);
	SetDictTuple(hv_DLSample, "bbox_length1", hv_BBoxLength1New);
	SetDictTuple(hv_DLSample, "bbox_length2", hv_BBoxLength2New);
	SetDictTuple(hv_DLSample, "bbox_phi", hv_BBoxPhiNew);
	SetDictTuple(hv_DLSample, "bbox_label_id", hv_BBoxLabelNew);
	//
	return;

}

// Chapter: Deep Learning / Model
// Short Description: Preprocess images for deep-learning-based training and inference. 
//
// Preprocesses the images in ho_Images according to the parameters stored in the
// dictionary hv_DLPreprocessParam and returns the result in ho_ImagesPreprocessed.
// Processing steps, in order: parameter validation, dispatch to OCR-specific
// procedures, channel-count validation, domain handling, zoom to the model input
// size, gray-value normalization, channel-count adaptation, and (if required)
// restoring preserved image domains.
//
// Parameters:
//   ho_Images             - input images of type 'byte', 'int*', 'uint2', or 'real'
//   ho_ImagesPreprocessed - output: the preprocessed images
//   hv_DLPreprocessParam  - dictionary with keys 'image_width', 'image_height',
//                           'image_num_channels', 'image_range_min',
//                           'image_range_max', 'domain_handling',
//                           'normalization_type', 'model_type', and (optionally)
//                           'mean_values_normalization' /
//                           'deviation_values_normalization'
//
// Throws HException if no images are given, an image type or channel count is
// invalid, or an unsupported parameter value is encountered.
void preprocess_dl_model_images(HObject ho_Images, HObject *ho_ImagesPreprocessed,
	HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_PreservedDomains, ho_ImageSelected;
	HObject  ho_DomainSelected, ho_ImagesScaled, ho_ImageScaled;
	HObject  ho_Channel, ho_ChannelScaled, ho_ThreeChannelImage;
	HObject  ho_SingleChannelImage;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_ImageNumChannels;
	HTuple  hv_ImageRangeMin, hv_ImageRangeMax, hv_DomainHandling;
	HTuple  hv_NormalizationType, hv_ModelType, hv_NumImages;
	HTuple  hv_Type, hv_NumMatches, hv_InputNumChannels, hv_OutputNumChannels;
	HTuple  hv_NumChannels1, hv_NumChannels3, hv_AreInputNumChannels1;
	HTuple  hv_AreInputNumChannels3, hv_AreInputNumChannels1Or3;
	HTuple  hv_ValidNumChannels, hv_ValidNumChannelsText, hv_PreserveDomain;
	HTuple  hv_Row1, hv_Column1, hv_Row2, hv_Column2, hv_UniqRow1;
	HTuple  hv_UniqColumn1, hv_UniqRow2, hv_UniqColumn2, hv_RectangleIndex;
	HTuple  hv_OriginalWidth, hv_OriginalHeight, hv_UniqWidth;
	HTuple  hv_UniqHeight, hv_ScaleWidth, hv_ScaleHeight, hv_ScaleIndex;
	HTuple  hv_ImageIndex, hv_NumChannels, hv_ChannelIndex;
	HTuple  hv_Min, hv_Max, hv_Range, hv_Scale, hv_Shift, hv_MeanValues;
	HTuple  hv_DeviationValues, hv_UseDefaultNormalizationValues;
	HTuple  hv_Exception, hv_Indices, hv_RescaleRange, hv_CurrentNumChannels;
	HTuple  hv_DiffNumChannelsIndices, hv_Index, hv_DiffNumChannelsIndex;
	HTuple  hv_NumDomains, hv_DomainIndex;

	//
	//This procedure preprocesses the provided Images according to the parameters in
	//the dictionary DLPreprocessParam. Note that depending on the images, additional
	//preprocessing steps might be beneficial.
	//
	//Validate the preprocessing parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get the preprocessing parameters.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "image_num_channels", &hv_ImageNumChannels);
	GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_ImageRangeMin);
	GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_ImageRangeMax);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	GetDictTuple(hv_DLPreprocessParam, "normalization_type", &hv_NormalizationType);
	GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_ModelType);
	//
	//Validate the type of the input images.
	CountObj(ho_Images, &hv_NumImages);
	if (0 != (int(hv_NumImages == 0)))
	{
		throw HException("Please provide some images to preprocess.");
	}
	GetImageType(ho_Images, &hv_Type);
	//TupleRegexpTest counts how many of the per-image type strings match the
	//regex; all images must match for the input to be valid.
	TupleRegexpTest(hv_Type, "byte|int|real", &hv_NumMatches);
	if (0 != (int(hv_NumMatches != hv_NumImages)))
	{
		throw HException(HTuple("Please provide only images of type 'byte', 'int1', 'int2', 'uint2', 'int4', 'int8', or 'real'."));
	}
	//
	//Handle ocr_recognition models.
	if (0 != (int(hv_ModelType == HTuple("ocr_recognition"))))
	{
		preprocess_dl_model_images_ocr_recognition(ho_Images, &(*ho_ImagesPreprocessed),
			hv_DLPreprocessParam);
		return;
	}
	//
	//Handle ocr_detection models.
	if (0 != (int(hv_ModelType == HTuple("ocr_detection"))))
	{
		preprocess_dl_model_images_ocr_detection(ho_Images, &(*ho_ImagesPreprocessed),
			hv_DLPreprocessParam);
		return;
	}
	//
	//Validate the number channels of the input images.
	CountChannels(ho_Images, &hv_InputNumChannels);
	//HTuple(n, v) builds a tuple of n copies of v: the expected channel count
	//for every image in the batch.
	hv_OutputNumChannels = HTuple(hv_NumImages, hv_ImageNumChannels);
	//Only for 'image_num_channels' 1 and 3 combinations of 1- and 3-channel images are allowed.
	if (0 != (HTuple(int(hv_ImageNumChannels == 1)).TupleOr(int(hv_ImageNumChannels == 3))))
	{
		hv_NumChannels1 = HTuple(hv_NumImages, 1);
		hv_NumChannels3 = HTuple(hv_NumImages, 3);
		hv_AreInputNumChannels1 = hv_InputNumChannels.TupleEqualElem(hv_NumChannels1);
		hv_AreInputNumChannels3 = hv_InputNumChannels.TupleEqualElem(hv_NumChannels3);
		//Element-wise OR: each image must have either 1 or 3 channels.
		hv_AreInputNumChannels1Or3 = hv_AreInputNumChannels1 + hv_AreInputNumChannels3;
		hv_ValidNumChannels = int(hv_AreInputNumChannels1Or3 == hv_NumChannels1);
		hv_ValidNumChannelsText = "Valid numbers of channels for the specified model are 1 or 3.";
	}
	else
	{
		hv_ValidNumChannels = int(hv_InputNumChannels == hv_OutputNumChannels);
		hv_ValidNumChannelsText = ("Valid number of channels for the specified model is " + hv_ImageNumChannels) + ".";
	}
	if (0 != (hv_ValidNumChannels.TupleNot()))
	{
		throw HException("Please provide images with a valid number of channels. " + hv_ValidNumChannelsText);
	}
	//Preprocess the images.
	//
	//For models of type '3d_gripping_point_detection' and 'gc_anomaly_detection' (case of keep_domain), the preprocessing steps need to be performed on full
	//domain images while the domains are preserved and set back into the images after the preprocessing.
	hv_PreserveDomain = 0;
	if (0 != (HTuple(HTuple(int(hv_ModelType == HTuple("3d_gripping_point_detection"))).TupleAnd(HTuple(int(hv_DomainHandling == HTuple("crop_domain"))).TupleOr(int(hv_DomainHandling == HTuple("keep_domain"))))).TupleOr(HTuple(int(hv_ModelType == HTuple("gc_anomaly_detection"))).TupleAnd(int(hv_DomainHandling == HTuple("keep_domain"))))))
	{
		hv_PreserveDomain = 1;
		GetDomain(ho_Images, &ho_PreservedDomains);
		FullDomain(ho_Images, &ho_Images);
	}
	//
	//Apply the domain to the images.
	if (0 != (int(hv_DomainHandling == HTuple("full_domain"))))
	{
		FullDomain(ho_Images, &ho_Images);
	}
	else if (0 != (int(hv_DomainHandling == HTuple("crop_domain"))))
	{
		if (0 != hv_PreserveDomain)
		{
			//In case of preserved domain, the crop is performed with the smallest rectangle of the
			//domain to avoid out of domain pixels being set to 0.
			SmallestRectangle1(ho_PreservedDomains, &hv_Row1, &hv_Column1, &hv_Row2, &hv_Column2);
			TupleUniq(hv_Row1, &hv_UniqRow1);
			TupleUniq(hv_Column1, &hv_UniqColumn1);
			TupleUniq(hv_Row2, &hv_UniqRow2);
			TupleUniq(hv_Column2, &hv_UniqColumn2);
			//If all images share the same bounding rectangle, crop the whole
			//batch at once; otherwise crop each image individually below.
			if (0 != (HTuple(HTuple(HTuple(int((hv_UniqRow1.TupleLength()) == 1)).TupleAnd(int((hv_UniqColumn1.TupleLength()) == 1))).TupleAnd(int((hv_UniqRow2.TupleLength()) == 1))).TupleAnd(int((hv_UniqColumn2.TupleLength()) == 1))))
			{
				CropRectangle1(ho_Images, &ho_Images, hv_UniqRow1, hv_UniqColumn1, hv_UniqRow2,
					hv_UniqColumn2);
				//Shift the preserved domains into the cropped coordinate frame.
				MoveRegion(ho_PreservedDomains, &ho_PreservedDomains, -hv_UniqRow1, -hv_UniqColumn1);
			}
			else
			{
				{
					HTuple end_val87 = (hv_Row1.TupleLength()) - 1;
					HTuple step_val87 = 1;
					for (hv_RectangleIndex = 0; hv_RectangleIndex.Continue(end_val87, step_val87); hv_RectangleIndex += step_val87)
					{
						SelectObj(ho_Images, &ho_ImageSelected, hv_RectangleIndex + 1);
						CropRectangle1(ho_ImageSelected, &ho_ImageSelected, HTuple(hv_Row1[hv_RectangleIndex]),
							HTuple(hv_Column1[hv_RectangleIndex]), HTuple(hv_Row2[hv_RectangleIndex]),
							HTuple(hv_Column2[hv_RectangleIndex]));
						ReplaceObj(ho_Images, ho_ImageSelected, &ho_Images, hv_RectangleIndex + 1);
						SelectObj(ho_PreservedDomains, &ho_DomainSelected, hv_RectangleIndex + 1);
						MoveRegion(ho_DomainSelected, &ho_DomainSelected, -HTuple(hv_Row1[hv_RectangleIndex]),
							-HTuple(hv_Column1[hv_RectangleIndex]));
						ReplaceObj(ho_PreservedDomains, ho_DomainSelected, &ho_PreservedDomains,
							hv_RectangleIndex + 1);
					}
				}
			}
		}
		else
		{
			CropDomain(ho_Images, &ho_Images);
		}
	}
	else if (0 != (HTuple(int(hv_DomainHandling == HTuple("keep_domain"))).TupleAnd(HTuple(HTuple(int(hv_ModelType == HTuple("anomaly_detection"))).TupleOr(int(hv_ModelType == HTuple("gc_anomaly_detection")))).TupleOr(int(hv_ModelType == HTuple("3d_gripping_point_detection"))))))
	{
		//The option 'keep_domain' is only supported for some model types.
	}
	else
	{
		throw HException("Unsupported parameter value for 'domain_handling'.");
	}
	//
	//Zoom preserved domains before zooming the images.
	if (0 != hv_PreserveDomain)
	{
		GetImageSize(ho_Images, &hv_OriginalWidth, &hv_OriginalHeight);
		TupleUniq(hv_OriginalWidth, &hv_UniqWidth);
		TupleUniq(hv_OriginalHeight, &hv_UniqHeight);
		//Same size for all images: one batch zoom; otherwise per-image zoom.
		if (0 != (HTuple(int((hv_UniqWidth.TupleLength()) == 1)).TupleAnd(int((hv_UniqHeight.TupleLength()) == 1))))
		{
			hv_ScaleWidth = hv_ImageWidth / (hv_UniqWidth.TupleReal());
			hv_ScaleHeight = hv_ImageHeight / (hv_UniqHeight.TupleReal());
			ZoomRegion(ho_PreservedDomains, &ho_PreservedDomains, hv_ScaleWidth, hv_ScaleHeight);
		}
		else
		{
			hv_ScaleWidth = hv_ImageWidth / (hv_OriginalWidth.TupleReal());
			hv_ScaleHeight = hv_ImageHeight / (hv_OriginalHeight.TupleReal());
			{
				HTuple end_val117 = (hv_ScaleWidth.TupleLength()) - 1;
				HTuple step_val117 = 1;
				for (hv_ScaleIndex = 0; hv_ScaleIndex.Continue(end_val117, step_val117); hv_ScaleIndex += step_val117)
				{
					SelectObj(ho_PreservedDomains, &ho_DomainSelected, hv_ScaleIndex + 1);
					ZoomRegion(ho_DomainSelected, &ho_DomainSelected, HTuple(hv_ScaleWidth[hv_ScaleIndex]),
						HTuple(hv_ScaleHeight[hv_ScaleIndex]));
					ReplaceObj(ho_PreservedDomains, ho_DomainSelected, &ho_PreservedDomains,
						hv_ScaleIndex + 1);
				}
			}
		}
	}
	//
	//Convert the images to real and zoom the images.
	//Zoom first to speed up if all image types are supported by zoom_image_size.
	if (0 != (int((hv_Type.TupleRegexpTest("int1|int4|int8")) == 0)))
	{
		ZoomImageSize(ho_Images, &ho_Images, hv_ImageWidth, hv_ImageHeight, "constant");
		ConvertImageType(ho_Images, &ho_Images, "real");
	}
	else
	{
		//Types unsupported by zoom_image_size must be converted to 'real' first.
		ConvertImageType(ho_Images, &ho_Images, "real");
		ZoomImageSize(ho_Images, &ho_Images, hv_ImageWidth, hv_ImageHeight, "constant");
	}
	//
	if (0 != (int(hv_NormalizationType == HTuple("all_channels"))))
	{
		//Scale for each image the gray values of all channels to ImageRangeMin-ImageRangeMax.
		GenEmptyObj(&ho_ImagesScaled);
		{
			HTuple end_val138 = hv_NumImages;
			HTuple step_val138 = 1;
			for (hv_ImageIndex = 1; hv_ImageIndex.Continue(end_val138, step_val138); hv_ImageIndex += step_val138)
			{
				SelectObj(ho_Images, &ho_ImageSelected, hv_ImageIndex);
				CountChannels(ho_ImageSelected, &hv_NumChannels);
				GenEmptyObj(&ho_ImageScaled);
				{
					HTuple end_val142 = hv_NumChannels;
					HTuple step_val142 = 1;
					for (hv_ChannelIndex = 1; hv_ChannelIndex.Continue(end_val142, step_val142); hv_ChannelIndex += step_val142)
					{
						AccessChannel(ho_ImageSelected, &ho_Channel, hv_ChannelIndex);
						MinMaxGray(ho_Channel, ho_Channel, 0, &hv_Min, &hv_Max, &hv_Range);
						//Guard against division by zero for constant channels.
						if (0 != (int((hv_Max - hv_Min) == 0)))
						{
							hv_Scale = 1;
						}
						else
						{
							hv_Scale = (hv_ImageRangeMax - hv_ImageRangeMin) / (hv_Max - hv_Min);
						}
						hv_Shift = ((-hv_Scale)*hv_Min) + hv_ImageRangeMin;
						ScaleImage(ho_Channel, &ho_ChannelScaled, hv_Scale, hv_Shift);
						AppendChannel(ho_ImageScaled, ho_ChannelScaled, &ho_ImageScaled);
					}
				}
				ConcatObj(ho_ImagesScaled, ho_ImageScaled, &ho_ImagesScaled);
			}
		}
		ho_Images = ho_ImagesScaled;
	}
	else if (0 != (int(hv_NormalizationType == HTuple("first_channel"))))
	{
		//Scale for each image the gray values of first channel to ImageRangeMin-ImageRangeMax.
		GenEmptyObj(&ho_ImagesScaled);
		{
			HTuple end_val160 = hv_NumImages;
			HTuple step_val160 = 1;
			for (hv_ImageIndex = 1; hv_ImageIndex.Continue(end_val160, step_val160); hv_ImageIndex += step_val160)
			{
				SelectObj(ho_Images, &ho_ImageSelected, hv_ImageIndex);
				//MinMaxGray evaluates the first channel; scale/shift are then
				//applied to all channels of the image.
				MinMaxGray(ho_ImageSelected, ho_ImageSelected, 0, &hv_Min, &hv_Max, &hv_Range);
				if (0 != (int((hv_Max - hv_Min) == 0)))
				{
					hv_Scale = 1;
				}
				else
				{
					hv_Scale = (hv_ImageRangeMax - hv_ImageRangeMin) / (hv_Max - hv_Min);
				}
				hv_Shift = ((-hv_Scale)*hv_Min) + hv_ImageRangeMin;
				ScaleImage(ho_ImageSelected, &ho_ImageSelected, hv_Scale, hv_Shift);
				ConcatObj(ho_ImagesScaled, ho_ImageSelected, &ho_ImagesScaled);
			}
		}
		ho_Images = ho_ImagesScaled;
	}
	else if (0 != (int(hv_NormalizationType == HTuple("constant_values"))))
	{
		//Scale for each image the gray values of all channels to the corresponding channel DeviationValues[].
		try
		{
			GetDictTuple(hv_DLPreprocessParam, "mean_values_normalization", &hv_MeanValues);
			GetDictTuple(hv_DLPreprocessParam, "deviation_values_normalization", &hv_DeviationValues);
			hv_UseDefaultNormalizationValues = 0;
		}
		// catch (Exception) 
		catch (HException &HDevExpDefaultException)
		{
			HDevExpDefaultException.ToHTuple(&hv_Exception);
			//Keys not present: fall back to widely used default RGB statistics
			//(presumably the ImageNet mean/deviation values).
			hv_MeanValues.Clear();
			hv_MeanValues[0] = 123.675;
			hv_MeanValues[1] = 116.28;
			hv_MeanValues[2] = 103.53;
			hv_DeviationValues.Clear();
			hv_DeviationValues[0] = 58.395;
			hv_DeviationValues[1] = 57.12;
			hv_DeviationValues[2] = 57.375;
			hv_UseDefaultNormalizationValues = 1;
		}
		GenEmptyObj(&ho_ImagesScaled);
		{
			HTuple end_val185 = hv_NumImages;
			HTuple step_val185 = 1;
			for (hv_ImageIndex = 1; hv_ImageIndex.Continue(end_val185, step_val185); hv_ImageIndex += step_val185)
			{
				SelectObj(ho_Images, &ho_ImageSelected, hv_ImageIndex);
				CountChannels(ho_ImageSelected, &hv_NumChannels);
				//Ensure that the number of channels is equal |DeviationValues| and |MeanValues|
				if (0 != hv_UseDefaultNormalizationValues)
				{
					if (0 != (int(hv_NumChannels == 1)))
					{
						//Replicate the single channel so the 3-value defaults apply.
						Compose3(ho_ImageSelected, ho_ImageSelected, ho_ImageSelected, &ho_ImageSelected
						);
						CountChannels(ho_ImageSelected, &hv_NumChannels);
					}
					else if (0 != (int(hv_NumChannels != 3)))
					{
						throw HException("Using default values for normalization type 'constant_values' is allowed only for 1- and 3-channel images.");
					}
				}
				if (0 != (HTuple(int((hv_MeanValues.TupleLength()) != hv_NumChannels)).TupleOr(int((hv_DeviationValues.TupleLength()) != hv_NumChannels))))
				{
					throw HException("The length of mean and deviation values for normalization type 'constant_values' have to be the same size as the number of channels of the image.");
				}
				GenEmptyObj(&ho_ImageScaled);
				{
					HTuple end_val201 = hv_NumChannels;
					HTuple step_val201 = 1;
					for (hv_ChannelIndex = 1; hv_ChannelIndex.Continue(end_val201, step_val201); hv_ChannelIndex += step_val201)
					{
						AccessChannel(ho_ImageSelected, &ho_Channel, hv_ChannelIndex);
						//Per-channel standardization: (value - mean) / deviation.
						hv_Scale = 1.0 / HTuple(hv_DeviationValues[hv_ChannelIndex - 1]);
						hv_Shift = (-hv_Scale)*HTuple(hv_MeanValues[hv_ChannelIndex - 1]);
						ScaleImage(ho_Channel, &ho_ChannelScaled, hv_Scale, hv_Shift);
						AppendChannel(ho_ImageScaled, ho_ChannelScaled, &ho_ImageScaled);
					}
				}
				ConcatObj(ho_ImagesScaled, ho_ImageScaled, &ho_ImagesScaled);
			}
		}
		ho_Images = ho_ImagesScaled;
	}
	else if (0 != (int(hv_NormalizationType == HTuple("none"))))
	{
		TupleFind(hv_Type, "byte", &hv_Indices);
		if (0 != (int(hv_Indices != -1)))
		{
			//Shift the gray values from [0-255] to the expected range for byte images.
			hv_RescaleRange = (hv_ImageRangeMax - hv_ImageRangeMin) / 255.0;
			SelectObj(ho_Images, &ho_ImageSelected, hv_Indices + 1);
			ScaleImage(ho_ImageSelected, &ho_ImageSelected, hv_RescaleRange, hv_ImageRangeMin);
			ReplaceObj(ho_Images, ho_ImageSelected, &ho_Images, hv_Indices + 1);
		}
	}
	//Generated guard: always true when reached (the 'none' case was handled
	//above), so any unknown normalization type throws here.
	else if (0 != (int(hv_NormalizationType != HTuple("none"))))
	{
		throw HException("Unsupported parameter value for 'normalization_type'");
	}
	//
	//Ensure that the number of channels of the resulting images is consistent with the
	//number of channels of the given model. The only exceptions that are adapted below
	//are combinations of 1- and 3-channel images if ImageNumChannels is either 1 or 3.
	if (0 != (HTuple(int(hv_ImageNumChannels == 1)).TupleOr(int(hv_ImageNumChannels == 3))))
	{
		CountChannels(ho_Images, &hv_CurrentNumChannels);
		TupleFind(hv_CurrentNumChannels.TupleNotEqualElem(hv_OutputNumChannels), 1, &hv_DiffNumChannelsIndices);
		if (0 != (int(hv_DiffNumChannelsIndices != -1)))
		{
			{
				HTuple end_val231 = (hv_DiffNumChannelsIndices.TupleLength()) - 1;
				HTuple step_val231 = 1;
				for (hv_Index = 0; hv_Index.Continue(end_val231, step_val231); hv_Index += step_val231)
				{
					hv_DiffNumChannelsIndex = HTuple(hv_DiffNumChannelsIndices[hv_Index]);
					hv_ImageIndex = hv_DiffNumChannelsIndex + 1;
					hv_NumChannels = HTuple(hv_CurrentNumChannels[hv_ImageIndex - 1]);
					SelectObj(ho_Images, &ho_ImageSelected, hv_ImageIndex);
					if (0 != (HTuple(int(hv_NumChannels == 1)).TupleAnd(int(hv_ImageNumChannels == 3))))
					{
						//Conversion from 1- to 3-channel image required
						Compose3(ho_ImageSelected, ho_ImageSelected, ho_ImageSelected, &ho_ThreeChannelImage
						);
						ReplaceObj(ho_Images, ho_ThreeChannelImage, &ho_Images, hv_ImageIndex);
					}
					else if (0 != (HTuple(int(hv_NumChannels == 3)).TupleAnd(int(hv_ImageNumChannels == 1))))
					{
						//Conversion from 3- to 1-channel image required
						Rgb1ToGray(ho_ImageSelected, &ho_SingleChannelImage);
						ReplaceObj(ho_Images, ho_SingleChannelImage, &ho_Images, hv_ImageIndex);
					}
					else
					{
						throw HException(((("Unexpected error adapting the number of channels. The number of channels of the resulting image is " + hv_NumChannels) + HTuple(", but the number of channels of the model is ")) + hv_ImageNumChannels) + ".");
					}
				}
			}
		}
	}
	//
	//In case the image domains were preserved, they need to be set back into the images.
	if (0 != hv_PreserveDomain)
	{
		CountObj(ho_PreservedDomains, &hv_NumDomains);
		{
			HTuple end_val254 = hv_NumDomains;
			HTuple step_val254 = 1;
			for (hv_DomainIndex = 1; hv_DomainIndex.Continue(end_val254, step_val254); hv_DomainIndex += step_val254)
			{
				SelectObj(ho_Images, &ho_ImageSelected, hv_DomainIndex);
				SelectObj(ho_PreservedDomains, &ho_DomainSelected, hv_DomainIndex);
				ReduceDomain(ho_ImageSelected, ho_DomainSelected, &ho_ImageSelected);
				ReplaceObj(ho_Images, ho_ImageSelected, &ho_Images, hv_DomainIndex);
			}
		}
	}
	//
	//Write preprocessed images to output variable.
	(*ho_ImagesPreprocessed) = ho_Images;
	//
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Preprocess images for deep-learning-based training and inference of Deep OCR detection models. 
//
// Preprocesses ho_Images for a Deep OCR 'ocr_detection' model according to the
// parameters in hv_DLPreprocessParam and returns the result in
// ho_ImagesPreprocessed. Each image is zoomed with an aspect-ratio preserving
// factor, converted to 'real', normalized, expanded to 3 channels if needed,
// and zero-padded to the model input size.
//
// Parameters:
//   ho_Images             - input 'byte' images with 1 or 3 channels
//   ho_ImagesPreprocessed - output: preprocessed images
//   hv_DLPreprocessParam  - preprocessing parameter dictionary; requires
//                           'model_type' == 'ocr_detection',
//                           'image_num_channels' == 3,
//                           'domain_handling' == 'full_domain', and
//                           'normalization_type' in {'all_channels', 'none'}
//
// Throws HException on empty input, invalid image type/channels, or
// unsupported parameter values.
//
// Fix: added the missing space in the 'model_type' error message
// ("is'ocr_detection'" -> "is 'ocr_detection'").
void preprocess_dl_model_images_ocr_detection(HObject ho_Images, HObject *ho_ImagesPreprocessed,
	HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_Image, ho_ImageScaled, ho_Channel;
	HObject  ho_ChannelScaled, ho_ImageG, ho_ImageB;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_ImageNumChannels;
	HTuple  hv_ImageRangeMin, hv_ImageRangeMax, hv_DomainHandling;
	HTuple  hv_NormalizationType, hv_ModelType, hv_NumImages;
	HTuple  hv_NumChannels, hv_ImageTypes, hv_InputImageWidths;
	HTuple  hv_InputImageHeights, hv_ImageRange, hv_I, hv_InputImageWidth;
	HTuple  hv_InputImageHeight, hv_ZoomFactorWidth, hv_ZoomFactorHeight;
	HTuple  hv_ZoomHeight, hv_ZoomWidth, hv_ChannelIndex, hv_Min;
	HTuple  hv_Max, hv_Range, hv_Scale, hv_Shift;

	//This procedure preprocesses the provided images according to the parameters
	//in the dictionary DLPreprocessParam for an ocr_detection model.
	//
	//Check the validity of the preprocessing parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get the preprocessing parameters.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "image_num_channels", &hv_ImageNumChannels);
	GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_ImageRangeMin);
	GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_ImageRangeMax);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	GetDictTuple(hv_DLPreprocessParam, "normalization_type", &hv_NormalizationType);
	GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_ModelType);
	//
	//Check the preprocessing parameters.
	if (0 != (int(hv_ModelType != HTuple("ocr_detection"))))
	{
		throw HException("The only 'model_type' value supported is 'ocr_detection'.");
	}
	if (0 != (int(hv_ImageNumChannels != 3)))
	{
		throw HException("The only 'image_num_channels' value supported for ocr_detection models is 3.");
	}
	if (0 != (int(hv_DomainHandling != HTuple("full_domain"))))
	{
		throw HException("The only 'domain_handling' value supported for ocr_detection models is 'full_domain'.");
	}
	if (0 != (HTuple(int(hv_NormalizationType != HTuple("none"))).TupleAnd(int(hv_NormalizationType != HTuple("all_channels")))))
	{
		throw HException("The 'normalization_type' values supported for ocr_detection models are 'all_channels' and 'none'.");
	}
	//
	//Get the image properties.
	CountObj(ho_Images, &hv_NumImages);
	CountChannels(ho_Images, &hv_NumChannels);
	GetImageType(ho_Images, &hv_ImageTypes);
	GetImageSize(ho_Images, &hv_InputImageWidths, &hv_InputImageHeights);
	//
	//Check the image properties.
	if (0 != (int(hv_NumImages == 0)))
	{
		throw HException("Please provide some images to preprocess.");
	}
	//TupleRegexpTest counts matching type strings; all images must be 'byte'.
	if (0 != (int(hv_NumImages != (hv_ImageTypes.TupleRegexpTest("byte")))))
	{
		throw HException("Please provide only images of type 'byte'.");
	}
	//Each image must have either 1 or 3 channels.
	if (0 != (int(hv_NumImages != (HTuple((hv_NumChannels.TupleEqualElem(1)).TupleOr(hv_NumChannels.TupleEqualElem(3))).TupleSum()))))
	{
		throw HException("Please provide only 1- or 3-channels images for ocr_detection models.");
	}
	//
	//Preprocess the images.
	hv_ImageRange = (hv_ImageRangeMax - hv_ImageRangeMin).TupleReal();
	{
		HTuple end_val49 = hv_NumImages - 1;
		HTuple step_val49 = 1;
		for (hv_I = 0; hv_I.Continue(end_val49, step_val49); hv_I += step_val49)
		{
			hv_InputImageWidth = HTuple(hv_InputImageWidths[hv_I]);
			hv_InputImageHeight = HTuple(hv_InputImageHeights[hv_I]);
			//
			SelectObj(ho_Images, &ho_Image, hv_I + 1);
			//
			//Calculate aspect-ratio preserving zoom factors
			calculate_dl_image_zoom_factors(hv_InputImageWidth, hv_InputImageHeight, hv_ImageWidth,
				hv_ImageHeight, hv_DLPreprocessParam, &hv_ZoomFactorWidth, &hv_ZoomFactorHeight);
			//
			//Zoom image
			hv_ZoomHeight = (hv_ZoomFactorHeight*hv_InputImageHeight).TupleRound();
			hv_ZoomWidth = (hv_ZoomFactorWidth*hv_InputImageWidth).TupleRound();
			ZoomImageSize(ho_Image, &ho_Image, hv_ZoomWidth, hv_ZoomHeight, "constant");
			//
			//Convert to real and normalize
			ConvertImageType(ho_Image, &ho_Image, "real");
			if (0 != (int(hv_NormalizationType == HTuple("all_channels"))))
			{
				//Stretch every channel independently to [ImageRangeMin, ImageRangeMax].
				GenEmptyObj(&ho_ImageScaled);
				{
					HTuple end_val67 = HTuple(hv_NumChannels[hv_I]);
					HTuple step_val67 = 1;
					for (hv_ChannelIndex = 1; hv_ChannelIndex.Continue(end_val67, step_val67); hv_ChannelIndex += step_val67)
					{
						AccessChannel(ho_Image, &ho_Channel, hv_ChannelIndex);
						MinMaxGray(ho_Channel, ho_Channel, 0, &hv_Min, &hv_Max, &hv_Range);
						//Guard against division by zero for constant channels.
						if (0 != (int((hv_Max - hv_Min) == 0)))
						{
							hv_Scale = 1;
						}
						else
						{
							hv_Scale = (hv_ImageRangeMax - hv_ImageRangeMin) / (hv_Max - hv_Min);
						}
						hv_Shift = ((-hv_Scale)*hv_Min) + hv_ImageRangeMin;
						ScaleImage(ho_Channel, &ho_ChannelScaled, hv_Scale, hv_Shift);
						AppendChannel(ho_ImageScaled, ho_ChannelScaled, &ho_ImageScaled);
					}
				}
				ho_Image = ho_ImageScaled;
			}
			else if (0 != (int(hv_NormalizationType == HTuple("none"))))
			{
				//Linearly map the byte range [0,255] to [ImageRangeMin, ImageRangeMax].
				ScaleImage(ho_Image, &ho_Image, hv_ImageRange / 255.0, hv_ImageRangeMin);
			}
			//
			//Obtain an RGB image.
			if (0 != (int(HTuple(hv_NumChannels[hv_I]) == 1)))
			{
				//Replicate the gray channel into G and B to build a 3-channel image.
				CopyImage(ho_Image, &ho_ImageG);
				CopyImage(ho_Image, &ho_ImageB);
				Compose3(ho_Image, ho_ImageG, ho_ImageB, &ho_Image);
			}
			//
			//Apply padding to fit the desired image size.
			//The padding value is zero, corresponding to the
			//border handling of the convolution layers.
			ChangeFormat(ho_Image, &ho_Image, hv_ImageWidth, hv_ImageHeight);
			ReplaceObj(ho_Images, ho_Image, &ho_Images, hv_I + 1);
		}
	}
	//
	//Return the preprocessed images.
	(*ho_ImagesPreprocessed) = ho_Images;
	return;
}

// Chapter: OCR / Deep OCR
// Short Description: Preprocess images for deep-learning-based training and inference of Deep OCR recognition models. 
//
// Preprocesses ho_Images for a Deep OCR 'ocr_recognition' model according to
// the parameters in hv_DLPreprocessParam and returns the result in
// ho_ImagesPreprocessed. Each image is converted to a single channel, zoomed to
// the model height (width capped at the model width, aspect ratio preserved),
// converted to 'real', normalized, and painted onto a zero-padded target image
// of the model input size.
//
// Parameters:
//   ho_Images             - input 'byte' or 'real' images with 1 or 3 channels
//   ho_ImagesPreprocessed - output: preprocessed images
//   hv_DLPreprocessParam  - preprocessing parameter dictionary; requires
//                           'model_type' == 'ocr_recognition',
//                           'image_num_channels' == 1,
//                           'domain_handling' == 'full_domain', and
//                           'normalization_type' in
//                           {'first_channel', 'all_channels', 'none'}
//
// Throws HException on empty input, invalid image type/channels, or
// unsupported parameter values.
//
// Fix: added the missing space in the 'model_type' error message
// ("is'ocr_recognition'" -> "is 'ocr_recognition'").
void preprocess_dl_model_images_ocr_recognition(HObject ho_Images, HObject *ho_ImagesPreprocessed,
	HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_TargetImage, ho_Image;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_ImageNumChannels;
	HTuple  hv_ImageRangeMin, hv_ImageRangeMax, hv_DomainHandling;
	HTuple  hv_NormalizationType, hv_ModelType, hv_NumImages;
	HTuple  hv_NumChannels, hv_ImageTypes, hv_InputImageWidths;
	HTuple  hv_InputImageHeights, hv_PaddingGrayval, hv_ImageRange;
	HTuple  hv_I, hv_InputImageWidth, hv_InputImageHeight, hv_InputImageWidthHeightRatio;
	HTuple  hv_ZoomHeight, hv_ZoomWidth, hv_GrayvalMin, hv_GrayvalMax;
	HTuple  hv_Range, hv_GrayvalRange, hv_Scale, hv_Shift;

	//This procedure preprocesses the provided Images according to the parameters
	//in the dictionary DLPreprocessParam for an ocr_recognition model.
	//
	//Check the validity of the preprocessing parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get the preprocessing parameters.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "image_num_channels", &hv_ImageNumChannels);
	GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_ImageRangeMin);
	GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_ImageRangeMax);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	GetDictTuple(hv_DLPreprocessParam, "normalization_type", &hv_NormalizationType);
	GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_ModelType);
	//
	//Check the preprocessing parameters.
	if (0 != (int(hv_ModelType != HTuple("ocr_recognition"))))
	{
		throw HException("The only 'model_type' value supported is 'ocr_recognition'.");
	}
	if (0 != (int(hv_ImageNumChannels != 1)))
	{
		throw HException("The only 'image_num_channels' value supported for ocr_recognition models is 1.");
	}
	if (0 != (int(hv_DomainHandling != HTuple("full_domain"))))
	{
		throw HException("The only 'domain_handling' value supported for ocr_recognition models is 'full_domain'.");
	}
	if (0 != (HTuple(HTuple(int(hv_NormalizationType != HTuple("none"))).TupleAnd(int(hv_NormalizationType != HTuple("first_channel")))).TupleAnd(int(hv_NormalizationType != HTuple("all_channels")))))
	{
		throw HException(HTuple("The 'normalization_type' values supported for ocr_recognition models are 'first_channel', 'all_channels' and 'none'."));
	}
	//
	//Get the image properties.
	CountObj(ho_Images, &hv_NumImages);
	CountChannels(ho_Images, &hv_NumChannels);
	GetImageType(ho_Images, &hv_ImageTypes);
	GetImageSize(ho_Images, &hv_InputImageWidths, &hv_InputImageHeights);
	//
	//Check the image properties.
	if (0 != (int(hv_NumImages == 0)))
	{
		throw HException("Please provide some images to preprocess.");
	}
	//TupleRegexpTest counts matching type strings; all images must match.
	if (0 != (int(hv_NumImages != (hv_ImageTypes.TupleRegexpTest("byte|real")))))
	{
		throw HException("Please provide only images of type 'byte' or 'real'.");
	}
	//Each image must have either 1 or 3 channels.
	if (0 != (int(hv_NumImages != (HTuple((hv_NumChannels.TupleEqualElem(1)).TupleOr(hv_NumChannels.TupleEqualElem(3))).TupleSum()))))
	{
		throw HException("Please provide only 1- or 3-channels images for ocr_recognition models.");
	}
	//
	//Preprocess the images.
	hv_PaddingGrayval = 0.0;
	hv_ImageRange = (hv_ImageRangeMax - hv_ImageRangeMin).TupleReal();
	//Zero-filled 'real' target image of the model input size used for padding.
	//NOTE(review): the target is created once and its domain is reduced inside
	//the loop; whether this is intended for batches of differently sized images
	//should be confirmed against the HDevelop original.
	GenImageConst(&ho_TargetImage, "real", hv_ImageWidth, hv_ImageHeight);
	OverpaintRegion(ho_TargetImage, ho_TargetImage, hv_PaddingGrayval, "fill");
	{
		HTuple end_val52 = hv_NumImages - 1;
		HTuple step_val52 = 1;
		for (hv_I = 0; hv_I.Continue(end_val52, step_val52); hv_I += step_val52)
		{
			hv_InputImageWidth = HTuple(hv_InputImageWidths[hv_I]);
			hv_InputImageHeight = HTuple(hv_InputImageHeights[hv_I]);
			hv_InputImageWidthHeightRatio = hv_InputImageWidth / (hv_InputImageHeight.TupleReal());
			//
			SelectObj(ho_Images, &ho_Image, hv_I + 1);
			FullDomain(ho_Image, &ho_Image);
			if (0 != (int(HTuple(hv_NumChannels[hv_I]) == 3)))
			{
				//The recognition model expects a single channel.
				Rgb1ToGray(ho_Image, &ho_Image);
			}
			//
			//Scale to the model height; cap the width at the model width while
			//otherwise preserving the aspect ratio.
			hv_ZoomHeight = hv_ImageHeight;
			hv_ZoomWidth = hv_ImageWidth.TupleMin2((hv_ImageHeight*hv_InputImageWidthHeightRatio).TupleInt());
			ZoomImageSize(ho_Image, &ho_Image, hv_ZoomWidth, hv_ZoomHeight, "constant");
			if (0 != (int(HTuple(hv_ImageTypes[hv_I]) == HTuple("byte"))))
			{
				ConvertImageType(ho_Image, &ho_Image, "real");
			}
			if (0 != (HTuple(int(hv_NormalizationType == HTuple("first_channel"))).TupleOr(int(hv_NormalizationType == HTuple("all_channels")))))
			{
				//Stretch the gray values to [ImageRangeMin, ImageRangeMax].
				MinMaxGray(ho_Image, ho_Image, 0, &hv_GrayvalMin, &hv_GrayvalMax, &hv_Range);
				hv_GrayvalRange = (hv_GrayvalMax - hv_GrayvalMin).TupleReal();
				//Guard against division by zero for constant images.
				if (0 != (int(hv_GrayvalRange == 0.0)))
				{
					hv_Scale = 1.0;
				}
				else
				{
					hv_Scale = hv_ImageRange / hv_GrayvalRange;
				}
				hv_Shift = ((-hv_Scale)*hv_GrayvalMin) + hv_ImageRangeMin;
				ScaleImage(ho_Image, &ho_Image, hv_Scale, hv_Shift);
			}
			else if (0 != (int(hv_NormalizationType == HTuple("none"))))
			{
				if (0 != (int(HTuple(hv_ImageTypes[hv_I]) == HTuple("byte"))))
				{
					//Linearly map the byte range [0,255] to the expected range.
					ScaleImage(ho_Image, &ho_Image, hv_ImageRange / 255.0, hv_ImageRangeMin);
				}
			}
			//
			//Paint the zoomed image onto the padded target and restrict the
			//target domain to the painted area.
			OverpaintGray(ho_TargetImage, ho_Image);
			ReduceDomain(ho_TargetImage, ho_Image, &ho_TargetImage);
			ReplaceObj(ho_Images, ho_TargetImage, &ho_Images, hv_I + 1);
		}
	}
	//
	//Return the preprocessed images.
	(*ho_ImagesPreprocessed) = ho_Images;
	return;
}

// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the instance segmentation masks for a sample given by the dictionary DLSample. 
//
// Transforms the 'mask' regions of hv_DLSample so that they stay aligned with
// the image preprocessing: for 'crop_domain' the masks are clipped to the image
// domain's bounding rectangle and translated to its origin, then all masks are
// zoomed to the target size given in hv_DLPreprocessParam. The result is
// written back into hv_DLSample under the key 'mask' (in-place update of the
// sample dictionary; nothing is returned).
//
// Parameters:
//   ho_ImageRaw          - the raw image the masks belong to (used for its
//                          size and, for 'crop_domain', its domain)
//   hv_DLSample          - sample dictionary containing the 'mask' object
//   hv_DLPreprocessParam - dictionary providing 'image_width', 'image_height',
//                          and 'domain_handling'
//
// Throws HException if 'domain_handling' is neither 'crop_domain' nor
// 'full_domain'.
void preprocess_dl_model_instance_masks(HObject ho_ImageRaw, HTuple hv_DLSample,
	HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_InstanceMasks, ho_Domain;

	// Local control variables
	HTuple  hv_ImageWidth, hv_ImageHeight, hv_DomainHandling;
	HTuple  hv_NumMasks, hv_WidthRaw, hv_HeightRaw, hv_DomainRow1;
	HTuple  hv_DomainColumn1, hv_DomainRow2, hv_DomainColumn2;
	HTuple  hv_FactorResampleWidth, hv_FactorResampleHeight;

	//
	//This procedure preprocesses the instance masks of a DLSample.
	//
	//Check preprocess parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get relevant preprocess parameters.
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	//
	//Get the preprocessed instance masks.
	GetDictObject(&ho_InstanceMasks, hv_DLSample, "mask");
	//
	//Get the number of instance masks.
	CountObj(ho_InstanceMasks, &hv_NumMasks);
	//
	//Domain handling of the image to be preprocessed.
	//
	GetImageSize(ho_ImageRaw, &hv_WidthRaw, &hv_HeightRaw);
	if (0 != (int(hv_DomainHandling == HTuple("crop_domain"))))
	{
		//Clip and translate masks w.r.t. the image domain
		GetDomain(ho_ImageRaw, &ho_Domain);
		SmallestRectangle1(ho_Domain, &hv_DomainRow1, &hv_DomainColumn1, &hv_DomainRow2,
			&hv_DomainColumn2);
		//
		//Clip the remaining regions to the domain.
		ClipRegion(ho_InstanceMasks, &ho_InstanceMasks, hv_DomainRow1, hv_DomainColumn1,
			hv_DomainRow2, hv_DomainColumn2);
		//After the crop the effective image size is the rectangle size.
		hv_WidthRaw = (hv_DomainColumn2 - hv_DomainColumn1) + 1.0;
		hv_HeightRaw = (hv_DomainRow2 - hv_DomainRow1) + 1.0;
		//We need to move the remaining regions back to the origin,
		//because crop_domain will be applied to the image
		MoveRegion(ho_InstanceMasks, &ho_InstanceMasks, -hv_DomainRow1, -hv_DomainColumn1);
	}
	else if (0 != (int(hv_DomainHandling != HTuple("full_domain"))))
	{
		throw HException("Unsupported parameter value for 'domain_handling'");
	}
	//
	//Zoom masks only if the image has a different size than the specified size.
	if (0 != ((hv_ImageWidth.TupleNotEqualElem(hv_WidthRaw)).TupleOr(hv_ImageHeight.TupleNotEqualElem(hv_HeightRaw))))
	{
		//Calculate rescaling factor.
		hv_FactorResampleWidth = (hv_ImageWidth.TupleReal()) / hv_WidthRaw;
		hv_FactorResampleHeight = (hv_ImageHeight.TupleReal()) / hv_HeightRaw;

		//Zoom the masks.
		ZoomRegion(ho_InstanceMasks, &ho_InstanceMasks, hv_FactorResampleWidth, hv_FactorResampleHeight);
	}
	//
	//Set the preprocessed instance masks.
	SetDictObject(ho_InstanceMasks, hv_DLSample, "mask");
	//
	//
	return;
}

// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Preprocess segmentation and weight images for deep-learning-based segmentation training and inference. 
void preprocess_dl_model_segmentations(HObject ho_ImagesRaw, HObject ho_Segmentations,
	HObject *ho_SegmentationsPreprocessed, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_Domain, ho_SelectedSeg, ho_SelectedDomain;

	// Local control variables
	HTuple  hv_NumberImages, hv_NumberSegmentations;
	HTuple  hv_Width, hv_Height, hv_WidthSeg, hv_HeightSeg;
	HTuple  hv_DLModelType, hv_ImageWidth, hv_ImageHeight, hv_ImageNumChannels;
	HTuple  hv_ImageRangeMin, hv_ImageRangeMax, hv_DomainHandling;
	HTuple  hv_SetBackgroundID, hv_ClassesToBackground, hv_IgnoreClassIDs;
	HTuple  hv_IsInt, hv_IndexImage, hv_ImageWidthRaw, hv_ImageHeightRaw;
	HTuple  hv_EqualWidth, hv_EqualHeight, hv_Type, hv_EqualReal;

	//
	//This procedure preprocesses the segmentation or weight images
	//given by Segmentations so that they can be handled by
	//train_dl_model_batch and apply_dl_model.
	//
	//Parameters:
	//  ImagesRaw:                 [in]  raw images; count and size must match Segmentations.
	//  Segmentations:             [in]  segmentation or weight images to preprocess.
	//  SegmentationsPreprocessed: [out] preprocessed images ('real' type, model input size).
	//  DLPreprocessParam:         [in]  dictionary of preprocessing parameters.
	//Throws HException on inconsistent input or unsupported parameter values.
	//
	//Check input data.
	//Examine number of images.
	CountObj(ho_ImagesRaw, &hv_NumberImages);
	CountObj(ho_Segmentations, &hv_NumberSegmentations);
	if (0 != (int(hv_NumberImages != hv_NumberSegmentations)))
	{
		throw HException("Equal number of images given in ImagesRaw and Segmentations required");
	}
	//Size of images.
	GetImageSize(ho_ImagesRaw, &hv_Width, &hv_Height);
	GetImageSize(ho_Segmentations, &hv_WidthSeg, &hv_HeightSeg);
	if (0 != (HTuple(int(hv_Width != hv_WidthSeg)).TupleOr(int(hv_Height != hv_HeightSeg))))
	{
		throw HException("Equal size of the images given in ImagesRaw and Segmentations required.");
	}
	//Check the validity of the preprocessing parameters.
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//Get the relevant preprocessing parameters.
	GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_DLModelType);
	GetDictTuple(hv_DLPreprocessParam, "image_width", &hv_ImageWidth);
	GetDictTuple(hv_DLPreprocessParam, "image_height", &hv_ImageHeight);
	GetDictTuple(hv_DLPreprocessParam, "image_num_channels", &hv_ImageNumChannels);
	GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_ImageRangeMin);
	GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_ImageRangeMax);
	GetDictTuple(hv_DLPreprocessParam, "domain_handling", &hv_DomainHandling);
	//Segmentation specific parameters.
	GetDictTuple(hv_DLPreprocessParam, "set_background_id", &hv_SetBackgroundID);
	GetDictTuple(hv_DLPreprocessParam, "class_ids_background", &hv_ClassesToBackground);
	GetDictTuple(hv_DLPreprocessParam, "ignore_class_ids", &hv_IgnoreClassIDs);
	//
	//Check the input parameter for setting the background ID.
	//'set_background_id' is only valid for segmentation models, must be a
	//single integer, and must be consistent with 'class_ids_background'
	//and 'ignore_class_ids'.
	if (0 != (int(hv_SetBackgroundID != HTuple())))
	{
		//Check that the model is a segmentation model.
		if (0 != (int(hv_DLModelType != HTuple("segmentation"))))
		{
			throw HException("Setting class IDs to background is only implemented for segmentation.");
		}
		//Check the background ID.
		TupleIsIntElem(hv_SetBackgroundID, &hv_IsInt);
		if (0 != (int((hv_SetBackgroundID.TupleLength()) != 1)))
		{
			throw HException("Only one class_id as 'set_background_id' allowed.");
		}
		else if (0 != (hv_IsInt.TupleNot()))
		{
			//Given class_id has to be of type int.
			throw HException("The class_id given as 'set_background_id' has to be of type int.");
		}
		//Check the values of ClassesToBackground.
		if (0 != (int((hv_ClassesToBackground.TupleLength()) == 0)))
		{
			//Check that the given classes are of length > 0.
			throw HException(HTuple("If 'set_background_id' is given, 'class_ids_background' must at least contain this class ID."));
		}
		else if (0 != (int((hv_ClassesToBackground.TupleIntersection(hv_IgnoreClassIDs)) != HTuple())))
		{
			//Check that class_ids_background is not included in the ignore_class_ids of the DLModel.
			throw HException("The given 'class_ids_background' must not be included in the 'ignore_class_ids' of the model.");
		}
	}
	//
	//Domain handling of the image to be preprocessed.
	//
	if (0 != (HTuple(int(hv_DomainHandling == HTuple("full_domain"))).TupleOr(int(hv_DomainHandling == HTuple("keep_domain")))))
	{
		FullDomain(ho_Segmentations, &ho_Segmentations);
	}
	else if (0 != (int(hv_DomainHandling == HTuple("crop_domain"))))
	{
		//If the domain should be cropped the domain has to be transferred
		//from the raw image to the segmentation image.
		GetDomain(ho_ImagesRaw, &ho_Domain);
		{
			//NOTE: HALCON object tuples are 1-based, hence the loop from 1
			//to NumberImages (inclusive).
			HTuple end_val66 = hv_NumberImages;
			HTuple step_val66 = 1;
			for (hv_IndexImage = 1; hv_IndexImage.Continue(end_val66, step_val66); hv_IndexImage += step_val66)
			{
				SelectObj(ho_Segmentations, &ho_SelectedSeg, hv_IndexImage);
				SelectObj(ho_Domain, &ho_SelectedDomain, hv_IndexImage);
				ChangeDomain(ho_SelectedSeg, ho_SelectedDomain, &ho_SelectedSeg);
				ReplaceObj(ho_Segmentations, ho_SelectedSeg, &ho_Segmentations, hv_IndexImage);
			}
		}
		CropDomain(ho_Segmentations, &ho_Segmentations);
	}
	else
	{
		throw HException("Unsupported parameter value for 'domain_handling'");
	}
	//
	//Preprocess the segmentation images.
	//
	//Set all background classes to the given background class ID.
	if (0 != (int(hv_SetBackgroundID != HTuple())))
	{
		reassign_pixel_values(ho_Segmentations, &ho_Segmentations, hv_ClassesToBackground,
			hv_SetBackgroundID);
	}
	//
	//Zoom images only if they have a different size than the specified size.
	//'nearest_neighbor' is used so that class IDs are never interpolated.
	GetImageSize(ho_Segmentations, &hv_ImageWidthRaw, &hv_ImageHeightRaw);
	hv_EqualWidth = hv_ImageWidth.TupleEqualElem(hv_ImageWidthRaw);
	hv_EqualHeight = hv_ImageHeight.TupleEqualElem(hv_ImageHeightRaw);
	if (0 != (HTuple(int((hv_EqualWidth.TupleMin()) == 0)).TupleOr(int((hv_EqualHeight.TupleMin()) == 0))))
	{
		ZoomImageSize(ho_Segmentations, &ho_Segmentations, hv_ImageWidth, hv_ImageHeight,
			"nearest_neighbor");
	}
	//
	//Check the type of the input images
	//and convert if necessary.
	GetImageType(ho_Segmentations, &hv_Type);
	hv_EqualReal = hv_Type.TupleEqualElem("real");
	//
	if (0 != (int((hv_EqualReal.TupleMin()) == 0)))
	{
		//Convert the image type to 'real',
		//because the model expects 'real' images.
		ConvertImageType(ho_Segmentations, &ho_Segmentations, "real");
	}
	//
	//Write preprocessed Segmentations to output variable.
	(*ho_SegmentationsPreprocessed) = ho_Segmentations;
	return;
}

// Chapter: Deep Learning / Model
// Short Description: Preprocess given DLSamples according to the preprocessing parameters given in DLPreprocessParam. 
void preprocess_dl_samples(HTuple hv_DLSampleBatch, HTuple hv_DLPreprocessParam)
{

	// Local iconic variables
	HObject  ho_ImageRaw, ho_ImagePreprocessed, ho_AnomalyImageRaw;
	HObject  ho_AnomalyImagePreprocessed, ho_SegmentationRaw;
	HObject  ho_SegmentationPreprocessed, ho_ImageRawDomain;

	// Local control variables
	HTuple  hv_SampleIndex, hv_DLSample, hv_ImageExists;
	HTuple  hv_KeysExists, hv_AnomalyParamExist, hv_Rectangle1ParamExist;
	HTuple  hv_Rectangle2ParamExist, hv_InstanceMaskParamExist;
	HTuple  hv_SegmentationParamExist, hv_OCRParamExist;

	//
	//This procedure preprocesses all images of the sample dictionaries
	//in the tuple DLSampleBatch.
	//The images are preprocessed according to the parameters provided
	//in DLPreprocessParam.
	//
	//Parameters:
	//  DLSampleBatch:     [in/out] tuple of sample dictionaries; the entries
	//                     ('image', ground-truth keys, 3D data) are replaced
	//                     in place with their preprocessed versions.
	//  DLPreprocessParam: [in] preprocessing parameter dictionary; a copy is
	//                     used internally, so the caller's dictionary is not
	//                     modified.
	//Throws HException if a sample has no 'image' key.
	//
	//Check the validity of the preprocessing parameters.
	//The procedure check_dl_preprocess_param might change DLPreprocessParam.
	//To avoid race conditions when preprocess_dl_samples is used from
	//multiple threads with the same DLPreprocessParam dictionary,
	//work on a copy.
	CopyDict(hv_DLPreprocessParam, HTuple(), HTuple(), &hv_DLPreprocessParam);
	check_dl_preprocess_param(hv_DLPreprocessParam);
	//
	//
	//
	//Preprocess the sample entries.
	//
	{
		HTuple end_val18 = (hv_DLSampleBatch.TupleLength()) - 1;
		HTuple step_val18 = 1;
		for (hv_SampleIndex = 0; hv_SampleIndex.Continue(end_val18, step_val18); hv_SampleIndex += step_val18)
		{
			hv_DLSample = HTuple(hv_DLSampleBatch[hv_SampleIndex]);
			//
			//Preprocess augmentation data.
			preprocess_dl_model_augmentation_data(hv_DLSample, hv_DLPreprocessParam);
			//
			//Check the existence of the sample keys.
			GetDictParam(hv_DLSample, "key_exists", "image", &hv_ImageExists);
			//
			//Preprocess the images.
			if (0 != hv_ImageExists)
			{
				//
				//Get the image.
				GetDictObject(&ho_ImageRaw, hv_DLSample, "image");
				//
				//Preprocess the image.
				preprocess_dl_model_images(ho_ImageRaw, &ho_ImagePreprocessed, hv_DLPreprocessParam);
				//
				//Replace the image in the dictionary.
				SetDictObject(ho_ImagePreprocessed, hv_DLSample, "image");
				//
				//Check existence of model specific sample keys:
				//- 'anomaly_ground_truth':
				//  For model 'type' = 'anomaly_detection' and
				//  model 'type' = 'gc_anomaly_detection'
				//- 'bbox_row1':
				//  For 'instance_type' = 'rectangle1' and
				//  model 'type' = 'detection'
				//- 'bbox_phi':
				//  For 'instance_type' = 'rectangle2' and
				//  model 'type' = 'detection'
				//- 'mask':
				//  For 'instance_type' = 'rectangle1',
				//  model 'type' = 'detection', and
				//  'instance_segmentation' = true
				//- 'segmentation_image':
				//  For model 'type' = 'segmentation'
				GetDictParam(hv_DLSample, "key_exists", (((((HTuple("anomaly_ground_truth").Append("bbox_row1")).Append("bbox_phi")).Append("mask")).Append("segmentation_image")).Append("word")),
					&hv_KeysExists);
				//Unpack the existence flags in query order.
				hv_AnomalyParamExist = ((const HTuple&)hv_KeysExists)[0];
				hv_Rectangle1ParamExist = ((const HTuple&)hv_KeysExists)[1];
				hv_Rectangle2ParamExist = ((const HTuple&)hv_KeysExists)[2];
				hv_InstanceMaskParamExist = ((const HTuple&)hv_KeysExists)[3];
				hv_SegmentationParamExist = ((const HTuple&)hv_KeysExists)[4];
				hv_OCRParamExist = ((const HTuple&)hv_KeysExists)[5];
				//
				//Preprocess the anomaly ground truth for
				//model 'type' = 'anomaly_detection' or
				//model 'type' = 'gc_anomaly_detection' if present.
				if (0 != hv_AnomalyParamExist)
				{
					//
					//Get the anomaly image.
					GetDictObject(&ho_AnomalyImageRaw, hv_DLSample, "anomaly_ground_truth");
					//
					//Preprocess the anomaly image.
					preprocess_dl_model_anomaly(ho_AnomalyImageRaw, &ho_AnomalyImagePreprocessed,
						hv_DLPreprocessParam);
					//
					//Set preprocessed anomaly image.
					SetDictObject(ho_AnomalyImagePreprocessed, hv_DLSample, "anomaly_ground_truth");
				}
				//
				//Preprocess depending on the model type.
				//If bounding boxes are given, rescale them as well.
				if (0 != hv_Rectangle1ParamExist)
				{
					//
					//Preprocess the bounding boxes of type 'rectangle1'.
					preprocess_dl_model_bbox_rect1(ho_ImageRaw, hv_DLSample, hv_DLPreprocessParam);
				}
				else if (0 != hv_Rectangle2ParamExist)
				{
					//
					//Preprocess the bounding boxes of type 'rectangle2'.
					preprocess_dl_model_bbox_rect2(ho_ImageRaw, hv_DLSample, hv_DLPreprocessParam);
				}
				if (0 != hv_InstanceMaskParamExist)
				{
					//
					//Preprocess the instance masks.
					preprocess_dl_model_instance_masks(ho_ImageRaw, hv_DLSample, hv_DLPreprocessParam);
				}
				//
				//Preprocess the segmentation image if present.
				if (0 != hv_SegmentationParamExist)
				{
					//
					//Get the segmentation image.
					GetDictObject(&ho_SegmentationRaw, hv_DLSample, "segmentation_image");
					//
					//Preprocess the segmentation image.
					preprocess_dl_model_segmentations(ho_ImageRaw, ho_SegmentationRaw, &ho_SegmentationPreprocessed,
						hv_DLPreprocessParam);
					//
					//Set preprocessed segmentation image.
					SetDictObject(ho_SegmentationPreprocessed, hv_DLSample, "segmentation_image");
				}
				//
				//Preprocess the word bounding boxes and generate targets.
				//Only applies when both OCR 'word' data and rectangle2 boxes exist.
				if (0 != (hv_OCRParamExist.TupleAnd(hv_Rectangle2ParamExist)))
				{
					//
					//Preprocess Sample.
					gen_dl_ocr_detection_targets(hv_DLSample, hv_DLPreprocessParam);
				}
				//
				//Preprocess 3D relevant data if present.
				GetDictParam(hv_DLSample, "key_exists", (((HTuple("x").Append("y")).Append("z")).Append("normals")),
					&hv_KeysExists);
				if (0 != (hv_KeysExists.TupleMax()))
				{
					//We need to handle crop_domain before preprocess_dl_model_3d_data
					//if it is necessary.
					//Note, we are not cropping the image of DLSample because it has
					//been done by preprocess_dl_model_images.
					//Since we always keep the domain of 3D data we do not need to handle
					//'keep_domain' or 'full_domain'.
					GetDomain(ho_ImageRaw, &ho_ImageRawDomain);
					crop_dl_sample_image(ho_ImageRawDomain, hv_DLSample, "x", hv_DLPreprocessParam);
					crop_dl_sample_image(ho_ImageRawDomain, hv_DLSample, "y", hv_DLPreprocessParam);
					crop_dl_sample_image(ho_ImageRawDomain, hv_DLSample, "z", hv_DLPreprocessParam);
					crop_dl_sample_image(ho_ImageRawDomain, hv_DLSample, "normals", hv_DLPreprocessParam);
					//
					preprocess_dl_model_3d_data(hv_DLSample, hv_DLPreprocessParam);
				}
			}
			else
			{
				throw HException((HTuple("All samples processed need to include an image, but the sample with index ") + hv_SampleIndex) + " does not.");
			}
		}
	}
	//
	return;
}

// Chapter: Image / Manipulation
// Short Description: Change value of ValuesToChange in Image to NewValue. 
void reassign_pixel_values(HObject ho_Image, HObject *ho_ImageOut, HTuple hv_ValuesToChange,
	HTuple hv_NewValue)
{
	//Replace every pixel of Image whose value occurs in ValuesToChange
	//with NewValue, and return the modified image in ImageOut.

	// Local iconic variables
	HObject  ho_Accumulated, ho_Matching;

	//Collect the union of all regions matching any of the listed values.
	GenEmptyRegion(&ho_Accumulated);
	const Hlong value_count = hv_ValuesToChange.Length();
	for (Hlong value_index = 0; value_index < value_count; ++value_index)
	{
		const HTuple current_value = HTuple(hv_ValuesToChange[value_index]);
		Threshold(ho_Image, &ho_Matching, current_value, current_value);
		Union2(ho_Accumulated, ho_Matching, &ho_Accumulated);
	}
	//Paint the accumulated region with the replacement value.
	OverpaintRegion(ho_Image, ho_Accumulated, hv_NewValue, "fill");
	(*ho_ImageOut) = ho_Image;
}

// Chapter: Deep Learning / Model
// Short Description: Remove invalid 3D pixels from a given domain. 
void remove_invalid_3d_pixels(HObject ho_ImageX, HObject ho_ImageY, HObject ho_ImageZ,
	HObject ho_Domain, HObject *ho_DomainOut, HTuple hv_InvalidPixelValue)
{
	//Remove from Domain all pixels where X, Y and Z simultaneously hold
	//InvalidPixelValue, i.e. a pixel is only dropped when it is invalid
	//in all three coordinate images at once.

	// Local iconic variables
	HObject  ho_ReducedX, ho_ReducedY, ho_ReducedZ;
	HObject  ho_BadX, ho_BadY, ho_BadZ;
	HObject  ho_BadXY, ho_BadAll, ho_ValidRegion;

	(*ho_DomainOut) = ho_Domain;
	//Restrict each coordinate image to the given domain.
	ReduceDomain(ho_ImageX, (*ho_DomainOut), &ho_ReducedX);
	ReduceDomain(ho_ImageY, (*ho_DomainOut), &ho_ReducedY);
	ReduceDomain(ho_ImageZ, (*ho_DomainOut), &ho_ReducedZ);
	//Locate the invalid pixels per coordinate channel.
	Threshold(ho_ReducedX, &ho_BadX, hv_InvalidPixelValue, hv_InvalidPixelValue);
	Threshold(ho_ReducedY, &ho_BadY, hv_InvalidPixelValue, hv_InvalidPixelValue);
	Threshold(ho_ReducedZ, &ho_BadZ, hv_InvalidPixelValue, hv_InvalidPixelValue);
	//Pixels that are invalid in X, Y and Z together.
	Intersection(ho_BadX, ho_BadY, &ho_BadXY);
	Intersection(ho_BadXY, ho_BadZ, &ho_BadAll);
	//Keep only the valid part of the domain.
	Complement(ho_BadAll, &ho_ValidRegion);
	Intersection((*ho_DomainOut), ho_ValidRegion, &(*ho_DomainOut));
}

// Chapter: Deep Learning / Model
// Short Description: Replace legacy preprocessing parameters or values. 
void replace_legacy_preprocessing_parameters(HTuple hv_DLPreprocessParam)
{
	//Migrate legacy preprocessing settings inside DLPreprocessParam to the
	//current parameter names and values. Each migration step is wrapped in
	//its own try/catch so that a failure (e.g. an invalid dictionary
	//handle) is swallowed, mirroring the generated HDevelop behavior.

	// Local control variables
	HTuple  hv_Exception, hv_HasNormalizationType;
	HTuple  hv_NormalizationValue, hv_HasLegacyKey, hv_LegacyValue;

	//Step 1: map the legacy values 'true'/'false' of 'normalization_type'
	//to 'first_channel'/'none'.
	hv_Exception = 0;
	try
	{
		GetDictParam(hv_DLPreprocessParam, "key_exists", "normalization_type", &hv_HasNormalizationType);
		if (0 != hv_HasNormalizationType)
		{
			GetDictTuple(hv_DLPreprocessParam, "normalization_type", &hv_NormalizationValue);
			if (0 != (int(hv_NormalizationValue == HTuple("true"))))
			{
				hv_NormalizationValue = "first_channel";
			}
			else if (0 != (int(hv_NormalizationValue == HTuple("false"))))
			{
				hv_NormalizationValue = "none";
			}
			SetDictTuple(hv_DLPreprocessParam, "normalization_type", hv_NormalizationValue);
		}
	}
	catch (HException &HDevExpDefaultException)
	{
		HDevExpDefaultException.ToHTuple(&hv_Exception);
	}
	//
	//Step 2: replace the legacy key 'contrast_normalization' by
	//'normalization_type' with the corresponding value, then drop the
	//legacy key.
	hv_Exception = 0;
	try
	{
		GetDictParam(hv_DLPreprocessParam, "key_exists", "contrast_normalization", &hv_HasLegacyKey);
		if (0 != hv_HasLegacyKey)
		{
			GetDictTuple(hv_DLPreprocessParam, "contrast_normalization", &hv_LegacyValue);
			if (0 != (int(hv_LegacyValue == HTuple("false"))))
			{
				SetDictTuple(hv_DLPreprocessParam, "normalization_type", "none");
			}
			else if (0 != (int(hv_LegacyValue == HTuple("true"))))
			{
				SetDictTuple(hv_DLPreprocessParam, "normalization_type", "first_channel");
			}
			RemoveDictKey(hv_DLPreprocessParam, "contrast_normalization");
		}
	}
	catch (HException &HDevExpDefaultException)
	{
		HDevExpDefaultException.ToHTuple(&hv_Exception);
	}
}

// Chapter: OCR / Deep OCR
// Short Description: Split rectangle2 into a number of rectangles. 
void split_rectangle2(HTuple hv_Row, HTuple hv_Column, HTuple hv_Phi, HTuple hv_Length1,
	HTuple hv_Length2, HTuple hv_NumSplits, HTuple *hv_SplitRow, HTuple *hv_SplitColumn,
	HTuple *hv_SplitPhi, HTuple *hv_SplitLength1Out, HTuple *hv_SplitLength2Out)
{
	//Split an oriented rectangle (rectangle2) into NumSplits equally wide
	//sub-rectangles along its Length1 axis. All sub-rectangles keep Phi
	//and Length2; their centers are returned in SplitRow/SplitColumn.

	// Local control variables
	HTuple  hv_SubHalfLength, hv_LocalRows, hv_LocalCols, hv_Transform;

	//Guard clause: a split count of zero or less is invalid.
	if (0 == (int(hv_NumSplits > 0)))
	{
		throw HException("Number of splits must be greater than 0.");
	}

	//Half-length of each sub-rectangle along the Length1 axis.
	hv_SubHalfLength = hv_Length1 / (hv_NumSplits.TupleReal());
	//Sub-rectangle centers in the rectangle's local frame (center (0,0)):
	//evenly spaced along the column axis, 2*SubHalfLength apart.
	hv_LocalRows = HTuple(hv_NumSplits, 0.0);
	hv_LocalCols = ((-hv_Length1) + hv_SubHalfLength) + ((HTuple::TupleGenSequence(0, hv_NumSplits - 1, 1) * 2)*hv_SubHalfLength);
	//Rotate by Phi, then translate to the rectangle center (Row, Column).
	HomMat2dIdentity(&hv_Transform);
	HomMat2dRotate(hv_Transform, hv_Phi, 0, 0, &hv_Transform);
	HomMat2dTranslate(hv_Transform, hv_Row, hv_Column, &hv_Transform);
	(*hv_SplitLength1Out) = HTuple(hv_NumSplits, hv_SubHalfLength);
	(*hv_SplitLength2Out) = HTuple(hv_NumSplits, hv_Length2);
	(*hv_SplitPhi) = HTuple(hv_NumSplits, hv_Phi);
	AffineTransPoint2d(hv_Transform, hv_LocalRows, hv_LocalCols, &(*hv_SplitRow), &(*hv_SplitColumn));
}

#pragma endregion



FunctionDL::FunctionDL(QDialog* parent, QSettings* qsettings, QString section, int camera_index, int detection_index) : QDialog(parent)
{
	//Deep-learning configuration dialog: lets the user pick the model and
	//preprocessing files, draw an optional ROI, run a test detection and
	//save/restore all parameters through the QSettings section 'section'.
	//
	//Parameters:
	//  parent:          owning dialog (may be nullptr).
	//  qsettings:       settings store used by loadParameter()/saveParameter().
	//  section:         settings section for this camera/detection slot.
	//  camera_index:    index of the camera this dialog configures.
	//  detection_index: index of the detection task for this camera.

	//Parent the settings object to this dialog so Qt's object tree deletes
	//it (it was previously allocated with new and never freed).
	QSettings* m_settings_para = new QSettings(QCoreApplication::applicationDirPath() + "/para" + "/para.ini", QSettings::IniFormat, this);
	m_settings_para->setIniCodec("UTF-8");

	m_qsettings = qsettings;
	m_section = section;
	m_camera_index = camera_index;
	m_detection_index = detection_index;

	this->setWindowTitle("Deep Learning");
	//Scale the fixed layout relative to a 1920x1080 reference desktop.
	QRect desktop_rect = QApplication::desktop()->geometry();
	double ratio_x = desktop_rect.width() / 1920.0;
	double ratio_y = desktop_rect.height() / 1080.0;

	move(0, 0);
	setFixedSize(1920 * ratio_x, 1010 * ratio_y);

	//Image preview on the right-hand side.
	m_view = new QtQGraphicsView(this);
	m_view->setGeometry(920 * ratio_x, 100 * ratio_y, 900 * ratio_x, 700 * ratio_y);

	//Collect rectangles the user finishes drawing on the preview:
	//draw_type 1 = ROI rectangle, draw_type 2 = 'ear' rectangle.
	connect(m_view->m_pixmapItem, &QtQGraphicsPixmapItem::drawRectangleFinish, this, [=](QGraphicsItem* p, int draw_type)
	{
		if (draw_type == 1)
			m_vector_graphics_shape_rectangle.push_back(static_cast<GraphicsShapeRectangleCirce*>(p));
		if (draw_type == 2)
		{
			GraphicsShapeRectangleCirce* p_ear = static_cast<GraphicsShapeRectangleCirce*>(p);
			p_ear->m_message = "ear" + QString::number(m_vector_rectangle_ear.size());
			m_vector_rectangle_ear.push_back(p_ear);
		}
	});

	QSize control_size(220 * ratio_x, 40 * ratio_y);

	//Left-hand control panel laid out in a grid.
	QWidget* p = new QWidget(this);
	p->move(0 * ratio_x, 0 * ratio_y);
	p->setFixedSize(900 * ratio_x, 1010 * ratio_y);

	QGridLayout *qgridLayout = new QGridLayout();
	p->setLayout(qgridLayout);

	int index = 0;

	//Model file selection.
	m_pushbutton_open_model = new QPushButton();
	m_pushbutton_open_model->setFixedSize(control_size);
	m_pushbutton_open_model->setText("SetModelPath");
	connect(m_pushbutton_open_model, &QPushButton::clicked, this, [=]() {
		QString filename;
		filename = QFileDialog::getOpenFileName(this,
			tr("Select Model"),
			"",
			tr("(*.hdl)"));
		if (filename.isEmpty())
		{
			return;
		}
		RetrainedModelFileName = filename;
		m_qlabel_model_path->setText(RetrainedModelFileName);
	});

	qgridLayout->addWidget(m_pushbutton_open_model, index, 0);
	index++;

	//Declared here because the disabled (commented-out) ROI/ear blocks
	//below reuse it when re-enabled.
	QPushButton* p_QPushButton;

	m_qlabel_model_path = new QLabel();
	m_qlabel_model_path->setText("");
	m_qlabel_model_path->setFixedSize(900 * ratio_x, 40 * ratio_y);
	qgridLayout->addWidget(m_qlabel_model_path, index, 0, 1, 2);
	index++;

	//Preprocessing dictionary selection.
	m_pushbutton_preprocess = new QPushButton();
	m_pushbutton_preprocess->setFixedSize(control_size);
	m_pushbutton_preprocess->setText("SetPreprocessPath");
	connect(m_pushbutton_preprocess, &QPushButton::clicked, this, [=]() {
		QString filename;
		filename = QFileDialog::getOpenFileName(this,
			tr("Select Model"),
			"",
			tr("(*.hdict )"));
		//            tr("(*.a *.b)"));
		if (filename.isEmpty())
		{
			return;
		}
		PreprocessFileName = filename;
		m_qlabel_preprocess_path->setText(PreprocessFileName);
	});

	qgridLayout->addWidget(m_pushbutton_preprocess, index, 0);
	index++;

	m_qlabel_preprocess_path = new QLabel();
	m_qlabel_preprocess_path->setText("");
	m_qlabel_preprocess_path->setFixedSize(900 * ratio_x, 40 * ratio_y);
	qgridLayout->addWidget(m_qlabel_preprocess_path, index, 0, 1, 2);
	index++;

	//Disabled feature: ROI draw/delete buttons per camera.
//	if (m_camera_index == 0 || m_camera_index == 1 || m_camera_index == 2)
//	{
//		p_QPushButton = new QPushButton(this);
//		p_QPushButton->setFixedSize(control_size);
//		p_QPushButton->setText("Draw ROI");
//		connect(p_QPushButton, &QPushButton::clicked, this, [=]() {
//			m_view->m_pixmapItem->m_shape_type = 1;
//			m_view->m_pixmapItem->m_color = Qt::blue;
//			m_view->m_pixmapItem->m_bool_filled = false;
//		});
//		qgridLayout->addWidget(p_QPushButton, index, 0);


//		p_QPushButton = new QPushButton(this);
//		p_QPushButton->setFixedSize(control_size);
//		p_QPushButton->setText("Delete ROI");
//		qgridLayout->addWidget(p_QPushButton, index, 1);
//		connect(p_QPushButton, &QPushButton::clicked, this, [=]() {
//			for (GraphicsShapeRectangleCirce *item : m_vector_graphics_shape_rectangle) {
//				m_view->scene()->removeItem(item);
//				delete item;
//			}
//			m_vector_graphics_shape_rectangle.clear();
//		});
//		index++;
//	}

	//Disabled feature: 'ear' rectangle draw/delete buttons.
//    if (m_camera_index == 3 || m_camera_index == 4 )
//    {
//        if(m_detection_index == 0)
//        {
//            m_qcombox_unuse_use_ear = new QComboBox(this);
//            m_qcombox_unuse_use_ear->setFixedSize(control_size);
//            m_qcombox_unuse_use_ear->addItem("Ear Stop");
//            m_qcombox_unuse_use_ear->addItem("Ear Start");
//            qgridLayout->addWidget(m_qcombox_unuse_use_ear, index, 0);
//            index++;

//            p_QPushButton = new QPushButton(this);
//            p_QPushButton->setFixedSize(control_size);
//            p_QPushButton->setText("Draw Ear");
//            connect(p_QPushButton, &QPushButton::clicked, this, [=]() {
//                m_view->m_pixmapItem->m_shape_type = 2;
//                m_view->m_pixmapItem->m_color = Qt::darkYellow;
//                m_view->m_pixmapItem->m_bool_filled = false;
//            });
//            qgridLayout->addWidget(p_QPushButton, index, 0);


//            p_QPushButton = new QPushButton(this);
//            p_QPushButton->setFixedSize(control_size);
//            p_QPushButton->setText("Delete Ear");
//            qgridLayout->addWidget(p_QPushButton, index, 1);
//            connect(p_QPushButton, &QPushButton::clicked, this, [=]() {
//                for (GraphicsShapeRectangleCirce *item : m_vector_rectangle_ear) {
//                    m_view->scene()->removeItem(item);
//                    delete item;
//                }
//                m_vector_rectangle_ear.clear();
//            });
//            index++;
//        }
//    }

	//Show the unmodified source image in the preview.
	m_pushbutton_show_orign_image = new QPushButton(this);
	m_pushbutton_show_orign_image->setFixedSize(control_size);
	m_pushbutton_show_orign_image->setText("ShowOrign");
	connect(m_pushbutton_show_orign_image, &QPushButton::clicked, this, [=]() {
		QImage q_image = matToQImage(m_image);
		m_view->setImage(q_image, "");
		return true;
	});
	qgridLayout->addWidget(m_pushbutton_show_orign_image, index, 0);

	//Run a test detection on the loaded image.
	m_pushbutton_detection = new QPushButton(this);
	m_pushbutton_detection->setFixedSize(control_size);
	m_pushbutton_detection->setText("Detection");
	connect(m_pushbutton_detection, &QPushButton::clicked, this, &FunctionDL::detectionImage);
	qgridLayout->addWidget(m_pushbutton_detection, index, 1);
	index++;

	m_pushbutton_save_parameter = new QPushButton(this);
	m_pushbutton_save_parameter->setFixedSize(control_size);
	m_pushbutton_save_parameter->setText("Save");
	connect(m_pushbutton_save_parameter, &QPushButton::clicked, this, &FunctionDL::saveParameter);
	qgridLayout->addWidget(m_pushbutton_save_parameter, index, 0);

	m_pushbutton_return = new QPushButton(this);
	m_pushbutton_return->setFixedSize(control_size);
	m_pushbutton_return->setText("Exit");
	qgridLayout->addWidget(m_pushbutton_return, index, 1);
	connect(m_pushbutton_return, &QPushButton::clicked, this, [=]() {
		this->hide();
	});
	index++;

	//Per-class defect standard table.
	m_qwidget_dl_para1 = new QWidgetDlPara1(nullptr, m_section);
	m_qwidget_dl_para1->setFixedSize(800 * ratio_x, 600 * ratio_y);
	m_qwidget_dl_para1->m_qtableview->setColumnWidth(0, 100 * ratio_x);  // class name column
	m_qwidget_dl_para1->m_qtableview->setColumnWidth(1, 360 * ratio_x);  // description column
	m_qwidget_dl_para1->m_qtableview->setColumnWidth(2, 150 * ratio_x);  // threshold column
	m_qwidget_dl_para1->m_qtableview->setColumnWidth(3, 150 * ratio_x);  // threshold column
	m_qwidget_dl_para1->m_qtableview->horizontalHeader()->setStretchLastSection(true);
	m_qwidget_dl_para1->m_qtableview->horizontalHeader()->setSectionResizeMode(QHeaderView::Fixed);

	qgridLayout->addWidget(m_qwidget_dl_para1, index, 0);
	index++;
	//Disabled feature: extra 'ear' parameter panel.
//    if ( m_camera_index == 3 || m_camera_index == 4)
//    {
//        if (m_detection_index == 0)
//        {
//            m_qwidget_dl_para2 = new  QWidgetDlPara2(nullptr, m_section + "_ear");
//            m_qwidget_dl_para2->setFixedSize(800 * ratio_x, 200 * ratio_y);
//            qgridLayout->addWidget(m_qwidget_dl_para2, index, 0);
//        }
//    }

	//Restore persisted ROI/ear rectangles and file paths.
	loadParameter();
}

FunctionDL::~FunctionDL()
{
	//Nothing to release explicitly: all child widgets are parented to
	//this dialog and are destroyed by Qt's object tree.
}

bool FunctionDL::loadmodel()
{
	try
	{
		HTuple  hv_ret;
		
		QByteArray qbytearray1 = RetrainedModelFileName.toUtf8();
		QByteArray qbytearray2 = PreprocessFileName.toUtf8();
        initInferenceDL(qbytearray1.constData(), qbytearray2.constData(),m_camera_index, &hv_ret, &DLModelHandle, &DLPreprocessParam);

		int ret = hv_ret[0].I();

		if (ret > 0)
		{
			return true;
		}
		else
		{
			
			return false;
		}
	}
	catch (HException &HDevExpDefaultException)
	{
        QString message = HDevExpDefaultException.ErrorMessage().Text();

		QMessageBox::information(nullptr, "", message);
		return false;
	}
    return true ;
}

bool FunctionDL::detection( HObject image_in,QString& message)
{
	//Run deep-learning inference on image_in and filter the returned
	//segments against the per-class standards in m_vector_defect_standard
	//and the optional ROI rectangle. Surviving segments are appended to
	//result_defect and described in message.
	//Returns true (OK) if no defect remains, false (NG) otherwise or on a
	//HALCON exception (message then carries the error text).
	try
	{
		const int64 t1 = cv::getTickCount();

		HTuple hv_name_segment, hv_area_segment, hv_row_segment, hv_column_segment, hv_ret;
		inferenceDL(image_in, &m_region_segment, DLModelHandle, DLPreprocessParam, &hv_name_segment, &hv_area_segment, &hv_row_segment, &hv_column_segment, &hv_ret);

		const int64 t2 = cv::getTickCount();
		const double m_time_use = (t2 - t1) * 1000 / cv::getTickFrequency();
		logger->info(std::to_string(m_camera_index) + "  camera " + std::to_string(m_time_use));

		const int number = hv_ret[0].I();

		//Collect all raw inference results.
		vector<HalconAIResult> result_all;
		for (int i = 0; i < number; i++)
		{
			HalconAIResult tem;
			tem.name = hv_name_segment[i].S();
			tem.threshold_score = 0.5;
			tem.area = hv_area_segment[i].I();
			tem.x = hv_column_segment[i].D();
			tem.y = hv_row_segment[i].D();
			//HALCON object tuples are 1-based.
			SelectObj(m_region_segment, &tem.image, i + 1);
			result_all.push_back(tem);
		}

		//Filter: keep a result only if it passes the class standard (score,
		//size) and lies inside the ROI (when one is drawn).
		for (auto& obj : result_all) {
			int standard_index = -1;
			for (auto& obj_standard : m_vector_defect_standard)
			{
				if (obj.name == obj_standard.name)
				{
					standard_index = 0;
					//Reject results below the configured score threshold.
					if (obj.threshold_score < obj_standard.threshold_score)
					{
						break;
					}
					//Reject results smaller than the configured size.
					if (obj.area < obj_standard.threshold_size)
					{
						break;
					}
					//Reject results outside the ROI, if one is drawn.
					if (m_vector_graphics_shape_rectangle.size() >= 1) {
						if (obj.x < column1 || obj.x > column2 || obj.y < row1 || obj.y > row2)
						{
							break;
						}
					}
					result_defect.push_back(obj);
					message += QString::fromStdString(obj.name) + ":size" + QString::number(obj.area) + "score" + QString::number(obj.threshold_score) + "\n";
					break;
				}
			}
			if (standard_index == -1)
			{
				//No per-class standard configured: keep the result unless it
				//falls outside the ROI.
				if (m_vector_graphics_shape_rectangle.size() >= 1) {
					if (obj.x < column1 || obj.x > column2 || obj.y < row1 || obj.y > row2)
					{
						//BUGFIX: was 'break', which aborted the whole filter
						//loop and silently skipped all remaining results;
						//only this single result must be skipped.
						continue;
					}
				}
				result_defect.push_back(obj);
				message += QString::fromStdString(obj.name) + ":size" + QString::number(obj.area) + "score" + QString::number(obj.threshold_score) + "\n";
			}
		}
		//NG (false) if any defect survived the filters; previously written
		//as integer literals 0/1 returned from a bool function.
		return result_defect.empty();
	}
	catch (HException &HDevExpDefaultException)
	{
		//Use .Text() for consistency with loadmodel().
		message = HDevExpDefaultException.ErrorMessage().Text();
		return false;
	}
}

void FunctionDL::detectionImage()
{
	if (m_image.empty())
	{
		QMessageBox::information(nullptr, "", "no image");
		return;
	}

	QString message;

	HObject image = matToHImage(m_image);

	bool state = detection(image, message);

	if (state)
	{
		message = "OK" + message;
	}
	else
	{
		message = "NG" + message;
	}

	Mat image_show;

	draw(m_image, image_show);

	QImage q_image = matToQImage(image_show);
	if (!q_image.isNull())
	{
		m_view->setImage(q_image, message);
	}
  
}

void FunctionDL::draw( Mat image_in,Mat& image_out)
{
	//Render every region in result_defect onto a BGR copy of image_in as a
	//red overlay, then clear the defect list so the next detection starts
	//from an empty state.
	image_out = image_in.clone();
	if (image_out.channels() == 1)
	{
		cvtColor(image_out, image_out, COLOR_GRAY2BGR);
	}
	//Iterate by const reference: HalconAIResult holds an HObject and a
	//std::string, so copying each element per iteration is wasted work.
	for (const auto& obj : result_defect)
	{
		image_out = drawRegion(image_out, obj.image, Scalar(0, 0, 255));
	}
	result_defect.clear();
}

void FunctionDL::loadParameter()
{
	try
	{
		int roi_number = m_qsettings->value(m_section + "/" + "roi_number", 0).toInt();
		if (roi_number > 0)
		{
			int i = 0;
			row1 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "row1", 100).toInt();
			column1 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "column1", 100).toInt();
			row2 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "row2", 200).toInt();
			column2 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "column2", 200).toInt();

			GraphicsShapeRectangleCirce* p = new GraphicsShapeRectangleCirce(column1, row1, column2 - column1, row2 - row1, Qt::blue);
			p->setShapeType(0);
			p->setFilled(false);
			p->setZValue(101);
			m_view->m_scene->addItem(p);

			m_vector_graphics_shape_rectangle.push_back(p);
		}

		int ear_number = m_qsettings->value(m_section + "/" + "ear_number", 0).toInt();
		for (int i = 0; i < ear_number; i++)
		{
			int m_ear_row1 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "row1", 100).toInt();
			int m_ear_column1 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "column1", 100).toInt();
			int m_ear_row2 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "row2", 200).toInt();
			int m_ear_column2 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "column2", 200).toInt();

			GraphicsShapeRectangleCirce* p = new GraphicsShapeRectangleCirce(m_ear_column1, m_ear_row1, m_ear_column2 - m_ear_column1, m_ear_row2 - m_ear_row1, Qt::darkYellow);

			p->m_message = "ear" + QString::number(i);
			p->setShapeType(0);
			p->setFilled(false);
			p->setZValue(102);

			m_view->m_scene->addItem(p);

			m_vector_rectangle_ear.push_back(p);
		}



		RetrainedModelFileName = m_qsettings->value(m_section + "/" + "RetrainedModelFileName", "").toString();
		m_qlabel_model_path->setText(RetrainedModelFileName);

		PreprocessFileName = m_qsettings->value(m_section + "/" + "PreprocessFileName", "").toString();
		m_qlabel_preprocess_path->setText(PreprocessFileName);



//        if(m_qcombox_unuse_use_ear!=nullptr)
//        {
//            m_unuse_use_ear = m_qsettings->value(m_section + "/" + "m_unuse_use_ear").toInt();
//            m_qcombox_unuse_use_ear->setCurrentIndex(m_unuse_use_ear);
//        }


		save_dl_prar1(m_qwidget_dl_para1, m_vector_defect_standard);
//		if (m_camera_index == 3 || m_camera_index == 4)
//		{
//			if (m_detection_index == 0)
//				save_dl_prar2(m_qwidget_dl_para2, m_vector_ear_standard);
//		}
	}
	catch (exception& error)
	{
		QMessageBox::critical(nullptr, error.what(), m_section + "para error");
	}

	return;
}

// Persist this function's parameters: model/preprocess paths (privileged
// users only) and the defect-standard table.
// NOTE(review): the ROI/ear block below is a copy of loadParameter() — it
// READS settings and appends NEW rectangle items to the scene and to the
// member vectors every time saveParameter() runs, and it never writes the
// ROI coordinates back to QSettings. Confirm this duplication is intended.
void FunctionDL::saveParameter()
{
	try
	{

        // NOTE(review): duplicated from loadParameter(); re-adds a blue ROI
        // rectangle to the scene on every save — likely unintended.
        int roi_number = m_qsettings->value(m_section + "/" + "roi_number", 0).toInt();
        if (roi_number > 0)
        {
            int i = 0;
            row1 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "row1", 100).toInt();
            column1 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "column1", 100).toInt();
            row2 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "row2", 200).toInt();
            column2 = m_qsettings->value(m_section + "/" + "ROI" + QString::number(i) + "column2", 200).toInt();

            GraphicsShapeRectangleCirce* p = new GraphicsShapeRectangleCirce( column1, row1,  column2 - column1, row2 - row1, Qt::blue);
            p->setShapeType(0);
            p->setFilled(false);
            p->setZValue(101);
            m_view->m_scene->addItem(p);

            m_vector_graphics_shape_rectangle.push_back(p);
        }

        // NOTE(review): also duplicated from loadParameter(); appends the ear
        // rectangles again on every save — verify against loadParameter().
        int ear_number = m_qsettings->value(m_section + "/" + "ear_number", 0).toInt();
        for(int i=0;i<ear_number;i++)
        {
            int m_ear_row1 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "row1", 100).toInt();
            int m_ear_column1 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "column1", 100).toInt();
            int m_ear_row2 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "row2", 200).toInt();
            int m_ear_column2 = m_qsettings->value(m_section + "/" + "ear" + QString::number(i) + "column2", 200).toInt();

            GraphicsShapeRectangleCirce* p = new GraphicsShapeRectangleCirce(m_ear_column1, m_ear_row1, m_ear_column2 - m_ear_column1, m_ear_row2 - m_ear_row1, Qt::darkYellow);

            p->m_message = "ear"+QString::number(i);
            p->setShapeType(0);
            p->setFilled(false);
            p->setZValue(102);

            m_view->m_scene->addItem(p);

            m_vector_rectangle_ear.push_back(p);
        }


		// Take the current paths from the UI labels.
		RetrainedModelFileName = m_qlabel_model_path->text();
		

		PreprocessFileName = m_qlabel_preprocess_path->text();


		// Only users with sufficient privilege may persist the file paths.
		if (my_para.m_login_privilege >= 2)
		{
			m_qsettings->setValue(m_section + "/" + "RetrainedModelFileName", RetrainedModelFileName);
			m_qsettings->setValue(m_section + "/" + "PreprocessFileName", PreprocessFileName);
		}


        /*if(m_qcombox_unuse_use_ear!=nullptr)
        {
            m_unuse_use_ear = m_qsettings->value(m_section + "/" + "m_unuse_use_ear").toInt();
            m_qcombox_unuse_use_ear->setCurrentIndex(m_unuse_use_ear);
        }*/

        // Refresh the defect-standard list from the table and (for
        // privileged users) submit the table edits to the database.
        save_dl_prar1(m_qwidget_dl_para1, m_vector_defect_standard);
		
//        if (m_camera_index == 3 || m_camera_index == 4)
//        {
//            if(m_detection_index==0)
//                save_dl_prar2(m_qwidget_dl_para2, m_vector_ear_standard);
//        }

//		if (m_vector_rectangle_ear.size() != m_vector_ear_standard.size())
//		{
//			QMessageBox::information(nullptr, "", "ear roi != ear standard");
//		}
	}
	catch (exception& error)
	{
		QMessageBox::critical(nullptr, error.what(), m_section + "para error");
	}
}

bool FunctionDL::showDialog()
{
	QImage q_image = matToQImage(m_image);
	m_view->setImage(q_image, "");
    return true;
}


// Rebuild vector_defect_standard from the parameter table model of the
// given widget. Table layout (per row): column 1 = defect name,
// column 2 = score threshold, column 3 = size threshold.
// Privileged users (level >= 2) also submit pending table edits.
void FunctionDL::save_dl_prar1(QWidgetDlPara1* roi_sqllite, vector<DefectStandard>& vector_defect_standard)
{
	vector_defect_standard.clear();

	auto* model = roi_sqllite->m_qsltablemodel;
	const int row_count = model->rowCount();
	for (int row = 0; row < row_count; ++row)
	{
		DefectStandard standard;
		standard.name = model->data(model->index(row, 1)).toString().toLocal8Bit().toStdString();
		standard.threshold_score = model->data(model->index(row, 2)).toDouble();
		standard.threshold_size = model->data(model->index(row, 3)).toDouble();
		vector_defect_standard.push_back(standard);
	}

	if (my_para.m_login_privilege >= 2)
		roi_sqllite->submit();
}

//void FunctionDL::save_dl_prar2(QWidgetDlPara2* roi_sqllite, vector<DetectionStandard>& vector_defect_standard)
//{
	
//	m_vector_ear_standard.clear();
//	for (int j = 0; j < roi_sqllite->m_qsltablemodel->rowCount(); j++)
//	{
//		DetectionStandard defect_standard;
//		defect_standard.name = roi_sqllite->m_qsltablemodel->data(roi_sqllite->m_qsltablemodel->index(j, 1)).toString().toLocal8Bit().toStdString();
//		defect_standard.threshold_score = roi_sqllite->m_qsltablemodel->data(roi_sqllite->m_qsltablemodel->index(j, 2)).toDouble();
//		defect_standard.threshold_size_min = roi_sqllite->m_qsltablemodel->data(roi_sqllite->m_qsltablemodel->index(j, 3)).toDouble();
//		defect_standard.threshold_size_max = roi_sqllite->m_qsltablemodel->data(roi_sqllite->m_qsltablemodel->index(j, 4)).toDouble();
//		defect_standard.threshold_number = roi_sqllite->m_qsltablemodel->data(roi_sqllite->m_qsltablemodel->index(j, 5)).toDouble();;
//		m_vector_ear_standard.push_back(defect_standard);
//	}
//	if (my_para.m_login_privilege >= 2)
//		roi_sqllite->submit();
//}
