<?xml version="1.0"?>
<doc>
    <assembly>
        <name>Emgu.CV.ML</name>
    </assembly>
    <members>
        <member name="T:Emgu.CV.ML.MlEnum.EM_COVARIAN_MATRIX_TYPE">
            <summary>
            The type of the mixture covariance matrices
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.EM_COVARIAN_MATRIX_TYPE.COV_MAT_SPHERICAL">
            <summary>
            A covariance matrix of each mixture is a scaled identity matrix, σ_k*I, so the only parameter to be estimated is σ_k. The option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (e.g. in case when the data is preprocessed with PCA). The results of such preliminary estimation may be passed again to the optimization procedure, this time with cov_mat_type=COV_MAT_DIAGONAL
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.EM_COVARIAN_MATRIX_TYPE.COV_MAT_DIAGONAL">
            <summary>
            A covariance matrix of each mixture may be an arbitrary diagonal matrix with positive diagonal elements, that is, non-diagonal elements are forced to be 0's, so the number of free parameters is d for each matrix. This is the most commonly used option, yielding good estimation results
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.EM_COVARIAN_MATRIX_TYPE.COV_MAT_GENERIC">
            <summary>
            A covariance matrix of each mixture may be an arbitrary symmetric positive definite matrix, so the number of free parameters in each matrix is about d^2/2. It is not recommended to use this option, unless there is a pretty accurate initial estimation of the parameters and/or a huge number of training samples
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.EM_INIT_STEP_TYPE">
            <summary>
            The initial step the algorithm starts from
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.EM_INIT_STEP_TYPE.START_E_STEP">
            <summary>
            The algorithm starts with E-step. 
            At least, the initial values of mean vectors, CvEMParams.Means must be passed. 
            Optionally, the user may also provide initial values for weights (CvEMParams.Weights) and/or covariance matrices (CvEMParams.Covs).
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.EM_INIT_STEP_TYPE.START_M_STEP">
            <summary>
            The algorithm starts with M-step. The initial probabilities p_{i,k} must be provided
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.EM_INIT_STEP_TYPE.START_AUTO_STEP">
            <summary>
            No values are required from the user, k-means algorithm is used to estimate initial mixtures parameters
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.SVM_TYPE">
            <summary>
            Type of SVM
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC">
            <summary>
            n-class classification (n>=2), allows imperfect separation of classes with penalty multiplier C for outliers
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_TYPE.NU_SVC">
            <summary>
            n-class classification with possible imperfect separation. Parameter nu (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of C
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_TYPE.ONE_CLASS">
            <summary>
            one-class SVM. All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_TYPE.EPS_SVR">
            <summary>
            Regression. The distance between feature vectors from the training set and the fitting hyper-plane must be less than p. For outliers the penalty multiplier C is used
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_TYPE.NU_SVR">
            <summary>
            Regression; nu is used instead of p.
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE">
            <summary>
            The type of SVM parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE.C">
            <summary>
            C
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE.GAMMA">
            <summary>
            Gamma
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE.P">
            <summary>
            P
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE.NU">
            <summary>
            NU
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE.COEF">
            <summary>
            COEF
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE.DEGREE">
            <summary>
            DEGREE
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE">
            <summary>
            SVM kernel type
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR">
            <summary>
            No mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. d(x,y) = x·y == (x,y)
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.POLY">
            <summary>
            Polynomial kernel: d(x,y) = (gamma*(x·y)+coef0)^degree
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF">
            <summary>
            Radial-basis-function kernel; a good choice in most cases: d(x,y) = exp(-gamma*|x-y|^2)
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.SIGMOID">
            <summary>
            Sigmoid function is used as a kernel: d(x,y) = tanh(gamma*(x·y)+coef0)
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD">
            <summary>
            Training method for ANN_MLP
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP">
            <summary>
            Back-propagation algorithm
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.RPROP">
            <summary>
            Batch RPROP algorithm
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION">
            <summary>
            Possible activation functions
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.IDENTITY">
            <summary>
            Identity
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM">
            <summary>
            Sigmoid symmetric
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.GAUSSIAN">
            <summary>
            Gaussian
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG">
            <summary>
            The flags for the neural network training function
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT">
            <summary>
            The default training flags: the network is trained from scratch and the input/output vectors are normalized
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.UPDATE_WEIGHTS">
            <summary>
            The network weights are updated, rather than computed from scratch
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.NO_INPUT_SCALE">
            <summary>
            The algorithm does not normalize the input vectors
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.NO_OUTPUT_SCALE">
            <summary>
            The algorithm does not normalize the output vectors
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE">
            <summary>
            The data layout type
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE.COL_SAMPLE">
            <summary>
            Column sample
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE.ROW_SAMPLE">
            <summary>
            Row sample
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.BOOST_TYPE">
            <summary>
            Boosting type
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_TYPE.DISCRETE">
            <summary>
            Discrete AdaBoost
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_TYPE.REAL">
            <summary>
            Real AdaBoost
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_TYPE.LOGIT">
            <summary>
            LogitBoost
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_TYPE.GENTLE">
            <summary>
            Gentle AdaBoost
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlEnum.BOOST_SPLIT_CREITERIA">
            <summary>
            Splitting criteria, used to choose optimal splits during a weak tree construction
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_SPLIT_CREITERIA.DEFAULT">
            <summary>
            Use the default criteria for the particular boosting method, see below
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_SPLIT_CREITERIA.GINI">
            <summary>
            Use Gini index. This is default option for Real AdaBoost; may be also used for Discrete AdaBoost
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_SPLIT_CREITERIA.MISCLASS">
            <summary>
            Use misclassification rate. This is default option for Discrete AdaBoost; may be also used for Real AdaBoost
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlEnum.BOOST_SPLIT_CREITERIA.SQERR">
            <summary>
            Use least squares criteria. This is default and the only option for LogitBoost and Gentle AdaBoost
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvBoostParams">
            <summary>
            An OpenCV Boost Tree parameters
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.Structure.MCvBoostParams.GetDefaultParameter">
            <summary>
            Get the default Boost tree training parameters
            </summary>
            <returns>The default Boost tree training parameters</returns>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.maxCategories">
             <summary>
             If a discrete variable, on which the training procedure tries to make a split, takes more than max_categories values, the precise best subset estimation may take a very long time (as the algorithm is exponential). Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into max_categories clusters (i.e. some categories are merged together).
            Note that this technique is used only in N(>2)-class classification problems. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
             </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.maxDepth">
            <summary>
            This parameter specifies the maximum possible depth of the tree. That is, the training algorithm attempts to split a node while its depth is less than max_depth. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.minSampleCount">
            <summary>
            A node is not split if the number of samples directed to the node is less than the parameter value. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.cvFolds">
            <summary>
            If this parameter is &gt;1, the tree is pruned using cv_folds-fold cross validation.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.useSurrogates">
            <summary>
            If true, surrogate splits are built. Surrogate splits are needed to handle missing measurements and for variable importance estimation.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.use1seRule">
            <summary>
            If true, the tree is truncated a bit more by the pruning procedure. That leads to compact, and more resistant to the training data noise, but a bit less accurate decision tree. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.truncatePrunedTree">
            <summary>
            If true, the cut off nodes (with Tn&lt;=CvDTree::pruned_tree_idx) are physically removed from the tree. Otherwise they are kept, and by decreasing CvDTree::pruned_tree_idx (e.g. setting it to -1) it is still possible to get the results from the original un-pruned (or pruned less aggressively) tree. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.regressionAccuracy">
            <summary>
            Another stop criteria - only for regression trees. As soon as the estimated node value differs from the node training samples responses by less than the parameter value, the node is not split further.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.priors">
            <summary>
            The array of a priori class probabilities, sorted by the class label value. The parameter can be used to tune the decision tree preferences toward a certain class. For example, if users want to detect some rare anomaly occurrence, the training base will likely contain much more normal cases than anomalies, so a very good classification performance will be achieved just by considering every case as normal. To avoid this, the priors can be specified, where the anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is adjusted properly. 
            </summary>
            <remarks>A note about memory management: the field priors  is a pointer to the array of floats. The array should be allocated by user, and released just after the CvDTreeParams structure is passed to CvDTreeTrainData or CvDTree constructors/methods (as the methods make a copy of the array).</remarks>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.boostType">
            <summary>
            Boosting type
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.weakCount">
            <summary>
            The number of weak classifiers to build
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.splitCriteria">
            <summary>
            Splitting criteria, used to choose optimal splits during a weak tree construction
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvBoostParams.weightTrimRate">
            <summary>
            The weight trimming ratio, within 0..1. See the discussion of it above. If the parameter is &lt;=0 or &gt;1, the trimming is not used, all the samples are used at each iteration. The default value is 0.95
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.EMParams">
            <summary>
            The parameters for the EM model
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.EMParams.#ctor">
            <summary>
            Create EM parameters with default value
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.CovMatType">
            <summary>
            Get or set the type of Covariance matrix
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.Covs">
            <summary>
            Get or Set the Covariance matrices
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.Means">
            <summary>
            Get or set the means
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.Nclusters">
            <summary>
            Get or Set the number of clusters
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.Probs">
            <summary>
            Get or Set the probabilities
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.StartStep">
            <summary>
            Get or Set the start step
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.TermCrit">
            <summary>
            Get or Set the termination criteria
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EMParams.Weights">
            <summary>
            Get or Set the weights
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.MlInvoke">
            <summary>
            This class contains functions to call into machine learning library
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.MlInvoke.EXTERN_LIBRARY">
            <summary>
            Call the same extern library as defined in Emgu.CV.CvInvoke
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.StatModelSave(System.IntPtr,System.String,System.IntPtr)">
            <summary>
            Save the statistic model to the specific file
            </summary>
            <param name="statModel">The statistic model to save</param>
            <param name="fileName">The file name to save to</param>
            <param name="name">Pass IntPtr.Zero</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.StatModelLoad(System.IntPtr,System.String,System.IntPtr)">
            <summary>
            Load the statistic model from the specific file
            </summary>
            <param name="statModel">The statistic model to save</param>
            <param name="fileName">The file name to load from</param>
            <param name="name">Pass IntPtr.Zero</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.StatModelClear(System.IntPtr)">
            <summary>
            Clear the statistic model
            </summary>
            <param name="statModel">The model to be cleared</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvNormalBayesClassifierDefaultCreate">
            <summary>
            Create a normal bayes classifier
            </summary>
            <returns>The normal bayes classifier</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvNormalBayesClassifierCreate(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr)">
            <summary>
            Create a normal Bayes classifier using the specific training data
            </summary>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="varIdx">Can be IntPtr.Zero if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <returns>The normal Bayes classifier</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvNormalBayesClassifierRelease(System.IntPtr)">
            <summary>
            Release the memory associated with the bayes classifier
            </summary>
            <param name="classifier">The classifier to release</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvNormalBayesClassifierTrain(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.Boolean)">
            <summary>
            Train the classifier using the specific data
            </summary>
            <param name="classifier">The NormalBayesClassifier</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="varIdx">Can be IntPtr.Zero if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="update">If true, the training data is used to update the classifier; Otherwise, the data in the classifier are cleared before training is performed</param>
            <returns>The number of done iterations</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvNormalBayesClassifierPredict(System.IntPtr,System.IntPtr,System.IntPtr)">
            <summary>
            Given the NormalBayesClassifier <paramref name="model"/>, predict the probability of the <paramref name="samples"/>
            </summary>
            <param name="model">The NormalBayesClassifier classifier model</param>
            <param name="samples">The input samples</param>
            <param name="results">The prediction results, should have the same # of rows as the <paramref name="samples"/></param>
            <returns>In case of classification the method returns the class label, in case of regression - the output function value</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvKNearestDefaultCreate">
            <summary>
            Create a KNearest classifier
            </summary>
            <returns>The KNearest classifier</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvKNearestRelease(System.IntPtr)">
            <summary>
            Release the KNearest classifier
            </summary>
            <param name="knearest">The classifier to release</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvKNearestCreate(System.IntPtr,System.IntPtr,System.IntPtr,System.Boolean,System.Int32)">
            <summary>
            Create the KNearest classifier using the specific training data.
            </summary>
            <param name="isRegression">Specify the output variables type. It can be either categorical (isRegression=false) or ordered (isRegression=true)</param>
            <param name="maxK">The number of maximum neighbors that may be passed to the method findNearest.</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <returns>The KNearest classifier</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvKNearestTrain(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.Boolean,System.Int32,System.Boolean)">
            <summary>
            Update the KNearest classifier using the specific training data.
            </summary>
            <param name="isRegression">Specify the output variables type. It can be either categorical (isRegression=false) or ordered (isRegression=true)</param>
            <param name="maxK">The number of maximum neighbors that may be passed to the method findNearest.</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="classifier">The KNearest classifier to be updated</param>
            <param name="updateBase">
            If true, the existing classifier is updated using the new training data;
            Otherwise, the classifier is trained from scratch</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvKNearestFindNearest(System.IntPtr,System.IntPtr,System.Int32,System.IntPtr,System.IntPtr[],System.IntPtr,System.IntPtr)">
            <summary>
            For each input vector (which are rows of the matrix <paramref name="samples"/>) the method finds k &lt;= get_max_k() nearest neighbor. In case of regression, the predicted result will be a mean value of the particular vector's neighbor responses. In case of classification the class is determined by voting.
            </summary>
            <param name="classifier">The KNearest classifier</param>
            <param name="samples">The sample matrix where each row is a sample</param>
            <param name="k">The number of nearest neighbor to find</param>
            <param name="results">
            Can be IntPtr.Zero if not needed.
            If regression, return a mean value of the particular vector's neighbor responses;
            If classification, return the class determined by voting.
            </param>
            <param name="kNearestNeighbors">Should be IntPtr.Zero if not needed. Setting it to non-null values incurs a performance penalty. A matrix of (k * samples.Rows) rows and (samples.Cols) columns that will be filled with the data of the K nearest-neighbors for each sample</param>
            <param name="neighborResponses">Should be IntPtr.Zero if not needed. The response of the neighbors. A vector of k*_samples->rows elements.</param>
            <param name="dist">Should be IntPtr.Zero if not needed. The distances from the input vectors to the neighbors. A vector of k*_samples->rows elements.</param>
            <returns>In case of regression, the predicted result will be a mean value of the particular vector's neighbor responses. In case of classification the class is determined by voting</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMDefaultCreate">
            <summary>
            Create a default EM model
            </summary>
            <returns>Pointer to the EM model</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMRelease(System.IntPtr)">
            <summary>
            Release the EM model
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMTrain(System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvEMParams,System.IntPtr)">
            <summary>
            Train the EM model using the specific training data
            </summary>
            <param name="model">The EM model</param>
            <param name="samples">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for EM</param>
            <param name="labels">Can be IntPtr.Zero if not needed. Optionally computed output "class label" for each sample</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMPredict(System.IntPtr,System.IntPtr,System.IntPtr)">
            <summary>
            Given the EM <paramref name="model"/>, predict the probability of the <paramref name="samples"/>
            </summary>
            <param name="model">The EM model</param>
            <param name="samples">The input samples</param>
            <param name="probs">The prediction results, should have the same # of rows as the <paramref name="samples"/></param>
            <returns>In case of classification the method returns the class label, in case of regression - the output function value</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMGetMeans(System.IntPtr)">
            <summary>
            Get the means of the clusters from the EM model
            </summary>
            <param name="model">The EM model</param>
            <returns>The means of the clusters of the EM model</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMGetCovs(System.IntPtr)">
            <summary>
            Get the covariance matrices of the clusters from the EM model
            </summary>
            <param name="model">The EM model</param>
            <returns>The covariance matrices of the clusters of the EM model</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMGetWeights(System.IntPtr)">
            <summary>
            Get the weights of the clusters from the EM model
            </summary>
            <param name="model">The EM model</param>
            <returns>The weights of the clusters of the EM model</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMGetProbs(System.IntPtr)">
            <summary>
            Get the probabilities from the EM model
            </summary>
            <param name="model">The EM model</param>
            <returns>The probabilities of the EM model </returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvEMGetNclusters(System.IntPtr)">
            <summary>
            Get the number of clusters from the EM model
            </summary>
            <param name="model">The EM model</param>
            <returns>The number of clusters of the EM model</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvSVMDefaultCreate">
            <summary>
            Create a default SVM model
            </summary>
            <returns>Pointer to the SVM model</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvSVMRelease(System.IntPtr)">
            <summary>
            Release the SVM model and all the memory associated with it
            </summary>
            <param name="model">The SVM model to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvSVMTrain(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvSVMParams)">
            <summary>
            Train the SVM model with the specific parameters
            </summary>
            <param name="model">The SVM model</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix</param>
            <param name="responses">The response for the training data. It's usually a 32-bit floating point matrix; In classification problem, it can be an Int32 matrix.</param>
            <param name="varIdx">Can be IntPtr.Zero if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for SVM</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvSVMGetDefaultGrid(Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE,Emgu.CV.ML.Structure.MCvParamGrid@)">
            <summary>
            Get the default parameter grid for the specific SVM type
            </summary>
            <param name="type">The SVM type</param>
            <param name="grid">The parameter grid reference, values will be filled in by the function call</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvSVMTrainAuto(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvSVMParams,System.Int32,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid)">
            <summary>
            The method trains the SVM model automatically by choosing the optimal parameters C, gamma, p, nu, coef0, degree from CvSVMParams. By optimality one means that the cross-validation estimate of the test set error is minimal. 
            </summary>
            <param name="model">The SVM model</param>
            <param name="trainData">The training data.</param>
            <param name="responses">The response for the training data.</param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for SVM</param>
            <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times</param>
            <param name="cGrid">The grid for the parameter C</param>
            <param name="gammaGrid">The grid for the parameter gamma</param>
            <param name="pGrid">The grid for the parameter p</param>
            <param name="nuGrid">The grid for the parameter nu</param>
            <param name="coefGrid">The grid for the parameter coef0</param>
            <param name="degreeGrid">The grid for the parameter degree</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.cvSVMPredict(System.IntPtr,System.IntPtr)">
            <summary>
            Predicts response for the input sample.
            </summary>
            <param name="model">The SVM model</param>
            <param name="_sample">The input sample</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.cvSVMGetSupportVector(System.IntPtr,System.Int32)">
            <summary>
            The method retrieves a given support vector
            </summary>
            <param name="model">The SVM model</param>
            <param name="i">The index of the support vector</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.cvSVMGetSupportVectorCount(System.IntPtr)">
            <summary>
            The method retrieves the number of support vectors
            </summary>
            <param name="model">The SVM model</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.cvSVMGetVarCount(System.IntPtr)">
            <summary>
            The method retrieves the number of vars
            </summary>
            <param name="model">The SVM model</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvANN_MLPCreate(System.IntPtr,Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION,System.Double,System.Double)">
            <summary>
            Create a neural network using the specific parameters
            </summary>
            <param name="layerSizes">The size of the layer</param>
            <param name="activeFunction">Activation function</param>
            <param name="fParam1">Free parameters of the activation function, alpha</param>
            <param name="fParam2">Free parameters of the activation function, beta</param>
            <returns>The neural network created</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvANN_MLPRelease(System.IntPtr)">
            <summary>
            Release the ANN_MLP model
            </summary>
            <param name="model">The ANN_MLP model to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvANN_MLPTrain(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams,Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG)">
            <summary>
            Train the ANN_MLP model with the specific parameters
            </summary>
            <param name="model">The ANN_MLP model</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleWeights">(RPROP only) The optional floating-point vector of weights for each sample. Some samples may be more important than others for training, e.g. user may want to gain the weight of certain classes to find the right balance between hit-rate and false-alarm rate etc</param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for ANN_MLP</param>
            <param name="flags">The training flag</param>
            <returns>The number of done iterations</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvANN_MLPPredict(System.IntPtr,System.IntPtr,System.IntPtr)">
            <summary>
            Given the <paramref name="model"/>, predict the <paramref name="outputs"/> response of the <paramref name="inputs"/> samples
            </summary>
            <param name="model">The ANN_MLP model</param>
            <param name="inputs">The input samples</param>
            <param name="outputs">The prediction results, should have the same # of rows as the inputs</param>
            <returns>In case of classification the method returns the class label, in case of regression - the output function value</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvDTreeParamsCreate">
            <summary>
            Create default parameters for CvDTreeParams
            </summary>
            <returns>Pointer to the default CvDTreeParams</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvDTreeParamsRelease(System.IntPtr)">
            <summary>
            Release the CvDTreeParams
            </summary>
            <param name="dTreeParam">Pointer to the decision tree parameters to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvDTreeCreate">
            <summary>
            Create a default decision tree
            </summary>
            <returns>Pointer to the decision tree</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvDTreeRelease(System.IntPtr)">
            <summary>
            Release the decision tree model
            </summary>
            <param name="model">The decision tree model to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvDTreeTrain(System.IntPtr,System.IntPtr,Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvDTreeParams)">
            <summary>
            Train the decision tree using the specific training data
            </summary>
            <param name="model">The Decision Tree model</param>
            <param name="tflag">The data layout type of the train data</param>
            <param name="varIdx">Can be IntPtr.Zero if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="varType">The types of input variables</param>
            <param name="missingMask">Can be IntPtr.Zero if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="param">The parameters for training the decision tree</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvDTreePredict(System.IntPtr,System.IntPtr,System.IntPtr,System.Boolean)">
            <summary>
            The method takes the feature vector and the optional missing measurement mask on input, traverses the decision tree and returns the reached leaf node on output. The prediction result, either the class label or the estimated function value, may be retrieved as value field of the CvDTreeNode structure
            </summary>
            <param name="model">The decision tree model</param>
            <param name="sample">The sample to be predicted</param>
            <param name="missingDataMask">Can be IntPtr.Zero if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="rawMode">Normally set to false, which implies a regular input. If it is true, the method assumes that all the values of the discrete input variables have been already normalized to 0..num_of_categories_i-1 ranges. (as the decision tree uses such normalized representation internally). It is useful for faster prediction with tree ensembles. For ordered input variables the flag is not used. </param>
            <returns>Pointer to the reached leaf node on output. The prediction result, either the class label or the estimated function value, may be retrieved as value field of the CvDTreeNode structure</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvRTParamsCreate">
            <summary>
            Create default parameters for CvRTParams
            </summary>
            <returns>Pointer to the default CvRTParams</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvRTParamsRelease(System.IntPtr)">
            <summary>
            Release the CvRTParams
            </summary>
            <param name="rTreesParam">Pointer to the random tree parameters to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvRTreesCreate">
            <summary>
            Create a default random tree
            </summary>
            <returns>Pointer to the random tree</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvRTreesRelease(System.IntPtr)">
            <summary>
            Release the random tree model
            </summary>
            <param name="model">The random tree model to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvRTreesTrain(System.IntPtr,System.IntPtr,Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvRTParams)">
            <summary>
            Train the random tree using the specific training data
            </summary>
            <param name="model">The Random Tree model</param>
            <param name="tFlag">The data layout type of the train data</param>
            <param name="missingMask">Can be IntPtr.Zero if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="param">The parameters for training the random tree</param>
            <param name="varIdx">Can be IntPtr.Zero if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="varType">The types of input variables</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvRTreesPredict(System.IntPtr,System.IntPtr,System.IntPtr)">
            <summary>
            The method takes the feature vector and the optional missing measurement mask on input, traverses the random tree and returns the cumulative result from all the trees in the forest (the class that receives the majority of voices, or the mean of the regression function estimates)
            </summary>
            <param name="model">The decision tree model</param>
            <param name="sample">The sample to be predicted</param>
            <param name="missingDataMask">Can be IntPtr.Zero if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <returns>The cumulative result from all the trees in the forest (the class that receives the majority of voices, or the mean of the regression function estimates)</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvBoostParamsCreate">
            <summary>
            Create default parameters for CvBoost
            </summary>
            <returns>Pointer to the default CvBoostParams</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvBoostParamsRelease(System.IntPtr)">
            <summary>
            Release the CvBoostParams
            </summary>
            <param name="bTreeParam">Pointer to the boost tree parameters to be released</param>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvBoostTrain(System.IntPtr,System.IntPtr,Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.ML.Structure.MCvBoostParams,System.Boolean)">
            <summary>
            Train the boost tree using the specific training data
            </summary>
            <param name="model">The Boost Tree model</param>
            <param name="tFlag">The data layout type of the train data</param>
            <param name="missingMask">Can be IntPtr.Zero if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="param">The parameters for training the boost tree</param>
            <param name="varIdx">Can be IntPtr.Zero if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="varType">The types of input variables</param>
            <param name="update">specifies whether the classifier needs to be updated (i.e. the new weak tree classifiers added to the existing ensemble), or the classifier needs to be rebuilt from scratch</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvBoostPredict(System.IntPtr,System.IntPtr,System.IntPtr,System.IntPtr,Emgu.CV.Structure.MCvSlice,System.Boolean)">
            <summary>
            Runs the sample through the trees in the ensemble and returns the output class label based on the weighted voting
            </summary>
            <param name="model">The Boost Tree model</param>
            <param name="sample">The input sample</param>
            <param name="missing">Can be IntPtr.Zero if not needed. The optional mask of missing measurements. To handle missing measurements, the weak classifiers must include surrogate splits</param>
            <param name="weakResponses">Can be IntPtr.Zero if not needed. a floating-point vector, of responses from each individual weak classifier. The number of elements in the vector must be equal to the slice length.</param>
            <param name="slice">The continuous subset of the sequence of weak classifiers to be used for prediction</param>
            <param name="rawMode">Normally set to false, which implies a regular input. If it is true, the method assumes that all the values of the discrete input variables have been already normalized to 0..num_of_categories_i-1 ranges. (as the decision tree uses such normalized representation internally). It is useful for faster prediction with tree ensembles. For ordered input variables the flag is not used. </param>
            <returns>The output class label based on the weighted voting</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvBoostCreate">
            <summary>
            Create a default boost classifier
            </summary>
            <returns>Pointer to the boost classifier</returns>
        </member>
        <member name="M:Emgu.CV.ML.MlInvoke.CvBoostRelease(System.IntPtr)">
            <summary>
            Release the boost classifier
            </summary>
            <param name="model">The boost classifier to be released</param>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvRTParams">
            <summary>
            Wrapped CvRTParams structure
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.Structure.MCvRTParams.GetDefaultParameter">
            <summary>
            Get the default Decision tree training parameters
            </summary>
            <returns>The default Decision tree training parameters</returns>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.maxCategories">
             <summary>
             If a discrete variable, on which the training procedure tries to make a split, takes more than max_categories values, the precise best subset estimation may take a very long time (as the algorithm is exponential). Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into max_categories clusters (i.e. some categories are merged together).
            Note that this technique is used only in N(>2)-class classification problems. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
             </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.maxDepth">
            <summary>
            This parameter specifies the maximum possible depth of the tree. That is the training algorithms attempts to split a node while its depth is less than max_depth. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.minSampleCount">
            <summary>
            A node is not split if the number of samples directed to the node is less than the parameter value. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.cvFolds">
            <summary>
            If this parameter is &gt;1, the tree is pruned using cv_folds-fold cross validation.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.useSurrogates">
            <summary>
            If true, surrogate splits are built. Surrogate splits are needed to handle missing measurements and for variable importance estimation.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.use1seRule">
            <summary>
            If true, the tree is truncated a bit more by the pruning procedure. That leads to compact, and more resistant to the training data noise, but a bit less accurate decision tree. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.truncatePrunedTree">
            <summary>
            If true, the cut off nodes (with Tn&lt;=CvDTree::pruned_tree_idx) are physically removed from the tree. Otherwise they are kept, and by decreasing CvDTree::pruned_tree_idx (e.g. setting it to -1) it is still possible to get the results from the original un-pruned (or pruned less aggressively) tree. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.regressionAccuracy">
            <summary>
            Another stop criteria - only for regression trees. As soon as the estimated node value differs from the node training samples responses by less than the parameter value, the node is not split further.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.priors">
            <summary>
            The array of a priori class probabilities, sorted by the class label value. The parameter can be used to tune the decision tree preferences toward a certain class. For example, if users want to detect some rare anomaly occurrence, the training base will likely contain much more normal cases than anomalies, so a very good classification performance will be achieved just by considering every case as normal. To avoid this, the priors can be specified, where the anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is adjusted properly. 
            </summary>
            <remarks>A note about memory management: the field priors  is a pointer to the array of floats. The array should be allocated by user, and released just after the CvDTreeParams structure is passed to CvDTreeTrainData or CvDTree constructors/methods (as the methods make a copy of the array).</remarks>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.calcVarImportance">
            <summary>
            If it is set, then variable importance is computed by the training procedure. To retrieve the computed variable importance array, call the method CvRTrees::get_var_importance()
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.nactiveVars">
            <summary>
            The number of variables that are randomly selected at each tree node and that are used to find the best split(s). 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvRTParams.termCrit">
            <summary>
            Termination criteria for growing the forest: term_crit.max_iter is the maximum number of trees in the forest (see also max_tree_count parameter of the constructor, by default it is set to 50)
            term_crit.epsilon is the sufficient accuracy (OOB error). 
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvDTreeSplit">
            <summary>
            Decision tree node split
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeSplit.var_idx">
            <summary>
            Index of the variable used in the split 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeSplit.inversed">
            <summary>
            When it equals 1, the inverse split rule is used (i.e. left and right branches are exchanged in the expressions below)
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeSplit.quality">
            <summary>
            The split quality, a positive number. It is used to choose the best primary split, then to choose and sort the surrogate splits. After the tree is constructed, it is also used to compute variable importance
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeSplit.next">
            <summary>
            Pointer to the next split in the node split list
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.Structure.MCvDTreeSplit.Order">
            <summary>
            Get or Set the Order of this TreeSplit
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.Structure.MCvDTreeSplit.Subset">
            <summary>
            Get the bit array indicating the value subset in case of split on a categorical variable.
            The rule is: if var_value in subset then next_node&lt;-left else next_node&lt;-right
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvDTreeSplit.MOrder">
            <summary>
            Wrapped Order structure
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeSplit.MOrder.c">
            <summary>
            The threshold value in case of split on an ordered variable.
            The rule is: if var_value &lt; c then next_node&lt;-left else next_node&lt;-right
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeSplit.MOrder.split_point">
            <summary>
            Used internally by the training algorithm
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.RTrees">
            <summary>
            Random tree
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.StatModel">
            <summary>
            A statistic model
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.StatModel.Save(System.String)">
            <summary>
            Save the statistic model to file
            </summary>
            <param name="fileName"></param>
        </member>
        <member name="M:Emgu.CV.ML.StatModel.Load(System.String)">
            <summary>
            Load the statistic model from file
            </summary>
            <param name="fileName">The file to load the model from</param>
        </member>
        <member name="M:Emgu.CV.ML.StatModel.Clear">
            <summary>
            Clear the statistic model
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.RTrees.#ctor">
            <summary>
            Create a random tree
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.RTrees.DisposeObject">
            <summary>
            Release the random tree and all memory associate with it
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.RTrees.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE,Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.Structure.MCvRTParams)">
            <summary>
            Train the random tree using the specific training data
            </summary>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="tflag">data layout type</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="varType">The types of input variables</param>
            <param name="missingMask">Can be null if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="param">The parameters for training the random tree</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.RTrees.Predict(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32})">
            <summary>
            The method takes the feature vector and the optional missing measurement mask on input, traverses the random tree and returns the cumulative result from all the trees in the forest (the class that receives the majority of voices, or the mean of the regression function estimates)
            </summary>
            <param name="sample">The sample to be predicted</param>
            <param name="missingDataMask">Can be null if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <returns>The cumulative result from all the trees in the forest (the class that receives the majority of voices, or the mean of the regression function estimates)</returns>
        </member>
        <member name="T:Emgu.CV.ML.DTree">
            <summary>
            Decision Tree 
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.DTree.#ctor">
            <summary>
            Create a default decision tree
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.DTree.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE,Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.Structure.MCvDTreeParams)">
            <summary>
            Train the decision tree using the specific training data
            </summary>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="tflag">data layout type</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="varType">The types of input variables</param>
            <param name="missingMask">Can be null if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="param">The parameters for training the decision tree</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.DTree.Predict(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},System.Boolean)">
            <summary>
            The method takes the feature vector and the optional missing measurement mask on input, traverses the decision tree and returns the reached leaf node on output. The prediction result, either the class label or the estimated function value, may be retrieved as value field of the CvDTreeNode structure
            </summary>
            <param name="sample">The sample to be predicted</param>
            <param name="missingDataMask">Can be null if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="sample"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="rawMode">normally set to false that implies a regular input. If it is true, the method assumes that all the values of the discrete input variables have been already normalized to 0..num_of_categoriesi-1 ranges. (as the decision tree uses such normalized representation internally). It is useful for faster prediction with tree ensembles. For ordered input variables the flag is not used. </param>
            <returns>Pointer to the reached leaf node on output. The prediction result, either the class label or the estimated function value, may be retrieved as value field of the CvDTreeNode structure</returns>
        </member>
        <member name="M:Emgu.CV.ML.DTree.DisposeObject">
            <summary>
            Release the decision tree and all the memory associate with it
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.SVMParams">
            <summary>
            The parameters for the SVM model
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._svmType">
            <summary>
            The type of SVM
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._kernelType">
            <summary>
            The type of SVM kernel
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._degree">
            <summary>
            For poly
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._gamma">
            <summary>
            For poly/rbf/sigmoid
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._coef0">
            <summary>
            For poly/sigmoid
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._c">
            <summary>
            For CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._nu">
            <summary>
            For CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._p">
            <summary>
            For CV_SVM_EPS_SVR
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._classWeights">
            <summary>
            For CV_SVM_C_SVC
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.SVMParams._termCrit">
            <summary>
            Termination criteria
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.SVMType">
            <summary>
            The type of SVM
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.KernelType">
            <summary>
            The type of SVM kernel
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.Degree">
            <summary>
            For poly
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.Gamma">
            <summary>
            For poly/rbf/sigmoid
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.Coef0">
            <summary>
            For poly/sigmoid
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.C">
            <summary>
            For CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.Nu">
            <summary>
            For CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.P">
            <summary>
            For CV_SVM_EPS_SVR
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.ClassWeights">
            <summary>
            For CV_SVM_C_SVC
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.TermCrit">
            <summary>
            Get or Set the termination criteria
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.SVMParams.MCvSVMParams">
            <summary>
            Get the equivalent representation of MCvSVMParams
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvEMParams">
            <summary>
            Wrapped CvEMParams
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.nclusters">
            <summary>
            The number of mixtures
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.cov_mat_type">
            <summary>
            The type of the mixture covariation matrices
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.start_step">
            <summary>
            The initial step the algorithm starts from
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.probs">
            <summary>
            Initial probabilities p_i,k; are used (and must be not NULL) only when start_step=CvEM::START_M_STEP
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.weights">
            <summary>
            Initial mixture weights pi_k; are used (if not NULL) only when start_step=CvEM::START_E_STEP
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.means">
            <summary>
            Initial mixture means a_k; are used (and must be not NULL) only when start_step=CvEM::START_E_STEP.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.covs">
            <summary>
            Initial mixture covariation matrices S_k; are used (if not NULL) only when start_step=CvEM::START_E_STEP.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvEMParams.term_crit">
            <summary>
            Termination criteria of the procedure. EM algorithm stops either after a certain number of iterations (term_crit.num_iter), or when the parameters change too little (no more than term_crit.epsilon) from iteration to iteration
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.ANN_MLP">
            <summary>
            Neural network
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.ANN_MLP.#ctor(Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION,System.Double,System.Double)">
            <summary>
            Create a neural network using the specific parameters
            </summary>
            <param name="layerSize">The size of the layer</param>
            <param name="activeFunction">Activation function</param>
            <param name="fParam1">Free parameters of the activation function, alpha</param>
            <param name="fParam2">Free parameters of the activation function, beta</param>
        </member>
        <member name="M:Emgu.CV.ML.ANN_MLP.DisposeObject">
            <summary>
            Release the memory associated with this neural network
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.ANN_MLP.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams,Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG)">
            <summary>
            Train the ANN_MLP model with the specific parameters
            </summary>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleWeights">It is not null only for RPROP. The optional floating-point vector of weights for each sample. Some samples may be more important than others for training, e.g. user may want to gain the weight of certain classes to find the right balance between hit-rate and false-alarm rate etc</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for ANN_MLP</param>
            <param name="flag">The training flag</param>
            <returns>The number of done iterations</returns>
        </member>
        <member name="M:Emgu.CV.ML.ANN_MLP.Predict(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single})">
            <summary>
            Predict the response of the <paramref name="samples"/>
            </summary>
            <param name="samples">The input samples</param>
            <param name="outputs">The prediction results, should have the same # of rows as the <paramref name="samples"/></param>
            <returns>In case of classification the method returns the class label, in case of regression - the output function value</returns>
        </member>
        <member name="T:Emgu.CV.ML.Boost">
            <summary>
            Boost Tree 
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.Boost.#ctor">
            <summary>
            Create a default Boost classifier
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.Boost.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.ML.MlEnum.DATA_LAYOUT_TYPE,Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.Structure.MCvBoostParams,System.Boolean)">
            <summary>
            Train the boost tree using the specific training data
            </summary>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="tflag">data layout type</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="varType">The types of input variables</param>
            <param name="missingMask">Can be null if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="trainData"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="param">The parameters for training the boost tree</param>
            <param name="update">specifies whether the classifier needs to be updated (i.e. the new weak tree classifiers added to the existing ensemble), or the classifier needs to be rebuilt from scratch</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.Boost.Predict(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Single},Emgu.CV.Structure.MCvSlice,System.Boolean)">
            <summary>
            The method takes the feature vector and the optional missing measurement mask on input, traverses the random tree and returns the cumulative result from all the trees in the forest (the class that receives the majority of voices, or the mean of the regression function estimates)
            </summary>
            <param name="sample">The sample to be predicted</param>
            <param name="missingDataMask">Can be null if not needed. When specified, it is an 8-bit matrix of the same size as <paramref name="sample"/>, is used to mark the missed values (non-zero elements of the mask)</param>
            <param name="weakResponses">Can be null if not needed. a floating-point vector, of responses from each individual weak classifier. The number of elements in the vector must be equal to the slice length.</param>
            <param name="slice">The continuous subset of the sequence of weak classifiers to be used for prediction</param>
            <param name="rawMode">Normally set to false that implies a regular input. If it is true, the method assumes that all the values of the discrete input variables have been already normalized to 0..num_of_categoriesi-1 ranges. (as the decision tree uses such normalized representation internally). It is useful for faster prediction with tree ensembles. For ordered input variables the flag is not used. </param>    
            <returns>The cumulative result from all the trees in the forest (the class that receives the majority of voices, or the mean of the regression function estimates)</returns>
        </member>
        <member name="M:Emgu.CV.ML.Boost.DisposeObject">
            <summary>
            Release the Boost classifier and all memory associate with it
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams">
            <summary>
            Parameters for Artificial Neural Network - MultiLayer Perceptron
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.term_crit">
            <summary>
            The termination criteria
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.train_method">
            <summary>
            The type of training method
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.bp_dw_scale">
            <summary>
            backpropagation parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.bp_moment_scale">
            <summary>
            backpropagation parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.rp_dw0">
            <summary>
            rprop parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.rp_dw_plus">
            <summary>
            rprop parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.rp_dw_minus">
            <summary>
            rprop parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.rp_dw_min">
            <summary>
            rprop parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams.rp_dw_max">
            <summary>
            rprop parameters
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvDTreeNode">
            <summary>
            An OpenCV decision Tree Node
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.classIdx">
            <summary>
            The assigned to the node normalized class index (to 0..class_count-1 range), it is used internally in classification trees and tree ensembles.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.Tn">
            <summary>
            The tree index in an ordered sequence of trees. The indices are used during and after the pruning procedure. The root node has the maximum value Tn  of the whole tree, child nodes have Tn less than or equal to the parent's Tn, and the nodes with Tn&lt;=CvDTree::pruned_tree_idx are not taken into consideration at the prediction stage (the corresponding branches are considered as cut-off), even if they have not been physically deleted from the tree at the pruning stage. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.value">
            <summary>
            The value assigned to the tree node. It is either a class label, or the estimated function value.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.parent">
            <summary>
            Pointer to the parent tree node
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.left">
            <summary>
            Pointer to the left tree node
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.right">
            <summary>
            Pointer to the right tree node
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.split">
            <summary>
            Pointer to CvDTreeSplit
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.sampleCount">
             <summary>
             The number of samples that fall into the node at the training stage. It is used to resolve the difficult cases - when the variable for the primary split is missing, and all the variables for other surrogate splits are missing too,
            the sample is directed to the left if left-&gt;sample_count&gt;right-&gt;sample_count and to the right otherwise
             </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.depth">
            <summary>
            The node depth, the root node depth is 0, the child nodes depth is the parent's depth + 1. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.numValid">
            <summary>
            Internal parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.offset">
            <summary>
            Internal parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.bufIdx">
            <summary>
            Internal parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.maxlr">
            <summary>
            Internal parameters
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.complexity">
            <summary>
            Global pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.alpha">
            <summary>
            Global pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.nodeRisk">
            <summary>
            Global pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.treeRisk">
            <summary>
            Global pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.treeError">
            <summary>
            Global pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.cvTn">
            <summary>
            Cross-validation pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.cvNodeRisk">
            <summary>
            Cross-validation pruning data
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeNode.cvNodeError">
            <summary>
            Cross-validation pruning data
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.KNearest">
            <summary>
            The KNearest classifier
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.KNearest.#ctor">
            <summary>
            Create a default KNearest classifier
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.KNearest.#ctor(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},System.Boolean,System.Int32)">
            <summary>
            Create a KNearest classifier using the specific training data
            </summary>
            <param name="isRegression">Specify the output variables type. It can be either categorical (isRegression=false) or ordered (isRegression=true)</param>
            <param name="maxK">The number of maximum neighbors that may be passed to the method findNearest.</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be IntPtr.Zero if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
        </member>
        <member name="M:Emgu.CV.ML.KNearest.DisposeObject">
            <summary>
            Release the classifier and all the memory associated with it
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.KNearest.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},System.Boolean,System.Int32,System.Boolean)">
            <summary>
            Update the KNearest classifier using the specific training data.
            </summary>
            <param name="isRegression">Specify the output variables type. It can be either categorical (isRegression=false) or ordered (isRegression=true)</param>
            <param name="maxK">The number of maximum neighbors that may be passed to the method findNearest.</param>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="updateBase">
            If true, the existing classifier is updated using the new training data;
            Otherwise, the classifier is trained from scratch</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.KNearest.FindNearest(Emgu.CV.Matrix{System.Single},System.Int32,Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single})">
            <summary>
            For each input vector (which are rows of the matrix <paramref name="samples"/>) the method finds k &lt;= get_max_k() nearest neighbor. In case of regression, the predicted result will be a mean value of the particular vector's neighbor responses. In case of classification the class is determined by voting.
            </summary>
            <param name="samples">The sample matrix where each row is a sample</param>
            <param name="k">The number of nearest neighbor to find</param>
            <param name="results">
            Can be null if not needed.
            If regression, return a mean value of the particular vector's neighbor responses;
            If classification, return the class determined by voting.
            </param>
            <param name="kNearestNeighbors">Should be null if not needed. Setting it to non-null values incurs a performance penalty. A matrix of (k * samples.Rows) rows and (samples.Cols) columns that will be filled with the data of the K nearest-neighbors for each sample</param>
            <param name="neighborResponses">Should be null if not needed. The response of the neighbors. A vector of k*_samples->rows elements.</param>
            <param name="dist">Should be null if not needed. The distances from the input vectors to the neighbors. A vector of k*_samples->rows elements.</param>
            <returns>In case of regression, the predicted result will be a mean value of the particular vector's neighbor responses. In case of classification the class is determined by voting</returns>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvSVMParams">
            <summary>
            Wrapped CvSVMParams
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.svm_type">
            <summary>
            The type of SVM
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.kernel_type">
            <summary>
            The type of SVM kernel
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.degree">
            <summary>
            for poly
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.gamma">
            <summary>
            for poly/rbf/sigmoid
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.coef0">
            <summary>
            for poly/sigmoid
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.C">
            <summary>
            for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.nu">
            <summary>
            for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.p">
            <summary>
            for CV_SVM_EPS_SVR
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.class_weights">
            <summary>
            for CV_SVM_C_SVC
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvSVMParams.term_crit">
            <summary>
            termination criteria
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvParamGrid">
            <summary>
            Wrapped CvParamGrid structure used by SVM
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvParamGrid.min_val">
            <summary>
            Minimum value
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvParamGrid.max_val">
            <summary>
            Maximum value
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvParamGrid.step">
            <summary>
            step
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.Structure.MCvDTreeParams">
            <summary>
            OpenCV's decision tree training parameters
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.Structure.MCvDTreeParams.GetDefaultParameter">
            <summary>
            Get the default Decision tree training parameters
            </summary>
            <returns>The default Decision tree training parameters</returns>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.maxCategories">
             <summary>
             If a discrete variable, on which the training procedure tries to make a split, takes more than max_categories values, the precise best subset estimation may take a very long time (as the algorithm is exponential). Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into max_categories clusters (i.e. some categories are merged together).
            Note that this technique is used only in N(>2)-class classification problems. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
             </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.maxDepth">
            <summary>
            This parameter specifies the maximum possible depth of the tree. That is the training algorithms attempts to split a node while its depth is less than max_depth. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.minSampleCount">
            <summary>
            A node is not split if the number of samples directed to the node is less than the parameter value. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.cvFolds">
            <summary>
            If this parameter is &gt;1, the tree is pruned using cv_folds-fold cross validation.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.useSurrogates">
            <summary>
            If true, surrogate splits are built. Surrogate splits are needed to handle missing measurements and for variable importance estimation.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.use1seRule">
            <summary>
            If true, the tree is truncated a bit more by the pruning procedure. That leads to compact, and more resistant to the training data noise, but a bit less accurate decision tree. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.truncatePrunedTree">
            <summary>
            If true, the cut off nodes (with Tn&lt;=CvDTree::pruned_tree_idx) are physically removed from the tree. Otherwise they are kept, and by decreasing CvDTree::pruned_tree_idx (e.g. setting it to -1) it is still possible to get the results from the original un-pruned (or pruned less aggressively) tree. 
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.regressionAccuracy">
            <summary>
            Another stopping criterion - only for regression trees. As soon as the estimated node value differs from the node training samples responses by less than the parameter value, the node is not split further.
            </summary>
        </member>
        <member name="F:Emgu.CV.ML.Structure.MCvDTreeParams.priors">
            <summary>
            The array of a priori class probabilities, sorted by the class label value. The parameter can be used to tune the decision tree preferences toward a certain class. For example, if users want to detect some rare anomaly occurrence, the training base will likely contain much more normal cases than anomalies, so a very good classification performance will be achieved just by considering every case as normal. To avoid this, the priors can be specified, where the anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is adjusted properly. 
            </summary>
            <remarks>A note about memory management: the field priors  is a pointer to the array of floats. The array should be allocated by user, and released just after the CvDTreeParams structure is passed to CvDTreeTrainData or CvDTree constructors/methods (as the methods make a copy of the array).</remarks>
        </member>
        <member name="T:Emgu.CV.ML.NormalBayesClassifier">
            <summary>
            A Normal Bayes Classifier
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.NormalBayesClassifier.#ctor">
            <summary>
            Create a normal Bayes classifier
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.NormalBayesClassifier.DisposeObject">
            <summary>
            Release the memory associated with this classifier
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.NormalBayesClassifier.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},System.Boolean)">
            <summary>
            Train the classifier using the specific data
            </summary>
            <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="responses">A 32-bit integer, single-channel matrix of the corresponding output vectors, one vector per row. </param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="update">If true, the training data is used to update the classifier; Otherwise, the data in the classifier are cleared before training is performed</param>
            <returns>The number of done iterations</returns>
        </member>
        <member name="M:Emgu.CV.ML.NormalBayesClassifier.Predict(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32})">
            <summary>
            Given the NormalBayesClassifier model, predict the probability of the <paramref name="samples"/>
            </summary>
            <param name="samples">The input samples</param>
            <param name="results">The prediction results, should have the same # of rows as the <paramref name="samples"/></param>
            <returns>In case of classification the method returns the class label, in case of regression - the output function value</returns>
        </member>
        <member name="T:Emgu.CV.ML.EM">
            <summary>
            Expectation Maximization model
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.EM.#ctor">
            <summary>
            Create an Expectation Maximization model
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.EM.#ctor(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.ML.EMParams,Emgu.CV.Matrix{System.Int32})">
            <summary>
            Create an Expectation Maximization model using the specific training parameters
            </summary>
            <param name="samples">The samples to be trained</param>
            <param name="sampleIdx"></param>
            <param name="parameters"></param>
            <param name="labels"></param>
        </member>
        <member name="M:Emgu.CV.ML.EM.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.ML.EMParams,Emgu.CV.Matrix{System.Int32})">
            <summary>
            Train the EM model using the specific training data
            </summary>
            <param name="samples">The training data. A 32-bit floating-point, single-channel matrix, one vector per row</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for EM</param>
            <param name="labels">Can be null if not needed. Optionally computed output "class label" for each sample</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.EM.Predict(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single})">
            <summary>
            Predict the probability of the <paramref name="samples"/>
            </summary>
            <param name="samples">The input samples</param>
            <param name="probs">The prediction results, should have the same # of rows as the <paramref name="samples"/></param>
            <returns>In case of classification the method returns the class label, in case of regression - the output function value</returns>
        </member>
        <member name="M:Emgu.CV.ML.EM.GetMeans">
            <summary>
            Get the mean of the clusters
            </summary>
            <returns>The mean of the clusters</returns>
        </member>
        <member name="M:Emgu.CV.ML.EM.GetWeights">
            <summary>
            Get the weights of the clusters
            </summary>
            <returns>The weights of the clusters</returns>
        </member>
        <member name="M:Emgu.CV.ML.EM.GetProbabilities">
            <summary>
            Get the probability matrix
            </summary>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.EM.GetCovariances">
            <summary>
            Get the covariance matrices for each cluster
            </summary>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.EM.DisposeObject">
            <summary>
            Release the memory associated with this EM model
            </summary>
        </member>
        <member name="P:Emgu.CV.ML.EM.NumberOfClusters">
            <summary>
            Get the number of clusters of this EM model
            </summary>
        </member>
        <member name="T:Emgu.CV.ML.SVM">
            <summary>
            Support Vector Machine 
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.SVM.#ctor">
            <summary>
            Create a support Vector Machine
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.SVM.DisposeObject">
            <summary>
            Release all the memory associated with the SVM
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.SVM.Train(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.SVMParams)">
            <summary>
            Train the SVM model with the specific parameters
            </summary>
            <param name="trainData">The training data.</param>
            <param name="responses">The response for the training data.</param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for SVM</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.SVM.GetDefaultGrid(Emgu.CV.ML.MlEnum.SVM_PARAM_TYPE)">
            <summary>
            Get the default parameter grid for the specific SVM type
            </summary>
            <param name="type">The SVM type</param>
            <returns>The default parameter grid for the specific SVM type </returns>
        </member>
        <member name="M:Emgu.CV.ML.SVM.TrainAuto(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.Structure.MCvSVMParams,System.Int32)">
            <summary>
            The method trains the SVM model automatically by choosing the optimal parameters C, gamma, p, nu, coef0, degree from CvSVMParams. By optimal one means that the cross-validation estimate of the test set error is minimal. 
            </summary>
            <param name="trainData">The training data.</param>
            <param name="responses">The response for the training data.</param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for SVM</param>
            <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.SVM.TrainAuto(Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Single},Emgu.CV.Matrix{System.Int32},Emgu.CV.Matrix{System.Int32},Emgu.CV.ML.Structure.MCvSVMParams,System.Int32,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid,Emgu.CV.ML.Structure.MCvParamGrid)">
            <summary>
            The method trains the SVM model automatically by choosing the optimal parameters C, gamma, p, nu, coef0, degree from CvSVMParams. By optimal one means that the cross-validation estimate of the test set error is minimal. 
            </summary>
            <param name="trainData">The training data.</param>
            <param name="responses">The response for the training data.</param>
            <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
            <param name="parameters">The parameters for SVM</param>
            <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times</param>
            <param name="cGrid">cGrid</param>
            <param name="gammaGrid">gammaGrid</param>
            <param name="pGrid">pGrid</param>
            <param name="nuGrid">nuGrid</param>
            <param name="coefGrid">coefGrid</param>
            <param name="degreeGrid">degreeGrid</param>
            <returns></returns>
        </member>
        <member name="M:Emgu.CV.ML.SVM.Predict(Emgu.CV.Matrix{System.Single})">
            <summary>
            Predicts response for the input sample.
            </summary>
            <param name="sample">The input sample</param>
        </member>
        <member name="M:Emgu.CV.ML.SVM.GetSupportVector(System.Int32)">
            <summary>
            The method retrieves a given support vector
            </summary>
            <param name="i">The index of the support vector</param>       
        </member>
        <member name="M:Emgu.CV.ML.SVM.GetSupportVectorCount">
            <summary>
            The method retrieves the number of support vectors
            </summary>
        </member>
        <member name="M:Emgu.CV.ML.SVM.GetVarCount">
            <summary>
            The method retrieves the number of vars
            </summary>
        </member>
    </members>
</doc>
