﻿#include "featuredetector.h"
#include "ui_featuredetector.h"

#include <chrono>

#include <QFileDialog>
#include <QDebug>
#include <QSettings>

#include <opencv2/highgui.hpp>

#include "Script/chaiscript.h"
#include "Script/ChaiScriptSyntaxHighlighter.hpp"
#include "Script/tolua.h"

using namespace cv;
using namespace std;


/// Constructs the feature-detector widget: installs the ChaiScript syntax
/// highlighter, registers the script-engine bindings, and populates the
/// feature-type combo box.
FeatureDetect::FeatureDetect(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::FeatureDetector)
{
    ui->setupUi(this);
    // The highlighter is parented to the edit's document, so Qt deletes it
    // together with the document -- no need to keep the pointer around.
    new widgets::ChaiScriptSyntaxHighlighter(ui->ScriptEdit->document());

    m_EngineType = engine_ChaiScript;
    initFun_ChaiScript();
    initFun_Lua();

    // Detector/descriptor types offered in the combo box.
    // Fix: the original list contained "FREAK" twice; the duplicate entry
    // has been removed.
    static const char *const featureTypes[] = {
        "BRISK", "ORB", "MSER", "FAST", "AGAST", "GFTT", "SimpleBlob",
        "KAZE", "AKAZE", "SIFT", "SURF", "FREAK", "StarDetector", "Brief",
        "LUCID", "LATCH", "DAISY", "MSDDetector", "VGG", "BoostDesc",
        "PCT", "PCTSQFD", "HarrisLaplace", "Affine"
    };
    for (const char *type : featureTypes)
    {
        ui->FeatureType->addItem(type);
    }
}

/// Destroys the widget; releases the generated UI object (child widgets are
/// deleted by Qt's parent-child ownership).
FeatureDetect::~FeatureDetect()
{
    delete ui;
}

/// Slot: lets the user pick an image file. Only the path is stored here; the
/// image is loaded lazily by the next ApplyDetect_* call.
void FeatureDetect::on_OpenImage_clicked()
{
    ImagePath = QFileDialog::getOpenFileName();

    //chai.eval("print(\"Hello World\");ApplyDetectFun();");
}

/// Slot: runs the C++ detector whenever a new feature type is selected and
/// publishes the resulting images to the icon list.
void FeatureDetect::on_FeatureType_currentIndexChanged(const QString &arg1)
{
    if(!arg1.isEmpty())
    {
        ApplyDetect_Cpp(arg1.toStdString());
        // The widget's address serves as a unique sender id for the signal
        // (instead of this->objectName().toStdString()).
        emit UpdateIconListSig(ProcImageMap, std::to_string((unsigned long long )this));
    }
}


void FeatureDetect::SlectectImageToShowSlot(const std::string ImageId, const std::string objectName)
{
    if(objectName != std::to_string((unsigned long long )this) /*this->objectName().toStdString()*/)
    {
        return;
    }

    if(ProcImageMap.find(ImageId) == ProcImageMap.end())
    {
        return ;
    }

    emit ShowImageSig(ProcImageMap[ImageId], ImageId);
}

//BRISK: detector + descriptor
//ORB: detector + descriptor
//MSER: detector
//FAST: detector
//AGAST: detector
//GFTT: detector
//SimpleBlobDetector: detector
//KAZE: detector + descriptor
//AKAZE: detector + descriptor
//FREAK: descriptor
//StarDetector: detector
//BriefDescriptorExtractor: descriptor
//LUCID: descriptor
//LATCH: descriptor
//DAISY: descriptor
//MSDDetector: detector
//SIFT: detector + descriptor
//SURF: detector + descriptor


int FeatureDetect::ApplyDetect_Cpp( const std::string typeString )
{
    ProcImageMap.clear();
    if (!ImagePath.isEmpty())
    {
        std::string item = ImagePath.toStdString();

        try
        {
            std::chrono::time_point<std::chrono::high_resolution_clock> TimeStart(std::chrono::high_resolution_clock::now());

            ProcImageMap["InImage"] = cv::imread( item, IMREAD_COLOR );
            if(ProcImageMap["InImage"].empty())
            {
                qDebug() <<"imread failed:"<< item.c_str();
                return -1;
            }
            ProcImageMap[typeString]  = ProcImageMap["InImage"].clone();
            mKeypoints.clear();

            if (typeString == "BRISK") {
                detector = cv::BRISK::create();
            }
            else if (typeString == "ORB")
            {
                detector  = cv::ORB::create();
            }
            else if (typeString == "MSER")
            {
                detector = cv::MSER::create();
            }
            else if (typeString == "FAST")
            {
                detector  = cv::FastFeatureDetector::create();
            }
            else if (typeString == "AGAST")
            {
                detector = cv::AgastFeatureDetector::create();
            }
            else if (typeString == "GFTT")
            {
                detector  = cv::GFTTDetector::create();
            }
            else if (typeString == "SimpleBlob")
            {
                detector = cv::SimpleBlobDetector::create();
            }
            else if (typeString == "KAZE")
            {
                detector  = cv::KAZE::create();
            }
            else if (typeString == "AKAZE")
            {
                detector = cv::AKAZE::create();
            }
            else if (typeString == "SIFT") {
                detector  = cv::xfeatures2d::SIFT::create();
            }
            else if (typeString == "SURF") {
                detector  = cv::xfeatures2d::SURF::create();
            }
            else if (typeString == "FREAK") {
                detector  = cv::xfeatures2d::FREAK::create();
            }
            else if (typeString == "StarDetector") {
                detector  = cv::xfeatures2d::StarDetector::create();
            }
            else if (typeString == "Brief") {
                detector  = cv::xfeatures2d::BriefDescriptorExtractor::create();
            }
            else if (typeString == "LUCID") {
                detector  = cv::xfeatures2d::LUCID::create();
            }
            else if (typeString == "LATCH") {
                detector  = cv::xfeatures2d::LATCH::create();
            }
            else if (typeString == "DAISY") {
                detector  = cv::xfeatures2d::DAISY::create();
            }
            else if (typeString == "MSDDetector") {
                detector  = cv::xfeatures2d::MSDDetector::create();
            }
            else if (typeString == "VGG") {
                detector  = cv::xfeatures2d::VGG::create();
            }
            else if (typeString == "BoostDesc") {
                detector  = cv::xfeatures2d::BoostDesc::create();
            }
            //            else if (typeString == "PCT") {
            //               Ptr<xfeatures2d::PCTSignatures> PctDetector  = cv::xfeatures2d::PCTSignatures::create();
            //               PctDetector->detect(ProcImageMap["InImage"], mKeypoints);
            //            }
            //            else if (typeString == "PCTSQFD") {
            //                Ptr<xfeatures2d::PCTSignaturesSQFD> PctDetector  = cv::xfeatures2d::PCTSignaturesSQFD::create();
            //                PctDetector->detect(ProcImageMap["InImage"], mKeypoints);
            //            }
            else if (typeString == "HarrisLaplace") {
                detector  = cv::xfeatures2d::HarrisLaplaceFeatureDetector::create();
            }
            //            else if (typeString == "Affine") {
            //                Ptr<xfeatures2d::AffineFeature2D> AffineDetector  = cv::xfeatures2d::AffineFeature2D::create(mKeypoints);
            //                AffineDetector->detect(ProcImageMap["InImage"], mKeypoints);
            //            }
            else {
                qDebug() << L"不存在的类型:"<<typeString.c_str();
                return -1;
            }

            if(mKeypoints.empty())
            {
                detector->detect(ProcImageMap["InImage"], mKeypoints);
            }

            drawKeypoints(ProcImageMap[typeString], mKeypoints, ProcImageMap[typeString]);

            long long CurUs = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - TimeStart).count() / 1000.f;
            qDebug() <<typeString.c_str()<<", CurUs:"<< CurUs<<" us";
        }
        catch (cv::Exception &e)
        {
            qDebug() <<"FeatureDetector::Apply Exception:"<< e.what();
            return -2;
        }
    }
    return 0;
}

/// Builds a detector from a ChaiScript snippet and runs it on the chosen image.
///
/// @param ScriptData  ChaiScript source expected to yield a detector object.
/// @return 0 on success, -1 when the script is empty / no image is selected /
///         the image cannot be read, -2 on a script or detection error.
int FeatureDetect::ApplyDetect_ChaiScript( const std::string ScriptData )
{
    if(!ScriptData.empty() && !ImagePath.isEmpty())
    {
        std::string item = ImagePath.toStdString();
        //std::chrono::time_point<std::chrono::high_resolution_clock> TimeStart(std::chrono::high_resolution_clock::now());

        ProcImageMap["InImage"] = cv::imread( item, IMREAD_COLOR );
        if(ProcImageMap["InImage"].empty())
        {
            qDebug() <<"imread failed:"<< item.c_str();
            return -1;
        }
        // Output slot: keypoints are drawn onto a copy of the input image.
        ProcImageMap["detector"]  = ProcImageMap["InImage"].clone();
        mKeypoints.clear();

        try{
            detector = GetDector_ChaiScript(ScriptData);
        }
// The ChaiScript-specific handlers must precede catch(...) and only exist
// when the engine is compiled in.
#ifdef __WITH_CHAISCRIPT__
        catch (const chaiscript::exception::bad_boxed_cast &e)
        {
            qDebug() <<"GetDector excption:"<< e.what();
            return -2;
        }
        catch (const chaiscript::exception::eval_error &e)
        {
            // pretty_print() includes the script location of the eval error.
            qDebug() <<"GetDector excption:"<< e.pretty_print().c_str();
            return -2;
        }
#endif
        catch(...)
        {
            qDebug() <<"GetDector excption:"<< ScriptData.c_str();
            return -2;
        }

        try {
            // The script may legally produce no detector; only run when set.
            if(detector != nullptr)
            {
                detector->detect(ProcImageMap["InImage"], mKeypoints);
                drawKeypoints(ProcImageMap["detector"], mKeypoints, ProcImageMap["detector"]);
            }
        }
        catch(cv::Exception &e)
        {
            qDebug() <<"detect Exception:"<< e.what();
            return -2;
        }
        catch (...) {
            qDebug() <<"detect Exception:";
            return -2;
        }
        return 0;
    }
    return -1;
}

int FeatureDetect::ApplyDetect_Lua( const std::string ScriptData )
{
    if(!ScriptData.empty())
    {
        std::string item = ImagePath.toStdString();
        //std::chrono::time_point<std::chrono::high_resolution_clock> TimeStart(std::chrono::high_resolution_clock::now());

        ProcImageMap["InImage"] = cv::imread( item, IMREAD_COLOR );
        if(ProcImageMap["InImage"].empty())
        {
            qDebug() <<"imread failed:"<< item.c_str();
            return -1;
        }
        ProcImageMap["detector"]  = ProcImageMap["InImage"].clone();
        mKeypoints.clear();

        try{
            detector = GetDector_Lua(ScriptData);
        }
        catch(...)
        {
            qDebug() <<"GetDector excption:"<< ScriptData.c_str();
            return -2;
        }

        try {
            if(detector != nullptr)
            {
                detector->detect(ProcImageMap["InImage"], mKeypoints);
                drawKeypoints(ProcImageMap["detector"], mKeypoints, ProcImageMap["detector"]);
            }
        }
        catch(cv::Exception &e)
        {
            qDebug() <<"detect Exception:"<< e.what();
            return -2;
        }
        catch (...) {
            qDebug() <<"detect Exception:";
            return -2;
        }

        return 0;
    }
    return -1;
}

void FeatureDetect::on_ScriptExe_clicked()
{
    switch (m_EngineType)
    {
    case engine_ChaiScript:
    {
        QString ScriptData = ui->ScriptEdit->toPlainText();
        ApplyDetect_ChaiScript( ScriptData.toStdString() );
    }
        break;

    case engine_Cpp:
    {
        QString arg1 = ui->FeatureType->currentText();
        ApplyDetect_Cpp( arg1.toStdString() );
    }
        break;

    case engine_Lua:
    {
        QString ScriptData = ui->ScriptEdit->toPlainText();
        ApplyDetect_Lua( ScriptData.toStdString() );
    }
        break;

    default:
        break;
    }

    emit UpdateIconListSig(ProcImageMap, std::to_string((unsigned long long )this) /*this->objectName().toStdString()*/);
}

/// Slot: maps the engine selector text to the internal engine enum.
/// Any unrecognized entry falls back to the ChaiScript engine.
void FeatureDetect::on_EngineType_currentTextChanged(const QString &arg1)
{
    if(arg1 == "C++")
    {
        m_EngineType = engine_Cpp;
        return;
    }
    if(arg1 == "Lua")
    {
        m_EngineType = engine_Lua;
        return;
    }
    // "ChaiScript" and every unknown value select the ChaiScript engine.
    m_EngineType = engine_ChaiScript;
}

void FeatureDetect::on_FeatureType_currentTextChanged(const QString &arg1)
{
    QSettings tempConfigure("./configure/FeatureDetect.ini", QSettings::IniFormat);
    QString typeString = arg1;
    switch (m_EngineType)
    {
    case engine_ChaiScript:
    {
        QString IniPath = "/ChaiScript/";
        if (typeString == "BRISK")
        {
            QString DefaultStr = "/** @brief The BRISK constructor\
            \n@param thresh AGAST detection threshold score.\
            \n@param octaves detection octaves. Use 0 to do single scale.\
            \n@param patternScale apply this scale to the pattern used for sampling the neighbourhood of a\
            \nkeypoint.\
            \n \
            \nCV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector = cv::BRISK::create();
        }
        else if (typeString == "ORB")
        {
            QString DefaultStr = "    /** @brief The ORB constructor\
                    \n\
                    \n@param nfeatures The maximum number of features to retain.\
                    \n@param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical\
                    \npyramid, where each next level has 4x less pixels than the previous, but such a big scale factor\
                    \nwill degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor\
                    \nwill mean that to cover certain scale range you will need more pyramid levels and so the speed\
                    \nwill suffer.\
                    \n@param nlevels The number of pyramid levels. The smallest level will have linear size equal to\
                    \ninput_image_linear_size/pow(scaleFactor, nlevels - firstLevel).\
                    \n@param edgeThreshold This is size of the border where the features are not detected. It should\
                    \nroughly match the patchSize parameter.\
                    \n@param firstLevel The level of pyramid to put source image to. Previous layers are filled\
                    \nwith upscaled source image.\
                    \n@param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The\
                    \ndefault value 2 means the BRIEF where we take a random point pair and compare their brightnesses,\
                    \nso we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3\
                    \nrandom points (of course, those point coordinates are random, but they are generated from the\
                    \npre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel\
                    \nrectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such\
                    \noutput will occupy 2 bits, and therefore it will need a special variant of Hamming distance,\
                    \ndenoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each\
                    \nbin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).\
                    \n@param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features\
                    \n(the score is written to KeyPoint::score and is used to retain best nfeatures features);\
                    \nFAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints,\
                    \nbut it is a little faster to compute.\
                    \n@param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller\
                    \npyramid layers the perceived image area covered by a feature will be larger.\
                    \n@param fastThreshold the fast threshold\
                    \n \
                    \nCV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,\
                    \n    int firstLevel=0, int WTA_K=2, ORB::ScoreType scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
        }
        else if (typeString == "MSER")
        {
            QString DefaultStr = "/** @brief Full consturctor for %MSER detector\
                    \n\
                    \n@param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$\
                    \n@param _min_area prune the area which smaller than minArea\
                    \n@param _max_area prune the area which bigger than maxArea\
                    \n@param _max_variation prune the area have similar size to its children\
                    \n@param _min_diversity for color image, trace back to cut off mser with diversity less than min_diversity\
                    \n@param _max_evolution  for color image, the evolution steps\
                    \n@param _area_threshold for color image, the area threshold to cause re-initialize\
                    \n@param _min_margin for color image, ignore too small margin\
                    \n@param _edge_blur_size for color image, the aperture size for edge blur\
                    \n \
                    \nCV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,\
                    \n      double _max_variation=0.25, double _min_diversity=.2,\
                    \n      int _max_evolution=200, double _area_threshold=1.01,\
                    \n      double _min_margin=0.003, int _edge_blur_size=5 );*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector = cv::MSER::create();
        }
        else if (typeString == "FAST")
        {
            QString DefaultStr = "/*CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,\
                    \nbool nonmaxSuppression=true,\
                    \nFastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16 );*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::FastFeatureDetector::create();
        }
        else if (typeString == "AGAST")
        {
            QString DefaultStr = " /*CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,\
                    \nbool nonmaxSuppression=true,\
                    \nAgastFeatureDetector::DetectorType type = AgastFeatureDetector::OAST_9_16);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector = cv::AgastFeatureDetector::create();
        }
        else if (typeString == "GFTT")
        {
            QString DefaultStr = "/*CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,\
                    \nint blockSize=3, bool useHarrisDetector=false, double k=0.04 );*/s";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::GFTTDetector::create();
        }
        else if (typeString == "SimpleBlob")
        {
            QString DefaultStr = "/** @brief Class for extracting blobs from an image. :\
                    \n\
                    \nThe class implements a simple algorithm for extracting blobs from an image:\
                    \n\
                    \n1.  Convert the source image to binary images by applying thresholding with several thresholds from\
                    \n    minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between\
                    \n    neighboring thresholds.\
                    \n2.  Extract connected components from every binary image by findContours and calculate their\
                    \n    centers.\
                    \n3.  Group centers from several binary images by their coordinates. Close centers form one group that\
                    \n    corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.\
                    \n4.  From the groups, estimate final centers of blobs and their radiuses and return as locations and\
                    \n    sizes of keypoints.\
                    \n\
                    \nThis class performs several filtrations of returned blobs. You should set filterBy\\* to true/false\
                    \nto turn on/off corresponding filtration. Available filtrations:\
                    \n\
                    \n-   **By color**. This filter compares the intensity of a binary image at the center of a blob to\
                    \nblobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs\
                    \nand blobColor = 255 to extract light blobs.\
                    \n-   **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).\
                    \n-   **By circularity**. Extracted blobs have circularity\
                    \n(\\f$\frac{4*\\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and\
                    \nmaxCircularity (exclusive).\
                    \n-   **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio\
                    \nbetween minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).\
                    \n-   **By convexity**. Extracted blobs have convexity (area / area of blob convex hull) between\
                    \nminConvexity (inclusive) and maxConvexity (exclusive).\
                    \n\
                    \nDefault values of parameters are tuned to extract dark circular blobs.\
                    \n\
                    \nCV_WRAP static Ptr<SimpleBlobDetector>\
                    \n  create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector = cv::SimpleBlobDetector::create();
        }
        else if (typeString == "KAZE")
        {
            QString DefaultStr = "/** @brief The KAZE constructor                                   \
                    \n                                                                                  \
                    \n@param extended Set to enable extraction of extended (128-byte) descriptor.       \
                    \n@param upright Set to enable use of upright descriptors (non rotation-invariant). \
                    \n@param threshold Detector response threshold to accept point                      \
                    \n@param nOctaves Maximum octave evolution of the image                             \
                    \n@param nOctaveLayers Default number of sublevels per scale level                  \
                    \n@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or     \
                    \nDIFF_CHARBONNIER                                                                  \
                    \n                                                                                \
                    \nCV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,          \
                    \n                                float threshold = 0.001f,                         \
                    \n                                int nOctaves = 4, int nOctaveLayers = 4,          \
                    \n                                KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::KAZE::create();
        }
        else if (typeString == "AKAZE")
        {
            QString DefaultStr = "    /** @brief The AKAZE constructor                                                 \
                    \n                                                                                                 \
                    \n@param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,                        \
                    \nDESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.                             \
                    \n@param descriptor_size Size of the descriptor in bits. 0 -> Full size                           \
                    \n@param descriptor_channels Number of channels in the descriptor (1, 2, 3)                        \
                    \n@param threshold Detector response threshold to accept point                                     \
                    \n@param nOctaves Maximum octave evolution of the image                                            \
                    \n@param nOctaveLayers Default number of sublevels per scale level                                 \
                    \n@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or                    \
                    \nDIFF_CHARBONNIER                                                                                 \
                    \n                                                                                              \
                    \nCV_WRAP static Ptr<AKAZE> create(AKAZE::DescriptorType descriptor_type = AKAZE::DESCRIPTOR_MLDB, \
                    \n                                 int descriptor_size = 0, int descriptor_channels = 3,           \
                    \n                                 float threshold = 0.001f, int nOctaves = 4,                     \
                    \n                                 int nOctaveLayers = 4, KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector = cv::AKAZE::create();
        }
        else if (typeString == "SIFT") {                                                                                 \
            QString DefaultStr = "    /**                                                                                \
                    \n@param nfeatures The number of best features to retain. The features are ranked by their scores    \
                    \n(measured in SIFT algorithm as the local contrast)                                                 \
                    \n                                                                                                   \
                    \n@param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The\
                    \nnumber of octaves is computed automatically from the image resolution.                             \
                    \n                                                                                                   \
                    \n@param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform   \
                    \n(low-contrast) regions. The larger the threshold, the less features are produced by the detector.  \
                    \n                                                                                                   \
                    \n@param edgeThreshold The threshold used to filter out edge-like features. Note that the its meaning\
                    \nis different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are  \
                    \nfiltered out (more features are retained).                                                         \
                    \n                                                                                                   \
                    \n@param sigma The sigma of the Gaussian applied to the input image at the octave 0. If your image \
                    \nis captured with a weak camera with soft lenses, you might want to reduce the number.              \
                    \n                                                                                                   \
                    \nCV_WRAP static Ptr<SIFT> create( int nfeatures = 0, int nOctaveLayers = 3,                         \
                    \n                                double contrastThreshold = 0.04, double edgeThreshold = 10,        \
                    \n                                double sigma = 1.6);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::SIFT::create();
        }
        else if (typeString == "SURF") {
            QString DefaultStr = "/**                                                                                     \
                    \n@param hessianThreshold Threshold for hessian keypoint detector used in SURF.                       \
                    \n@param nOctaves Number of pyramid octaves the keypoint detector will use.                           \
                    \n@param nOctaveLayers Number of octave layers within each octave.                                    \
                    \n@param extended Extended descriptor flag (true - use extended 128-element descriptors; false - use  \
                    \n64-element descriptors).                                                                            \
                    \n@param upright Up-right or rotated features flag (true - do not compute orientation of features;    \
                    \nfalse - compute orientation).                                                                       \
                    \n                                                                                                    \
                    \nCV_WRAP static Ptr<SURF> create(double hessianThreshold=100,                                        \
                    \n              int nOctaves = 4, int nOctaveLayers = 3,                                              \
                    \n              bool extended = false, bool upright = false);*/";

            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::SURF::create();
        }
        else if (typeString == "FREAK") {
            // FREAK selected: seed the per-type QSettings key with a default
            // editor script on first use. The text is the OpenCV FREAK docs
            // plus the create() signature, wrapped in /* */ so evaluating it
            // in the script engine is a no-op.
            QString DefaultStr = "    /** @brief Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in @cite AOV12 .\
                    \n                                                                                                                                \
                    \nThe algorithm propose a novel keypoint descriptor inspired by the human visual system and more                                  \
                    \nprecisely the retina, coined Fast Retina Key- point (FREAK). A cascade of binary strings is                                     \
                    \ncomputed by efficiently comparing image intensities over a retinal sampling pattern. FREAKs are in                              \
                    \ngeneral faster to compute with lower memory load and also more robust than SIFT, SURF or BRISK.                                 \
                    \nThey are competitive alternatives to existing keypoints in particular for embedded applications.                                \
                    \n                                                                                                                                \
                    \n@note                                                                                                                           \
                    \n   -   An example on how to use the FREAK descriptor can be found at                                                            \
                    \n        opencv_source_code/samples/cpp/freak_demo.cpp                                                                           \
                    \n *//**                                                                                                                          \
                    \n@param orientationNormalized Enable orientation normalization.                                                                  \
                    \n@param scaleNormalized Enable scale normalization.                                                                              \
                    \n@param patternScale Scaling of the description pattern.                                                                         \
                    \n@param nOctaves Number of octaves covered by the detected keypoints.                                                            \
                    \n@param selectedPairs (Optional) user defined selected pairs indexes,                                                            \
                    \n                                                                                                                              \
                    \nCV_WRAP static Ptr<FREAK> create(bool orientationNormalized = true,                                                             \
                    \n                         bool scaleNormalized = true,                                                                           \
                    \n                         float patternScale = 22.0f,                                                                            \
                    \n                         int nOctaves = 4,                                                                                      \
                    \n                         const std::vector<int>& selectedPairs = std::vector<int>());*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::FREAK::create();
        }
        else if (typeString == "StarDetector") {
            // StarDetector selected: default editor script is the factory
            // signature wrapped in /* */ so it evaluates as a comment.
            QString DefaultStr = "/*CV_WRAP static Ptr<StarDetector> create(int maxSize=45, int responseThreshold=30,\
                    \nint lineThresholdProjected=10,                                                                   \
                    \nint lineThresholdBinarized=8,                                                                    \
                    \nint suppressNonmaxSize=5);*/";
            // NOTE(review): removed a stray trailing '\' that followed the
            // statement terminator above; it line-spliced the next (blank)
            // line and would silently swallow code later inserted there.

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::StarDetector::create();
        }
        else if (typeString == "Brief") {
            // BRIEF selected: seed the per-type QSettings key with the default
            // editor script (OpenCV docs + create() signature, comment-wrapped).
            // NOTE(review): the 'legth' typo lives in the displayed doc text;
            // left as-is to keep the stored default stable.
            QString DefaultStr = "/** @brief Class for computing BRIEF descriptors described in @cite calon2010 . \
                    \n                                                                                            \
                    \n@param bytes legth of the descriptor in bytes, valid values are: 16, 32 (default) or 64 .   \
                    \n@param use_orientation sample patterns using keypoints orientation, disabled by default.    \
                    \n                                                                                            \
                    \n    CV_WRAP static Ptr<BriefDescriptorExtractor> create( int bytes = 32, bool use_orientation = false );*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::BriefDescriptorExtractor::create();
        }
        else if (typeString == "LUCID") {
            // LUCID selected: seed the per-type QSettings key with the default
            // editor script (OpenCV docs + create() signature, comment-wrapped).
            QString DefaultStr = "/** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID       \
                    \n                                                                                                                          \
                    \nAn image descriptor that can be computed very fast, while being                                                           \
                    \nabout as robust as, for example, SURF or BRIEF.                                                                           \
                    \n                                                                                                                          \
                    \n@note It requires a color image as input.                                                                                 \
                    \n */                                                                                                                       \
                    \n/**                                                                                                                       \
                    \n * @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth                         \
                    \n * @param blur_kernel kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth  \
                    \n                                                                                                                        \
                    \nCV_WRAP static Ptr<LUCID> create(const int lucid_kernel = 1, const int blur_kernel = 2);*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::LUCID::create();
        }
        else if (typeString == "LATCH") {
            // LATCH selected: seed the per-type QSettings key with the default
            // editor script (OpenCV docs + create() signature, comment-wrapped).
            QString DefaultStr = "/** latch Class for computing the LATCH descriptor.                                                                            \
                    \nIf you find this code useful, please add a reference to the following paper in your work:                                                  \
                    \nGil Levi and Tal Hassner, \"LATCH: Learned Arrangements of Three Patch Codes\", arXiv preprint arXiv:1501.03719, 15 Jan. 2015                \
                    \n                                                                                                                                           \
                    \nLATCH is a binary descriptor based on learned comparisons of triplets of image patches.                                                    \
                    \n                                                                                                                                           \
                    \n* bytes is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1                                                                    \
                    \n* rotationInvariance - whether or not the descriptor should compansate for orientation changes.                                            \
                    \n* half_ssd_size - the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7x     \
                    \n    then the half_ssd_size should be (7-1)/2 = 3.                                                                                          \
                    \n* sigma - sigma value for GaussianBlur smoothing of the source image. Source image will be used without smoothing in case sigma value is 0.\
                    \n                                                                                                                                           \
                    \nNote: the descriptor can be coupled with any keypoint extractor. The only demand is that if you use set rotationInvariance = True then     \
                    \n    you will have to use an extractor which estimates the patch orientation (in degrees). Examples for such extractors are ORB and SIFT.   \
                    \n                                                                                                                                           \
                    \nNote: a complete example can be found under /samples/cpp/tutorial_code/xfeatures2D/latch_match.cpp                                         \
                    \n                                                                                                                                           \
                    \n    CV_WRAP static Ptr<LATCH> create(int bytes = 32, bool rotationInvariance = true, int half_ssd_size = 3, double sigma = 2.0);*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::LATCH::create();
        }
        else if (typeString == "DAISY") {
            // DAISY selected: seed the per-type QSettings key with the default
            // editor script (OpenCV docs + create() signature, comment-wrapped).
            QString DefaultStr = "/** @brief Class implementing DAISY descriptor, described in @cite Tola10                                              \
                   \n                                                                                                                                    \
                   \n @param radius radius of the descriptor at the initial scale                                                                        \
                   \n @param q_radius amount of radial range division quantity                                                                           \
                   \n @param q_theta amount of angular range division quantity                                                                           \
                   \n @param q_hist amount of gradient orientations range division quantity                                                              \
                   \n @param norm choose descriptors normalization type, where                                                                           \
                   \n DAISY::NRM_NONE will not do any normalization (default),                                                                           \
                   \n DAISY::NRM_PARTIAL mean that histograms are normalized independently for L2 norm equal to 1.0,                                     \
                   \n DAISY::NRM_FULL mean that descriptors are normalized for L2 norm equal to 1.0,                                                     \
                   \n DAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT\
                   \n @param H optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image            \
                   \n @param interpolation switch to disable interpolation for speed improvement at minor quality loss                                   \
                   \n @param use_orientation sample patterns using keypoints orientation, disabled by default.                                           \
                   \n CV_WRAP static Ptr<DAISY> create( float radius = 15, int q_radius = 3, int q_theta = 8,                                        \
                   \n             int q_hist = 8, DAISY::NormalizationType norm = DAISY::NRM_NONE, InputArray H = noArray(),                         \
                   \n             bool interpolation = true, bool use_orientation = false );*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::DAISY::create();
        }
        else if (typeString == "MSDDetector") {
            // MSD selected: seed the per-type QSettings key with the default
            // editor script (OpenCV docs + create() signature, comment-wrapped).
            QString DefaultStr = "/** @brief Class implementing the MSD (*Maximal Self-Dissimilarity*) keypoint detector, described in @cite Tombari14.  \
                    \n                                                                                                                                   \
                    \nThe algorithm implements a novel interest point detector stemming from the intuition that image patches                            \
                    \nwhich are highly dissimilar over a relatively large extent of their surroundings hold the property of                              \
                    \nbeing repeatable and distinctive. This concept of \"contextual self-dissimilarity\" reverses the key                                 \
                    \nparadigm of recent successful techniques such as the Local Self-Similarity descriptor and the Non-Local                            \
                    \nMeans filter, which build upon the presence of similar - rather than dissimilar - patches. Moreover,                               \
                    \nit extends to contextual information the local self-dissimilarity notion embedded in established                                   \
                    \ndetectors of corner-like interest points, thereby achieving enhanced repeatability, distinctiveness and                            \
                    \nlocalization accuracy.                                                                                                             \
                    \n                                                                                                                                   \
                    \nstatic Ptr<MSDDetector> create(int m_patch_radius = 3, int m_search_area_radius = 5,                                               \
                    \n        int m_nms_radius = 5, int m_nms_scale_radius = 0, float m_th_saliency = 250.0f, int m_kNN = 4,                             \
                    \n        float m_scale_factor = 1.25f, int m_n_scales = -1, bool m_compute_orientation = false);*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::MSDDetector::create();
        }
        else if (typeString == "VGG") {
            // VGG selected: seed the per-type QSettings key with the default
            // editor script (OpenCV docs + create() signature, comment-wrapped).
            // NOTE(review): the @param list says 'use_orientation' while the
            // signature uses 'use_scale_orientation' — mismatch exists in the
            // upstream header text; harmless here since it is only displayed.
            QString DefaultStr = "/** @brief Class implementing VGG (Oxford Visual Geometry Group) descriptor trained end to end\
                    \nusing \"Descriptor Learning Using Convex Optimisation\" (DLCO) aparatus described in @cite Simonyan14.      \
                    \n                                                                                                          \
                    \n@param desc type of descriptor to use, VGG::VGG_120 is default (120 dimensions float)                     \
                    \nAvailable types are VGG::VGG_120, VGG::VGG_80, VGG::VGG_64, VGG::VGG_48                                   \
                    \n@param isigma gaussian kernel value for image blur (default is 1.4f)                                      \
                    \n@param img_normalize use image sample intensity normalization (enabled by default)                        \
                    \n@param use_orientation sample patterns using keypoints orientation, enabled by default                    \
                    \n@param scale_factor adjust the sampling window of detected keypoints to 64.0f (VGG sampling window)       \
                    \n6.25f is default and fits for KAZE, SURF detected keypoints window ratio                                  \
                    \n6.75f should be the scale for SIFT detected keypoints window ratio                                        \
                    \n5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints window ratio                       \
                    \n0.75f should be the scale for ORB keypoints ratio                                                         \
                    \n                                                                                                          \
                    \n@param dsc_normalize clamp descriptors to 255 and convert to uchar CV_8UC1 (disabled by default)          \
                    \n                                                                                                          \
                    \nCV_WRAP static Ptr<VGG> create( int desc = VGG::VGG_120, float isigma = 1.4f,                             \
                    \n                                bool img_normalize = true, bool use_scale_orientation = true,             \
                    \n                                float scale_factor = 6.25f, bool dsc_normalize = false );*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::VGG::create();
        }
        else if (typeString == "BoostDesc") {
            // BoostDesc selected: seed the per-type QSettings key with the
            // default editor script. Like every sibling branch the reference
            // text must be wrapped in /* */ so evaluating it is a no-op; the
            // original default lacked the opening /* and closed the comment
            // before the ';' ( ")*/;" ), which would leave un-commented text
            // in the script.
            QString DefaultStr = "/** @note BGM is the base descriptor where each binary dimension is computed as the output of a single weak learner.  \
                    \nBGM_HARD and BGM_BILINEAR refers to same BGM but use different type of gradient binning. In the BGM_HARD that                 \
                    \nuse ASSIGN_HARD binning type the gradient is assigned to the nearest orientation bin. In the BGM_BILINEAR that use            \
                    \nASSIGN_BILINEAR binning type the gradient is assigned to the two neighbouring bins. In the BGM and all other modes that use   \
                    \nASSIGN_SOFT binning type the gradient is assigned to 8 nearest bins according to the cosine value between the gradient        \
                    \nangle and the bin center. LBGM (alias FP-Boost) is the floating point extension where each dimension is computed              \
                    \nas a linear combination of the weak learner responses. BINBOOST and subvariants are the binary extensions of LBGM             \
                    \nwhere each bit is computed as a thresholded linear combination of a set of weak learners.                                     \
                    \nBoostDesc header files (boostdesc_*.i) was exported from original binaries with export-boostdesc.py script from               \
                    \nsamples subfolder.                                                                                                            \
                    \n                                                                                                                              \
                    \nCV_WRAP enum                                                                                                                  \
                    \n{                                                                                                                             \
                    \n   BGM = 100, BGM_HARD = 101, BGM_BILINEAR = 102, LBGM = 200,                                                                 \
                    \n   BINBOOST_64 = 300, BINBOOST_128 = 301, BINBOOST_256 = 302                                                                  \
                    \n};                                                                                                                            \
                    \n                                                                                                                              \
                    \nCV_WRAP static Ptr<BoostDesc> create( int desc = BoostDesc::BINBOOST_256,                                                     \
                    \n                bool use_scale_orientation = true, float scale_factor = 6.25f );*/";

            // Only write the default once; never clobber a user-edited script.
            IniPath += typeString;
            if( !tempConfigure.contains(IniPath) )
            {
                tempConfigure.setValue(IniPath, DefaultStr);
            }
            //detector  = cv::xfeatures2d::BoostDesc::create();
        }
        // PCT / PCTSQFD detectors: disabled — kept for reference.
        //            else if (typeString == "PCT") {
        //               Ptr<xfeatures2d::PCTSignatures> PctDetector  = cv::xfeatures2d::PCTSignatures::create();
        //               PctDetector->detect(ProcImageMap["InImage"], mKeypoints);
        //            }
        //            else if (typeString == "PCTSQFD") {
        //                Ptr<xfeatures2d::PCTSignaturesSQFD> PctDetector  = cv::xfeatures2d::PCTSignaturesSQFD::create();
        //                PctDetector->detect(ProcImageMap["InImage"], mKeypoints);
        //            }
        else if (typeString == "HarrisLaplace") {
            // NOTE(review): unlike the other branches this one neither appends
            // typeString to IniPath nor seeds a default script, so the editor
            // is later filled from the bare IniPath key — confirm intended.
        }
        // AffineFeature2D detector: disabled — kept for reference.
        //            else if (typeString == "Affine") {
        //                Ptr<xfeatures2d::AffineFeature2D> AffineDetector  = cv::xfeatures2d::AffineFeature2D::create(mKeypoints);
        //                AffineDetector->detect(ProcImageMap["InImage"], mKeypoints);
        //            }
        else {
            // Unknown feature type: log it and bail out without touching the
            // settings or the editor. Convert the wide literal explicitly —
            // streaming a raw wchar_t* into QDebug binds to the const void*
            // overload and prints a pointer address instead of the text.
            qDebug() << QString::fromWCharArray(L"不存在的类型:") << typeString;
            return ;
        }

        ui->ScriptEdit->setText(tempConfigure.value(IniPath).toString());
    }
        break;

    case engine_Cpp:
    {
        //显示预定义的代码

    }
        break;

    case engine_Lua:
    {
        //显示预定义的代码
    }
        break;

    default:
        break;
    }
}
