#include "germtools.h"

/*
 *	Construct a GermTools instance. A fresh ImageProcessor is allocated
 *	up front; the neural network and classifier are created/attached
 *	later (see startTraining/loadWeights/addClassifier), so their
 *	pointers start out null. All counters start at zero.
 */
GermTools::GermTools()
    : imgP(new ImageProcessor)
{
    nNetwork   = 0;
    classifier = 0;
    total      = 0;
    sum        = 0;
    germ       = 0;
    isWave     = false;
}

/*
 *	Release resources owned by this object. The SeedNode objects stored
 *	in 'regions' are heap-allocated by locateSeeds(), so they must be
 *	deleted here as well -- the previous code only cleared the list of
 *	pointers, leaking every node. The classifier is attached externally
 *	through addClassifier() and is not owned here, so it is only reset.
 */
GermTools::~GermTools()
{
    // Free the SeedNode objects themselves, not just the pointer list
    for(int i = 0; i < regions.size(); ++i)
        delete regions.at(i);
    regions.clear();

    // 'delete' on a null pointer is a no-op, so no guards are needed
    delete imgP;
    delete nNetwork;

    imgP = 0;
    nNetwork = 0;
    classifier = 0; // externally owned -- see addClassifier()
}

/*
 *	This method compared to manualContour detects ROI automatically by
 *	removing handwritten blobs around the edges. It assumes that the
 *	handwritten numbers are bigger than seeds and located around edges.
 *	'src' and 'dst' initially are same. 'runNNFlag' indicates current
 *	mode: training/running.
 *
 */
void GermTools::autoContour(const Mat &src, Mat &dst, bool runFlag)
{
    Mat hsv;
	// vector to store each single-channel image
    vector<Mat> planes;

    // Convert input image to HSV color format and Split HSV to single channel,
    // 'v' is useful for finding radicle and 's' is useful for finding seeds
    cvtColor(src, hsv, CV_BGR2HSV);
    split(hsv, planes);

    // Get ROI from s-channel image
    Rect roi = estimateROI(planes[1]);
	// Apply ROI onto s-channel image
    Mat s = Mat(planes[1], roi);
    Mat v = Mat(planes[2], roi);

    // count number of seeds in SATURATION 'S' channel and get result image
    // with region of seeds being identified
    regions = locateSeeds(s, dst, runFlag);

    // For both training stage and testing stage, calculate feature vector of
    // located seeds for VALUE channel. Each seed is enclosed in a
    // square window of size 64 x 64
    generateData(v, regions, runFlag);
}

/*
 *	This method is used to detect location of possible seeds
 *	'src' image is source image and 'dst' image is a copy of
 *	'src' as well as the result image to be displayed on
 *	'trainingcontrol' GUI.
 *
 *	'nnFlag' indicates whether this process is for Neural Network.
 *	When 'nnFlag = true', neural network has been trained and ready
 *	to run test on unknown images
 *
 */
void GermTools::manualContour(const Mat& src, Mat& dst, bool runFlag)
{
    Mat hsv;
    vector<Mat> planes;

    // Convert input image to HSV color format and Split HSV to single channel,
    // 'v' is useful for finding radicle and 's' is useful for finding seeds
    cvtColor(src, hsv, CV_BGR2HSV);
    split(hsv, planes);

    // count number of seeds in SATURATION 'S' channel and get result image
    // with region of seeds being identified
    regions = locateSeeds(planes[1], dst, runFlag);

    // For both training stage and testing stage, calculate feature vector of
    // located seeds for VALUE channel. Each seed is enclosed in a
    // square window of size 64 x 64
    generateData(planes[2], regions, runFlag);
}
/*
 *	Locate candidate seeds in the saturation-channel image 'src_s'
 *	(a gray level image, ideally with minimum noise and the ROI already
 *	applied). 'dst_rgb' is a copy of the original RGB image and serves
 *	as the result image displayed on the GUI.
 *
 *	In training mode (runFlag == false) each candidate is shown to the
 *	user who labels it from the keyboard: '1' = germinated,
 *	'0' = not germinated, '2' = noise.
 *	In run mode (runFlag == true) every candidate is stored unlabeled
 *	(val = 0) for the classifier to fill in later.
 *
 *	Returns the list of located SeedNode objects; the caller owns them.
 */
QList<SeedNode *> GermTools::locateSeeds(const Mat& src_s, Mat& dst_rgb, bool runFlag)
{
    QList<SeedNode *> list;
    // Input must be a valid single-channel (gray level) image
    if(src_s.channels() != 1 || !src_s.data) return list;

    Mat dist = Mat(src_s.size(), CV_32FC1);
    Mat temp = Mat(src_s.size(), CV_32FC1);
    Mat bin = Mat(src_s.size(), CV_8UC1);
    Mat dst1, dst2;

    // Contour points and their hierarchy (for sibling traversal)
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    // Reset per-image seed count
    total = 0;

    // Keep a clean copy of the RGB image for per-seed preview windows
    Mat rgb_copy = dst_rgb.clone();

    // For this series of images, a threshold of 80 works well for
    // both dark and light colored seeds
    threshold(src_s, bin, 80, 255, CV_THRESH_BINARY);
    // 3x3 Gaussian removes any residual pepper noise
    GaussianBlur(bin, dst1, Size(3, 3), 0.5, 0.5);

    // Distance transform followed by a Laplacian extracts seed skeletons
    distanceTransform(dst1, dist, CV_DIST_L2, 3);
    Laplacian(dist, temp, CV_32F, 3);

    threshold(temp, temp, -1, 255, CV_THRESH_BINARY);
    temp.convertTo(dst2, CV_8U, 1, 0);
    // Invert so each skeleton becomes a findable contour
    bitwise_not(dst2, dst2);
    findContours(dst2, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

    // Guard against an empty result: the previous code indexed
    // hierarchy[0] unconditionally, which is undefined when no
    // contours were found
    if(contours.empty())
        return list;

    // Empirically, skeletons shorter than this are not seeds
    const double minSkeletonLen = 100;
    double ratio = 1. / (double)DISPLAY_RATIO;

    // The preview window only exists during the training stage
    if(!runFlag) namedWindow("training_image");

    // Walk the top-level contours only (next sibling is hierarchy[idx][0])
    for(int idx = 0; idx >= 0; idx = hierarchy[idx][0])
    {
        // Skeleton length and rectangular bounding box of this contour
        double arcL = arcLength(Mat(contours[idx]), -1);
        Rect box = boundingRect(Mat(contours[idx]));

        if(arcL <= minSkeletonLen) continue;

        // Expand the bounding box into the fixed 64 x 64 seed window;
        // p1 is the reformatted upper-left corner
        Point p1 = Point(box.x-16, box.y-16);
        int key = 0;

        if(!runFlag)
        {
            // Training stage: display the boxed seed and have the user
            // identify it. waitKey returns the ASCII code, so subtracting
            // 48 ('0') maps key '1' -> 1 (germinated), '0' -> 0
            // (non-germinated), '2' -> 2 (noise).
            Point p2 = Point(box.x+48, box.y+48);
            Mat test = rgb_copy.clone();
            rectangle(test, p1, p2, CV_RGB(0, 255, 0), 2);
            Mat test_resize;
            resize(test, test_resize, Size(), ratio, ratio);
            imshow("training_image", test_resize);
            key = waitKey(0) - 48;
        }

        // Run mode keeps key == 0; the classifier fills in 'val' later
        list.append(new SeedNode(p1, key, arcL));
        ++total;
    }

    if(!runFlag) destroyWindow("training_image");

    // 'sum' accumulates the grand total across all processed images
    sum += total;

    return list;
}
/*
 *	Compute the texture feature vector for every located seed from the
 *	VALUE-channel image 'v' (HSV color space). Features are derived from
 *	co-occurrence statistics over the pixel values of each seed region.
 */
void GermTools::generateData(const Mat& v, const QList<SeedNode *>& regions, bool runFlag)
{
    // Nothing to compute when no seeds were located
    if(regions.isEmpty()) return;

    // LBP-based texture features on the gray pixel values. A wavelet
    // transform variant used to be selectable here and is currently
    // disabled:
    //   if(isWave) imgP->analyzeWaveletRegion(v, regions, runFlag);
    //   else ...
    imgP->analyzeLBPRegion(v, regions, runFlag);
}

/*
 *	Given a gray scale image (the 's' channel), estimate the blob noise
 *	near the top and bottom edges (handwritten numbers) and return the
 *	seed-bearing region enclosed between them. A fixed percentage is
 *	additionally cropped from the side edges, which may contain noise
 *	as well.
 */
Rect GermTools::estimateROI(const Mat &input)
{
    Mat bin;
    // Binarize the saturation channel before blob extraction
    threshold(input, bin, 80, 255, CV_THRESH_BINARY);

    // Extract blobs with cvblobslib (needs the C IplImage wrapper)
    IplImage ipl = bin;
    CBlobResult blobs = CBlobResult(&ipl, NULL, 0);

    int numBlobs = blobs.GetNumBlobs();
    int w = input.cols, h = input.rows;

    // No blobs at all: fall back to the full-height ROI with the same
    // fixed horizontal crop the general path applies. This also avoids
    // calling meanStdDev on an empty (1 x 0) matrix.
    if(numBlobs == 0)
    {
        blobs.ClearBlobs();
        return Rect(w/20, 0, 90*w/100, h);
    }

    // Collect every blob's area to derive an area threshold
    Mat areaMat(1, numBlobs, CV_64FC1);
    for(int i = 0; i < numBlobs; i++)
        areaMat.at<double>(0, i) = blobs.GetBlob(i)->Area();

    // Mean and standard deviation of the blob areas
    Scalar meanArea, stdArea;
    meanStdDev(areaMat, meanArea, stdArea);
    double avgA = (double) meanArea(0);
    double stdA = (double) stdArea(0);

    // Keep only blobs larger than mean + 2*sigma: seeds are filtered out
    // and bigger blobs such as handwritten numbers remain
    double max_area = avgA + 2. * stdA;
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, max_area);
    // Discard blobs wider than half the image
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetWidth(), B_GREATER_OR_EQUAL, w/2);
    // Discard blobs taller than half the image
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetHeight(), B_GREATER_OR_EQUAL, h/2);
    // Discard blobs with an extreme aspect ratio
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetAspect(), B_GREATER_OR_EQUAL, 5*h/w);

    // Track the tightest vertical band between upper and lower noise
    // blobs: 'top' is the largest bottom edge among upper-half blobs,
    // 'bottom' the smallest top edge among lower-half blobs
    int top = 0, bottom = h;
    numBlobs = blobs.GetNumBlobs();
    for(int i = 0; i < numBlobs; i++)
    {
        Rect box = blobs.GetBlob(i)->GetBoundingBox();
        if(box.y < h/2)                      // upper region
        {
            if(box.y + box.height > top) top = box.y + box.height;
        }
        else if(box.y > h/2 && box.y < h)    // lower region
        {
            if(box.y < bottom) bottom = box.y;
        }
    }

    // Release blob memory before returning
    blobs.ClearBlobs();

    // Final ROI: vertical band [top, bottom), with 5% cropped from the
    // left and 90% of the width kept to drop side-edge noise
    return Rect(w/20, top, 90*w/100, bottom - top);
}

/*
 *	Load the user-selected training data file 'file' and train the
 *	neural network on it. The network always has a single output neuron,
 *	since the target is either 1 (germination) or 0 (non-germination).
 *	'method' selects the data-set creation approach (1 = static,
 *	2 = growing with 'percent', 3 = windowing with 'size'/'step'),
 *	'max' is the epoch limit and 'hidden' the hidden-layer size.
 *	Returns true when the data file loads and training completes.
 */
bool GermTools::startTraining(QString &file, int method, int percent, int size, int step, int max, int hidden)
{
    dataReader reader;
    // Number of input features per sample
    const int numFeatures = imgP->getDim();
    // Timestamp used to tag the log file name
    QDateTime now = QDateTime::currentDateTime();

    // The data file must provide 'numFeatures' inputs plus one target
    // (number of targets matches the number of output neurons)
    if(!reader.loadDataFile(file.toStdString().c_str(), numFeatures, 1))
        return false;

    // Log file name encodes the chosen creation approach and parameters
    QString logfile = "./output/";
    switch(method)
    {
    case 1:
        reader.setCreationApproach( STATIC );
        logfile = logfile.append("static");
        break;
    case 2:
        reader.setCreationApproach( GROWING, percent );
        logfile = logfile.append("growing_" + QString::number(percent));
        break;
    case 3:
        reader.setCreationApproach( WINDOWING, size, step );
        logfile = logfile.append("window_" + QString::number(size) + "_" +
                                 QString::number(step));
        break;
    }

    // Rebuild the network: 'numFeatures' inputs, 'hidden' hidden neurons
    // and one output neuron (the default)
    if(nNetwork) delete nNetwork;
    nNetwork = new neuralNetwork(numFeatures, hidden, 1);

    neuralNetworkTrainer trainer( nNetwork );
    // Learning rate, momentum, batch-mode flag
    trainer.setTrainingParameters(0.001, 0.8, false);
    // Epoch limit and desired accuracy (%)
    trainer.setStoppingConditions(max, 90);

    // Append the epoch limit, hidden-layer size, and the day/hour of the
    // run to the log file name
    logfile = logfile.append("_m" + QString::number(max)
                             + "_h" + QString::number(hidden)
                             + "_" + QString::number(now.date().day())
                             + "_" + QString::number(now.time().hour())
                             + "_traininglogging.csv");
    // Log progress every 5 epochs
    trainer.enableLogging(logfile.toStdString().c_str(), 5);

    // Train on every generated training set
    for (int i = 0; i < reader.getNumTrainingSets(); i++ )
        trainer.trainNetwork( reader.getTrainingDataSet() );

    // Persist the weights of both the hidden and the output layer
    nNetwork->saveWeights("./output/NN_weights.csv");

    return true;
}

/*
 *	Rebuild the neural network and load previously saved weights for its
 *	hidden and output neurons from 'file'. 'hidden' is the number of
 *	hidden neurons; the network always has one output neuron.
 *	Returns whatever the underlying loadWeights() call reports.
 */
bool GermTools::loadWeights(const QString &file, int hidden)
{
    // Feature-vector dimensionality determines the input layer size
    const int numFeatures = imgP->getDim();

    // Replace any previously built network
    if(nNetwork) delete nNetwork;
    nNetwork = new neuralNetwork(numFeatures, hidden, 1);

    return nNetwork->loadWeights(file.toStdString().c_str());
}

/*
 *	Classify every located seed region in the current image using the
 *	trained/preloaded neural network, store each result in the matching
 *	SeedNode, and draw the outcome onto 'nnImg'.
 */
void GermTools::runNeuralNetwork(Mat &nnImg)
{
    // No network trained/loaded yet -- nothing to do
    if(!nNetwork) return;

    // Feature vectors of the seeds in the current image
    double** dPts = imgP->getDataPts();

    // Loop over the seeds located in the displayed image
    for(int i = 0; i < total; i++)
    {
        // feedForwardPattern() returns a result array (one entry per
        // output neuron) that the caller owns -- the original delete[]
        // below relies on that. The previous code additionally did
        // 'output = new int[1]' right before this call, leaking one
        // array per seed; that allocation is removed.
        int* output = nNetwork->feedForwardPattern(dPts[i]);
        regions.at(i)->val = output[0];
        delete [] output;
    }

    // Render the classification result
    drawOutput(nnImg);
}

/*
 *	Classify the located seeds with the attached classifier's SVM
 *	back-end (mode 0) and draw the result onto 'svmImg'.
 */
void GermTools::runSVM(Mat &svmImg)
{
    // No classifier attached -- nothing to do
    if(!classifier) return;

    // Feature vectors and their dimensionality for the current image
    double** featurePts = imgP->getDataPts();
    int featureDim = imgP->getDim();

    classifier->classify(featurePts, regions, 0, featureDim);
    // Render the classification result
    drawOutput(svmImg);
}

/*
 *	Classify the located seeds with the attached classifier's MLP
 *	back-end (mode 1) and draw the result onto 'mlpImg'.
 */
void GermTools::runMLP(Mat &mlpImg)
{
    // No classifier attached -- nothing to do
    if(!classifier) return;

    // Feature vectors and their dimensionality for the current image
    double** featurePts = imgP->getDataPts();
    int featureDim = imgP->getDim();

    classifier->classify(featurePts, regions, 1, featureDim);
    // Render the classification result
    drawOutput(mlpImg);
}

/*
 *	Classify the located seeds with the attached classifier's boosting
 *	back-end (mode 2) and draw the result onto 'boostImg'.
 */
void GermTools::runBoost(Mat &boostImg)
{
    // No classifier attached -- nothing to do
    if(!classifier) return;

    // Feature vectors and their dimensionality for the current image
    double** featurePts = imgP->getDataPts();
    int featureDim = imgP->getDim();

    classifier->classify(featurePts, regions, 2, featureDim);
    // Render the classification result
    drawOutput(boostImg);
}

/*
 *	Annotate 'img' with one 64 x 64 rectangle per classified seed:
 *	green when the classifier output is 1 (germinated), red otherwise.
 *	Also recounts 'germ', the number of germinated seeds in the
 *	current image.
 */
void GermTools::drawOutput(Mat &img)
{
    // Reset the germinated-seed counter for this image
    germ = 0;

    for(int i = 0; i < total; i++)
    {
        SeedNode *node = regions.at(i);
        // Seed window: top-left corner stored in the node, 64 x 64 extent
        Point topLeft = (Point)node->pos;
        Point bottomRight = Point(topLeft.x+64, topLeft.y+64);

        if(node->val == 1)
        {
            // Germinated: green box
            rectangle(img, topLeft, bottomRight, CV_RGB(0, 255, 0), 2);
            germ++;
        }
        else
        {
            // Not germinated: red box
            rectangle(img, topLeft, bottomRight, CV_RGB(255, 0, 0), 2);
        }
    }
}

/*
 *	Attach an externally created classifier (used by the SVM/MLP/Boost
 *	run methods). Ownership stays with the caller: this class never
 *	deletes the classifier.
 */
void GermTools::addClassifier(Classifier *c)
{
    this->classifier = c;
}

void GermTools::setWavelet(bool checked)
{
    this->isWave = checked;
    imgP->setWavelet(checked);
}
