
#define LOG_TAG "[GenderPlugin]"
#define _DEBUG_OUTPUT_ true

#include <opencv2/opencv.hpp>
#include "../BaseData.h"
#include "../GetFace.h"
#include <fstream>

using namespace std;
using namespace cv;

// Gender-classification plugin: on first use it trains a Fisherfaces model
// from a CSV of labeled face images, then predicts the gender of each
// incoming face crop (dump filenames below imply label 0 = male, 1 = female).
class GenderPlugin : public FaceMatProcessor {
    public:
        // Resizes the face crop to 30x30 grayscale and runs gender prediction.
        // args[0] must be the path of the training CSV file.
        virtual int process(cv::Mat &, std::vector<std::string> &args);
        // Intentionally a no-op for this plugin; always returns 0.
        virtual int processOriginal(cv::Mat &orig, std::vector<std::string> &args);

    private:
        // Normalizes a 1- or 3-channel image into the 0..255 range (8-bit).
        Mat norm_0_255(InputArray _src);
        // Parses "path<separator>label" lines from filename into images/labels.
        void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';');
        // Lazily trains the shared model, then predicts the gender of faceImg.
        int recog(Mat &faceImg, const string &train_dir);
        static bool isTrained;  //=false — set once the shared model has been trained
        static Ptr<FaceRecognizer> model;   // shared across all plugin instances
        static int  male_static_count;      // running count of male predictions (used in dump filenames)
        static int  female_static_count;    // running count of female predictions
};

// Shared per-process state: the model is trained once and reused by every
// GenderPlugin instance.  NOTE(review): no synchronization around these —
// assumes the plugin host drives process() from a single thread; confirm.
bool GenderPlugin::isTrained = false;
int  GenderPlugin::male_static_count = 0;
int  GenderPlugin::female_static_count = 0;
Ptr<FaceRecognizer> GenderPlugin::model = NULL; 

// Factory entry point resolved by name (dlsym) by the plugin host.
// Ownership of the returned object passes to the caller, who must
// release it through plugin_destroy().
extern "C" FaceMatProcessor *plugin_create(void ) {
    FaceMatProcessor *instance = new GenderPlugin();
    return instance;
}

// Teardown entry point resolved by name (dlsym) by the plugin host.
// Destroys an object previously returned by plugin_create().
// NOTE(review): deleting through FaceMatProcessor* requires the base class
// to declare a virtual destructor — confirm in BaseData.h.
extern "C" void plugin_destroy(FaceMatProcessor *p) {
    // delete on a null pointer is a well-defined no-op, so no guard is needed
    delete p;
}

// Entry point for each detected face: converts the crop to the 30x30
// grayscale format the Fisherfaces model was trained on, then predicts.
// callback_args[0] must be the path of the training CSV.
// Returns the predicted label from recog(), or -1 on bad arguments/input.
int GenderPlugin::process(Mat &img, vector<string> &callback_args) {
    if(callback_args.size() < 1) {
        edebug("at least need training_csv_file\n");
        return -1;
    }
    // Robustness: resize()/cvtColor() would throw on an empty Mat.
    if(img.empty()) {
        edebug("empty input image\n");
        return -1;
    }

    Mat resizedImg;
    Mat resizedGray;
    resize(img, resizedImg, Size(30, 30));
    if(img.channels() == 4) {
        // BUGFIX: CV_BGR2GRAY rejects 4-channel input, so BGRA frames
        // (common from screen grabs / some cameras) used to throw here.
        cvtColor(resizedImg, resizedGray, CV_BGRA2GRAY);
    } else if(img.channels() == 3) {
        cvtColor(resizedImg, resizedGray, CV_BGR2GRAY);
    } else {
        // already single-channel; Mat assignment just shares the header
        resizedGray = resizedImg;
    }
    //cvNamedWindow("face");
    //imshow("face", resizedGray);
    return recog(resizedGray, callback_args[0]);
}

// Deliberate no-op: gender classification only needs the cropped face
// delivered to process(), not the original full frame.  Always succeeds.
int GenderPlugin::processOriginal(cv::Mat &orig, std::vector<std::string> &args) {
    return 0;
}

// Returns a copy of _src min-max normalized into [0, 255] as an 8-bit
// image (1 or 3 channels).  Any other channel count is returned unchanged.
Mat GenderPlugin::norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    Mat dst;
    const int ch = src.channels();
    if (ch == 1) {
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
    } else if (ch == 3) {
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
    } else {
        // unsupported layout: hand back an untouched copy
        src.copyTo(dst);
    }
    return dst;
}

void GenderPlugin::read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator) {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            Mat readedImg = imread(path, 0);

            Size sz = readedImg.size();
            cdebug("readedImg(%s) size=%dx%d, channels=%d, isContinuous=%s\n", 
                path.c_str(), sz.width, sz.height, readedImg.channels(), 
                readedImg.isContinuous() ? "true" : "false");

            images.push_back(readedImg);
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

// Lazily trains the shared Fisherfaces gender model from the CSV at
// train_dir on the first call, then predicts the gender of faceImg
// (expected: 30x30 grayscale, as produced by process()).
// Returns the predicted label (0 maps to "Male", non-zero to "Female" in
// the dump filenames below), or -10086 if ESC was pressed in a preview
// window.  Calls exit(1) if the training CSV cannot be read.
// NOTE(review): lazy training is guarded only by the static isTrained
// flag — not thread-safe; confirm the plugin host is single-threaded.
int GenderPlugin::recog(Mat &faceImg, const string &train_dir){

    if(!isTrained) {

        cdebug("isTrained = false\n");
        vector<Mat> images;
        vector<int> labels;
        // Get the path to your CSV.
        string fn_csv = train_dir;
        // These vectors hold the images and corresponding labels.
        // Read in the data. This can fail if no valid
        // input filename is given.
        try {
            read_csv(fn_csv, images, labels);
        } catch (cv::Exception& e) {
            cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
            // nothing more we can do
            // NOTE(review): exit(1) kills the whole host process, not just
            // this plugin — consider returning an error code instead.
            exit(1);
        }
        // Quit if there are not enough images for this demo.
        if(images.size() <= 1) {
            string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
            CV_Error(CV_StsError, error_message);
        }
        // Get the height from the first image. We'll need this
        // later in code to reshape the images to their original
        // size:
        //int height = images[0].rows;
        // The following lines simply get the last images from
        // your dataset and remove it from the vector. This is
        // done, so that the training data (which we learn the
        // cv::FaceRecognizer on) and the test data we test
        // the model with, do not overlap.

        /*
        Mat testSample = images[images.size() - 1];
        int testLabel = labels[labels.size() - 1];
        images.pop_back();
        labels.pop_back();
        */


        // The following lines create an Fisherfaces model for
        // face recognition and train it with the images and
        // labels read from the given CSV file.
        // If you just want to keep 10 Fisherfaces, then call
        // the factory method like this:
        //
        //      cv::createFisherFaceRecognizer(10);
        //
        // However it is not useful to discard Fisherfaces! Please
        // always try to use _all_ available Fisherfaces for
        // classification.
        //
        // If you want to create a FaceRecognizer with a
        // confidence threshold (e.g. 123.0) and use _all_
        // Fisherfaces, then call it with:
        //
        //      cv::createFisherFaceRecognizer(0, 123.0);
        //
        model = createFisherFaceRecognizer();
        model -> train(images, labels);
        isTrained = true;

        // Persist the trained model so it could be reloaded instead of
        // retrained (see the commented-out load path below).
        model -> save("/tmp/sGenderData.xml");

        /*
        ifstream  ifs;
        ifs.open("/tmp/sGenderData.xml");
        if(!ifs) { //cant open
            cout << "open /tmp/sGenderData.xml failed!!!" << endl;
            ifs.close();
            return -1;
        } else {
            ifs.close();
            model->load("/tmp/sGenderData.xml");
            isTrained = true;
        }
        */
    }
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(faceImg);
    // Dump the classified face for offline inspection.
    // NOTE(review): imwrite fails silently if /tmp/result/ does not exist —
    // confirm the host creates it.
    stringstream ss;
    ss << "/tmp/result/" << (predictedLabel == 0 ? "Male" : "Female") 
       << "__" << (predictedLabel == 0 ? male_static_count ++ : female_static_count ++) << ".bmp";
    imwrite(ss.str(), faceImg);
    
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    cdebug("Predicted class = %d, seems %s\n", predictedLabel , predictedLabel == 0 ? "男" : "女");
    
    /*

    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getMat("eigenvectors");
    // Get the sample mean from the training data
    Mat mean = model->getMat("mean");

    */
    
    /*

    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the first, at most 16 Fisherfaces:
    for (int i = 0; i < min(16, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Bone colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_BONE);
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/fisherface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps:
    for(int num_component = 0; num_component < min(16, W.cols); num_component++) {
        // Slice the Fisherface from the model:
        Mat ev = W.col(num_component);
        Mat projection = subspaceProject(ev, mean, images[0].reshape(1,1));
        Mat reconstruction = subspaceReconstruct(ev, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
        } else {
            imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
        }
    }
    */

    // Display if we are not writing to an output folder:
    // -10086 is a sentinel meaning "user pressed ESC" (27) in a HighGUI window.
    if(waitKey(5) == 27) {
        return -10086;
    }

    return predictedLabel;
}
