#include <jni.h>

#include <opencv2/opencv.hpp>
#include <opencv2/legacy/legacy.hpp>

#include <cstdio>
#include <iostream>
#include <fstream>
#include <string>
#include <utility>
#include <vector>
#include <map>

#include "face_detect.h"

//for dir_operations()
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <stddef.h>
#include <unistd.h>


using namespace cv;
using namespace std;

#define LOG_TAG "GrayCamera"
#include "tools.h"

// All persistent state lives under this directory on external storage.
#define WORKING_DIR  "/sdcard/.facerecog"
#define FACES_DIR    "faces"    // one sub-folder per enrolled person
#define DEBUG_DIR    "debug"    // debug dumps (matrices, images)

#define CASCADE_FILE "haarcascade_frontalface_alt2.xml" // Haar face detector
#define FACEDB_FILE  "facedata.xml"  // trained eigenface model (OpenCV XML)
#define NAME_FILE   "names.txt"      // one name per training image, in row order
#define FACE_BASE_NAME "orl_faces_base_" // prefix of bundled base-set folders (never reported)

#define DEBUG 1

#ifndef isnan
// Fallback for toolchains whose libm does not provide isnan():
// NaN is the only floating-point value that compares unequal to itself.
inline bool isnan(float x) {
    return !(x == x);
}
#endif

// Absolute paths derived from the working directory; built once at load time.
string workingDir =  WORKING_DIR; 
string facesDir    = workingDir  + "/" + FACES_DIR;
string cascadeFile = workingDir  + "/" + CASCADE_FILE;
string facesdbFile = workingDir  + "/" + FACEDB_FILE;
string nameFile    = workingDir  + "/" + NAME_FILE;   
string debugDir    = workingDir  + "/" + DEBUG_DIR;
string facesBaseName(FACE_BASE_NAME);

// Lazily loaded by init(); cached for the lifetime of the process.
CvHaarClassifierCascade* faceCascade = NULL;

// Sentinel name returned when recognition is disabled or inconclusive.
const char *name_unknown = "unknown"; 

// Target dimensions for pre-scaling camera frames before detection.
const float IMG_DEFAULT_BIG = 176.0;
const float IMG_DEFAULT_SMALL = 144.0;
const float IMG_DEFAULT_RATIO = IMG_DEFAULT_BIG / IMG_DEFAULT_SMALL;

// simple container class
class Eigenface {
    public:
        int nTrainFaces;                // the number of training images
        int nEigens;                    // the number of eigenvalues
        IplImage* pAvgTrainImg;         // the average image
        vector<IplImage*> eigenVectVec; // eigenvectors
        CvMat* eigenValMat;             // eigenvalues
        CvMat* projectedTrainFaceMat;   // projected training faces
};      



extern "C" {

    /*
     * Load face cascadeclassifer file
     * 
     *@return: true if success, else false
     */
    JNIEXPORT jboolean JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_init( JNIEnv* env, jobject obj);

    /*
     * @param recog: 
     *              true to do recognize; false not to recog
     * @return: 
     *              NULL if not detect face
     *              or a String of (founded name) or (name_unknown)
     */
    JNIEXPORT jstring JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_hasHead( JNIEnv* env, jobject obj, jintArray buf, int w, int h, jboolean recog);

    /*
     * Detect a face in the frame and save it under <facesDir>/<people>/.
     * Returns the frame pixels on success, or an int[1] when no face found.
     */
    JNIEXPORT jintArray JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_saveHead( JNIEnv* env, jobject obj, jintArray buf, int w, int h, jstring people);

    // Rebuild the eigenface model from all saved faces; true on success.
    JNIEXPORT jboolean JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_train(JNIEnv *env, jobject obj);

    // Delete all trained data and saved faces, then re-initialize.
    JNIEXPORT jboolean JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_reset(JNIEnv *env, jobject obj);

    // Returns "name=count;name=count;..." for every enrolled person.
    JNIEXPORT jstring JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_getAllNames(JNIEnv *env, jobject obj);

    // Returns the sentinel name used for unrecognized faces.
    JNIEXPORT jstring JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_getNameUnknown(JNIEnv *env, jobject obj);

    // Returns the native working directory path.
    JNIEXPORT jstring JNICALL
        Java_com_finalx_GrayCamera_GrayCameraJni_getWorkingDirectory(JNIEnv *env, jobject obj);

}

// Internal helpers, defined below.
void doPCA(Eigenface& data, const vector<IplImage*>& faceImgVec);
char *doRecognize(IplImage *faceImg, char *founded_name);
int findNearestNeighbor(const Eigenface& data, float * projectedTestFace, const vector<string> &names);
string getFolder(const string& filename);
vector<string> getNameList (const string& dir);
bool init();
bool learn(Eigenface& data, const vector<IplImage*>& faceImgVec);                                                                                                                                 
vector<string> listDir(const string &dir);
vector< vector <string> > listSubdir (const string & dir);
int loadFaceImgVector(vector<IplImage*>& faceImgVec, const string& dir);
vector< string > loadNameFile(const string& filename );
int loadTrainingData(Eigenface& data, const string& filename);
int make_people_dir(const char *name);
void optimal_size(int in[], int out[]);
void storeTrainingData(const Eigenface& data, const string& filename);
int writeMatrix(CvMat* mat, const string& filename);
void writeNameFile(const string& filename, const vector< vector <string> >& subFiles);
IplImage *resizeImage(const IplImage *srcImg);
void myCvReleaseImage(IplImage **img);

// Expose the native working directory to the Java side.
JNIEXPORT jstring JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_getWorkingDirectory(JNIEnv *env, jobject obj)
{
    const string &dir = workingDir;
    return env->NewStringUTF(dir.c_str());
}

// Expose the "unknown face" sentinel string to the Java side.
JNIEXPORT jstring JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_getNameUnknown(JNIEnv *env, jobject obj)
{
    return env->NewStringUTF(name_unknown);
}

/*
 * Detect (and optionally recognize) a face in a 4-channel frame.
 * @param buf   pixel data, w*h ints
 * @param recog JNI_TRUE to run recognition on a detected face
 * @return NULL when no face is detected; otherwise the recognized name,
 *         or name_unknown when recognition is off / unavailable / failed.
 */
JNIEXPORT jstring JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_hasHead( JNIEnv* env, jobject obj, jintArray buf, int w, int h, jboolean recog)
{
    // BUGFIX: second argument is a jboolean* — pass NULL, not `false`.
    jint *cbuf = env->GetIntArrayElements(buf, NULL);
    if (cbuf == NULL ) {
        LOGE("GrayCamera", "WARN :cbuf = NULL");
        return NULL;
    }

    DURATION_START;

    // Wrap the Java int[] as a 4-channel image header (no pixel copy),
    // then downscale and convert to a single-channel gray image.
    Mat srcImgMat(h, w, CV_8UC4, (unsigned char *)cbuf);
    IplImage *srcImg = new IplImage(srcImgMat);
    IplImage *dstImg = resizeImage(srcImg);

    LOGD("Passed to native: (%dx%d) with depth:%d channel:%d --> After Pre-process: (%dx%d) depth:%d channel:%d",
            srcImg->width, srcImg->height, srcImg->depth, srcImg->nChannels,
            dstImg->width, dstImg->height, dstImg->depth, dstImg->nChannels);
    DURATION_STOP("pre-process image ");

    delete srcImg;      // releases only the header; pixels belong to cbuf
    srcImg = dstImg;
    IplImage *faceImg = NULL;

    DURATION_START;

    bool res = face_detect(srcImg, &faceImg, faceCascade, false);

    myCvReleaseImage( &srcImg);

    DURATION_STOP("face detectation");

    env->ReleaseIntArrayElements(buf, cbuf, 0);

    if( NULL == faceImg || !res) {
        if (faceImg != NULL) {
            // BUGFIX: don't leak an image handed back alongside a failure.
            myCvReleaseImage(&faceImg);
        }
        return NULL;
    }

    // Face found but the caller did not ask for recognition.
    if (recog == JNI_FALSE) {
        myCvReleaseImage(&faceImg);
        return env->NewStringUTF(name_unknown);
    }

    DURATION_START;
    char name_founded[128];
    strcpy(name_founded, name_unknown);

    // Recognition needs both the trained model and the name list on disk.
    if(access(facesdbFile.c_str(), 0) == 0 && access(nameFile.c_str(), 0) == 0 ) {
        char *recRes = doRecognize(faceImg, name_founded);
        DURATION_STOP("FACE RECOGNIZE ");
        myCvReleaseImage(&faceImg);

        if( NULL == recRes) {
            return env->NewStringUTF(name_unknown);
        } else {
            return env->NewStringUTF(name_founded);
        }
    }

    // BUGFIX: the original fell through here without releasing faceImg,
    // leaking one image per frame whenever the model files were missing.
    myCvReleaseImage(&faceImg);
    return env->NewStringUTF(name_unknown);
}

// Wrapper around cvReleaseImage() that also NULLs the caller's pointer,
// so a released image cannot be used (or double-freed) by mistake.
void myCvReleaseImage(IplImage **img)
{
    cvReleaseImage(img);
    *img = NULL;
}

/*
 * Downscale `srcImg` to the dimensions chosen by optimal_size() and convert
 * the result to a single-channel 8-bit gray image.  Caller owns the result.
 * NOTE(review): the conversion code is CV_BGR2GRAY while the JNI entry
 * points hand in a 4-channel buffer — confirm the channel order is correct.
 */
IplImage * resizeImage(const IplImage *srcImg) 
{
    int inSize[2];
    int outSize[2];
    inSize[0] = srcImg->width;
    inSize[1] = srcImg->height;
    optimal_size(inSize, outSize);

    IplImage *resized = cvCreateImage(cvSize(outSize[0], outSize[1]), srcImg->depth, srcImg->nChannels);
    IplImage *gray    = cvCreateImage(cvSize(outSize[0], outSize[1]), IPL_DEPTH_8U, 1);

    cvResize(srcImg, resized, CV_INTER_CUBIC);
    cvCvtColor(resized, gray, CV_BGR2GRAY);

    myCvReleaseImage(&resized);
    return gray;
}


/*
 * Detect a face in the frame and, if found, save it as the next numbered
 * bitmap under <facesDir>/<people>/.
 * @return the original pixels (w*h ints) when a face was saved,
 *         a 1-element array when no face was detected,
 *         or NULL on allocation failure.
 */
JNIEXPORT jintArray JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_saveHead( JNIEnv* env, jobject obj, jintArray buf, int w, int h, jstring people) 
{
    const char *name = env->GetStringUTFChars(people, NULL);
    if (name == NULL) {
        return NULL;    // JVM OOM; an exception is already pending
    }

    // BUGFIX: second argument is a jboolean* — pass NULL, not `false`.
    jint *cbuf = env->GetIntArrayElements(buf, NULL);
    if (cbuf == NULL) {
        // BUGFIX: `name` was leaked on this early-return path.
        env->ReleaseStringUTFChars(people, name);
        return NULL;
    }

    // Wrap the Java int[] as an image header, then downscale + gray-convert.
    Mat srcImgMat(h, w, CV_8UC4, (unsigned char *)cbuf);
    IplImage *srcImg = new IplImage(srcImgMat);
    IplImage *dstImg = resizeImage(srcImg);
    delete srcImg;                  // header only; pixels belong to cbuf
    srcImg = dstImg;

    IplImage *faceImg = NULL;

    // Pick the first unused index for <name>_<i>.bmp.
    string file_name;
    char index_str[16];
    for(int i = 0; ; i++) {
        sprintf(index_str, "%d", i);
        file_name = facesDir + "/" + name + "/" + name + "_" + index_str + ".bmp";
        if(access(file_name.c_str(), 0) != 0) {
            break;
        }
    }

    int size = 0;
    if( face_detect(srcImg, &faceImg, faceCascade, false)) {
        LOGD("file name calcatued: %s", file_name.c_str());

        make_people_dir(name);
        cvSaveImage(file_name.c_str(), faceImg);

        myCvReleaseImage(&faceImg);

        size = w * h;   // echo the full frame back on success
    } else {
        // not found face, return an array int[len=1]
        size = 1;
    }
    myCvReleaseImage (&srcImg);

    jintArray result = env->NewIntArray(size);
    env->SetIntArrayRegion(result, 0, size, cbuf);
    env->ReleaseIntArrayElements(buf, cbuf, 0);

    env->ReleaseStringUTFChars(people, name);

    return result;
}

/*
 * Build "name=count;name=count;..." for every enrolled person, skipping
 * the bundled ORL base-set folders (prefix facesBaseName).
 */
JNIEXPORT jstring JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_getAllNames(JNIEnv *env, jobject obj)
{
    vector <string> nameList = listDir(facesDir);

    string ret("");
    char num_str[16];

    for(size_t i = 0; i < nameList.size(); i++){
        vector<string> filesEachName = listDir(nameList[i]);
        if(filesEachName.empty()) {
            continue;
        }
        // Compute the folder (person) name once instead of twice.
        string folder = getFolder(filesEachName[0]);
        if(folder.find(facesBaseName) != 0){
            // BUGFIX: size() is size_t; "%d" expects int — cast explicitly.
            sprintf(num_str, "%d", (int)filesEachName.size());
            ret += folder + "=" + num_str + ";";
        }
    }

    return env->NewStringUTF(ret.c_str());
}

/*
 * Rebuild the eigenface model from every image found under facesDir,
 * refresh the name file, and persist the model to facesdbFile.
 * @return JNI_TRUE on success, JNI_FALSE when there is nothing to train
 *         on or learning fails.
 */
JNIEXPORT jboolean JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_train(JNIEnv *env, jobject obj)
{
    Eigenface data;
    vector<IplImage*> faceImgVec;

    // Enumerate <facesDir>/<person>/<image> two levels deep.
    vector< vector<string> > subFiles = listSubdir(facesDir);

    LOGD("subFiles: +++++++++");
    for (size_t i = 0; i < subFiles.size(); i++) {
        for (size_t j = 0; j < subFiles[i].size(); j++) {
            LOGD("%s", subFiles[i][j].c_str());
        }
    }
    LOGD("subFiles: ----------");

    // Record which person each training image belongs to, in load order.
    writeNameFile(nameFile, subFiles);

    data.nTrainFaces = loadFaceImgVector(faceImgVec, facesDir);
    if (data.nTrainFaces <= 0) {
        return JNI_FALSE;   // no faces found under facesDir
    }

    if (!learn(data, faceImgVec)) {
        return JNI_FALSE;   // could not build the faces database
    }

    // store the recognition data as an xml file
    storeTrainingData(data, facesdbFile);
    return JNI_TRUE;
}

// Thin JNI wrapper over the native init() routine.
JNIEXPORT jboolean JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_init( JNIEnv* env, jobject obj)
{
    return init() ? JNI_TRUE : JNI_FALSE;
}

/*
 * Wipe the trained model, saved faces and name list, then re-create the
 * on-disk layout and reload the cascade via init().
 */
JNIEXPORT jboolean JNICALL Java_com_finalx_GrayCamera_GrayCameraJni_reset(JNIEnv *env, jobject obj)
{
    char cmd_buf[256];
    // BUGFIX: snprintf instead of sprintf so a long path can never
    // overflow cmd_buf.
    snprintf(cmd_buf, sizeof(cmd_buf), "rm -r %s/face*", workingDir.c_str());
    LOGD("reset cmd[0]:%s", cmd_buf);
    system(cmd_buf);
    snprintf(cmd_buf, sizeof(cmd_buf), "rm -r %s", nameFile.c_str());
    LOGD("reset cmd[1]:%s", cmd_buf);
    system(cmd_buf);

    // Restore the directory layout removed above.
    mkdir(workingDir.c_str(), 0755);
    mkdir(facesDir.c_str(), 0755);

    return Java_com_finalx_GrayCamera_GrayCameraJni_init( env, obj);
}

int make_people_dir(const char *name){
    string dir_name = facesDir + "/" +  name;
    return mkdir(dir_name.c_str(), 0755);
}

// Library entry point: make sure the on-disk layout exists before any
// other native call can run.
jint JNI_OnLoad(JavaVM* vm, void* reserved){
    LOGD("JNI_OnLoad startup~~!");

    mkdir(workingDir.c_str(), 0755);
    mkdir(facesDir.c_str(), 0755);

    return JNI_VERSION_1_6;
}

// Lazily load the Haar cascade and cache it for the process lifetime.
// Returns true when the cascade is available, false when loading failed.
bool init()
{
    if (faceCascade != NULL) {
        return true;    // already loaded on a previous call
    }

    faceCascade = (CvHaarClassifierCascade*)cvLoad( cascadeFile.c_str(), 0, 0, 0 );
    if (faceCascade == NULL) {
        LOGE("can't load cascade file %s", cascadeFile.c_str());
        return false;
    }

    return true;
}

// Return the full path of every entry in `dir` (excluding "." and ".."),
// or an empty vector when the directory cannot be opened.
vector<string> listDir(const string &dir) 
{
    vector<string> files;

    DIR *dp = opendir(dir.c_str());
    if (dp == NULL) {
        return files;
    }

    for (struct dirent *ep = readdir(dp); ep != NULL; ep = readdir(dp)) {
        if (strcmp(ep->d_name, ".") == 0 || strcmp(ep->d_name, "..") == 0) {
            continue;   // skip the self/parent pseudo-entries
        }
        files.push_back(dir + "/" + ep->d_name);
    }
    closedir(dp);

    return files;
}

// returns an array of the dirs content
// For each entry directly inside `dir`, list that entry's own contents.
// Result: one inner vector per sub-directory (person), holding file paths.
vector< vector <string> > listSubdir (const string & dir) {
    vector<string> entries = listDir(dir);

    vector< vector<string> > subFiles;
    for (size_t i = 0; i < entries.size(); i++) {
        subFiles.push_back(listDir(entries[i]));
    }
    return subFiles;
}

// writes file with names
// writes file with names
// One line per training image: the (person) folder each image lives in.
// Row i of this file corresponds to row i of the projected-face matrix,
// so recognition can map a matrix row back to a person's name.
void writeNameFile(const string& filename, const vector< vector <string> >& subFiles) {
    if (filename.size() != 0) {
        ofstream out( filename.c_str() );   // Open file for writing
        if ( out.is_open() ) {
            for ( int i = 0; i < subFiles.size(); i++ ) {
                for ( int j = 0; j < subFiles[i].size(); j++ ) {
                    out << getFolder(subFiles[i][j]) << "\n";
                    LOGD("write to %s,content:%s", filename.c_str(), getFolder(subFiles[i][j]).c_str());
                }
            }
            out.close();
        } else {
            // silently ignored: file could not be opened for writing
        }
    } else {
        // silently ignored: empty filename
    }
} 

// returns relative folder name       
// Return the name of the directory directly containing `filename`,
// e.g. "/a/b/c.txt" -> "b".  For the training layout
// <facesDir>/<person>/<image> this yields the person's name.
// A path with no '/' at all is returned unchanged.
string getFolder(const string& filename) {
    // BUGFIX: use string::size_type, not int — find_last_of() returns
    // npos (a huge size_t) when no '/' exists, which overflowed the int
    // and only worked by accident when converted back to size_t.
    string::size_type pos = filename.find_last_of('/');
    string before_last_slash = filename.substr(0, pos);  // strip the basename

    pos = before_last_slash.find_last_of('/');
    if (pos == string::npos) {
        return before_last_slash;   // no parent path left
    }
    return before_last_slash.substr(pos + 1);
}

// returns name look-up list                                                                                                                                                                        
// Flatten all files two directory levels below `dir` into a single list
// (i.e. every file of every sub-directory).
vector<string> getNameList (const string& dir) {
    LOGD("getnamelist ++++");
    vector<string> result;
    vector<string> subDirs = listDir(dir);

    for (size_t i = 0; i < subDirs.size(); i++) {
        vector<string> entries = listDir(subDirs[i]);
        result.insert(result.end(), entries.begin(), entries.end());
    }

    LOGD("getnamelist ----");
    return result;
}


/*
 * Load every image found two levels under `dir` (one sub-folder per person)
 * as a grayscale IplImage into faceImgVec.
 * @return the number of images loaded, or 0 on failure / empty directory.
 *         Caller owns the loaded images.
 */
int loadFaceImgVector(vector<IplImage*>& faceImgVec, const string& dir) {
    vector< vector <string> > subFiles = listSubdir(dir);

    // count faces
    int nFaces = 0;
    for (size_t i = 0; i < subFiles.size(); i++ ) {
        nFaces += (int)subFiles[i].size();
        LOGD("%s, founding face: total %d ", __func__, nFaces);
    }
    if (nFaces == 0) return 0;

    faceImgVec = vector<IplImage*>(nFaces);

    // store the face images in an array
    int count = 0;
    for ( size_t i = 0; i < subFiles.size(); i++ ) {
        for ( size_t j = 0; j < subFiles[i].size(); j++ ) {
            faceImgVec[count] = cvLoadImage((char*)(subFiles[i][j]).c_str(), CV_LOAD_IMAGE_GRAYSCALE);
            if ( !faceImgVec[count] ) {
                // BUGFIX: release everything loaded so far instead of
                // leaking it, and leave the output vector empty.
                for (int k = 0; k < count; k++) {
                    cvReleaseImage(&faceImgVec[k]);
                }
                faceImgVec.clear();
                return 0;
            }
            LOGD("load face image: %s to faceImgVec[%d]", subFiles[i][j].c_str(), count);
            count++;
        }
    }

    return nFaces;
}

/*
 * Run PCA over the training faces: fills `data` with the average image,
 * the eigenvector images and the (L1-normalized) eigenvalues.
 * Uses nTrainFaces-1 eigenvalues, the maximum meaningful number.
 */
void doPCA(Eigenface& data, const vector<IplImage*>& faceImgVec) {
    // set the number of eigenvalues to use
    data.nEigens = data.nTrainFaces-1;

    // allocate the eigenvector images (same size as the training faces)
    CvSize faceImgSize;
    faceImgSize.width  = faceImgVec[0]->width;
    faceImgSize.height = faceImgVec[0]->height;
    data.eigenVectVec = vector<IplImage*>(data.nEigens);
    for (int i=0; i<data.nEigens; i++) {
        data.eigenVectVec[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
    }

    data.eigenValMat = cvCreateMat( 1, data.nEigens, CV_32FC1 );
    data.pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
    CvTermCriteria calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, data.nEigens, 1);

    // compute average image, eigenvalues, and eigenvectors
    cvCalcEigenObjects( data.nTrainFaces,                       // Number of source objects. 
            (void*)&(*(faceImgVec.begin())),        // Array of IplImage input objects
            (void*)&(*(data.eigenVectVec.begin())), // Array of eigen objects
            CV_EIGOBJ_NO_CALLBACK,                  // I/O flags
            0, 0,                                   // I/O buffer, userData
            &calcLimit,                             // Criteria that determine when to stop calculation of eigen objects
            data.pAvgTrainImg,                      // Averaged object.
            data.eigenValMat ->data.fl);            // Pointer to the eigenvalues array

    for(int i = 0; i < data.eigenValMat->rows; i ++) {
        for(int j =0; j < data.eigenValMat->cols; j ++) {
            // BUGFIX: the element stride is sizeof(float); the original used
            // sizeof(float *), which logged the wrong elements on 64-bit.
            LOGD("eigenValMat[%d][%d] = %f", i, j, *((float *)((unsigned char *)(data.eigenValMat->data.fl) + (data.eigenValMat->step * i + sizeof(float) * j))));
        }
    }

    // Normalize so eigenvalue magnitudes are comparable across trainings
    // (they are used as weights in the Mahalanobis distance later).
    cvNormalize(data.eigenValMat, data.eigenValMat, 1, 0, CV_L1, 0);
} 

// modified C functions from Robin Hewitt
// Build the eigenface model from the loaded training images: run PCA,
// then project every training face into the eigenspace so recognition can
// compare against the resulting coefficient rows.
// Returns false when there are fewer than 3 training faces.
bool learn(Eigenface& data, const vector<IplImage*>& faceImgVec) {                                                                                                                                  
    // enough faces?  nEigens = nTrainFaces - 1, so at least 3 faces are
    // required for a usable subspace.
    if ( data.nTrainFaces < 3 ) { 
        LOGE("data.nTranFace: %d < 3", data.nTrainFaces);
        return false;
    }   

    // do PCA on the training faces
    doPCA(data, faceImgVec);

    // project the training images onto the PCA subspace
    data.projectedTrainFaceMat = cvCreateMat( data.nTrainFaces, data.nEigens, CV_32FC1 );
    // row stride of the projection matrix in floats (step is in bytes)
    int offset = data.projectedTrainFaceMat->step / sizeof(float);
    LOGD("line<%d> offset = %d\n", __LINE__, offset);
    for (int i=0; i<data.nTrainFaces; i++) {
        cvEigenDecomposite(faceImgVec[i],                                       // Input object
                data.nEigens,                                        // Number of eigen objects. 
                &(*(data.eigenVectVec.begin())),                     // Array of IplImage input objects
                0, 0,                                                // I/O flags, userData
                data.pAvgTrainImg,                                   // Averaged object
                //projectedTrainFaceMat->data.fl + i*nEigens);
            (float *) (data.projectedTrainFaceMat->data.fl + i*offset));     // Calculated coefficients
        LOGD("projectedTrainFaceMat ... = %lf\n",  * ((float *) (((unsigned char *) data.projectedTrainFaceMat->data.fl) + i*offset)));

    }   

    // check for double faces (historical debug hook; currently a no-op)
    if(DEBUG){
        for (int i = 0; i< data.nEigens; i++) {
            // intentionally empty: used to log the first projected value per row
        }   
    }   
#if 0
    map<float, int> projVals;
    for (int i = 0; i< data.nEigens; i++) {
        projVals[*(data.projectedTrainFaceMat->data.fl + i*offset)]++;
    }   

    for( int i = 0; i < projVals.size(); i ++) {
        if(projVals[i].second > 1){
            // writeLog(__LINE__, "Error: Database contains probably one face two times: " + lexical_cast<string>(i->first));
            __android_log_print(ANDROID_LOG_DEBUG, "GrayCamera", "ERROR: database contains probably one face two times");
            return false;
        }
    }
#endif

    return true;
}

void storeTrainingData(const Eigenface& data, const string& filename) {
    // create a file-storage interface
    CvFileStorage * fileStorage = cvOpenFileStorage( filename.c_str(), 0, CV_STORAGE_WRITE );

    // store all the data
    cvWriteInt( fileStorage, "nEigens",               data.nEigens );
    cvWriteInt( fileStorage, "nTrainFaces",           data.nTrainFaces );
    cvWrite(    fileStorage, "eigenValMat",           data.eigenValMat,           cvAttrList(0,0));
    cvWrite(    fileStorage, "projectedTrainFaceMat", data.projectedTrainFaceMat, cvAttrList(0,0));
    cvWrite(    fileStorage, "avgTrainImg",           data.pAvgTrainImg,          cvAttrList(0,0));

    char varname[32];// = "eigenVect_";
    for (int i=0; i<data.nEigens; i++) {
        //string varname = "eigenVect_" + lexical_cast<string>(i);
        sprintf(varname, "eigenVect_%d", i);
        cvWrite(fileStorage, varname, data.eigenVectVec[i], cvAttrList(0,0));
    }

#if 0
    if (DEBUG) {
        //writeLog(__LINE__, "nEigens = " + lexical_cast<string>(data.nEigens));
        //writeLog(__LINE__, "nTrainFaces = " + lexical_cast<string>(data.nTrainFaces));

        writeMatrix(data.eigenValMat, debugDir + "/" + "eigenValMat.txt");
        writeMatrix(data.projectedTrainFaceMat, debugDir + "/" + "projectedTrainFaceMat.txt");
        cvSaveImage(string(debugDir + "/" + "avgTrainImg.pgm").c_str(), data.pAvgTrainImg);

        for (int i=0; i<data.nEigens; i++) {
            //string varname = "eigenVect_" + lexical_cast<string>(i);
            sprintf(varname, "eigenVect_%d", i);
            cvSaveImage(string(debugDir + "/" + varname + ".pgm").c_str(), data.eigenVectVec[i]);
        }
    }
#endif

    // release the file-storage interface
    cvReleaseFileStorage( &fileStorage );
}  


// debug function to write out the OpenCV matrix as Octave matrix
// Debug helper: dump an OpenCV matrix as an Octave-readable text matrix.
// Returns 0 on success, non-zero if the file could not be opened.
int writeMatrix(CvMat* mat, const string& filename) {
    ofstream out( filename.c_str() );   // Open file for writing
    if ( !out.is_open() ) {
        return 1;   // caller treats non-zero as error
    }

    out << "# Created by opencv-facerecog\n";
    out << "# name:  " << filename << "\n";
    out << "# type:  matrix\n";
    out << "# rows: " << mat->rows<<"\n";
    out << "# columns: " << mat->cols<<"\n";
    for ( int i = 0; i < mat->rows; i++ ) {
        for ( int j = 0; j < mat->cols; j++ ) {
            // BUGFIX: values ran together with no separator, producing a
            // matrix Octave could not parse.
            out << cvmGet(mat,i,j) << ' ';
        }
        out<< "\n";
    }
    out.close();

    return 0;  //zero, if there is no problem
}       

/*
 * Compute the target size for pre-scaling a camera frame.
 * in[0]/out[0] = width, in[1]/out[1] = height.
 * A square frame maps to IMG_DEFAULT_BIG on both sides; otherwise one side
 * becomes IMG_DEFAULT_BIG and the other is multiplied by the input aspect
 * ratio (width/height).
 * NOTE(review): for portrait input (w < h) this makes the output height
 * SMALLER than IMG_DEFAULT_BIG, which is asymmetric with the landscape
 * case — confirm this is the intended behavior.
 */
void optimal_size(int in[], int out[])
{
    const float ratio = ((float) in[0]) / in[1];

    if (in[0] == in[1]) {
        out[0] = out[1] = (int) IMG_DEFAULT_BIG;
    } else if (in[0] > in[1]) {
        out[0] = (int) (IMG_DEFAULT_BIG * ratio);
        out[1] = (int) (IMG_DEFAULT_BIG);
    } else {
        out[0] = (int) (IMG_DEFAULT_BIG);
        out[1] = (int) (IMG_DEFAULT_BIG * ratio);
    }
}

/*
 * Load a previously stored eigenface model from `filename` into `data`.
 * @return 0 on success, -1 if the file cannot be opened.
 *         Caller owns every allocation read into `data`.
 */
int loadTrainingData(Eigenface& data, const string& filename)
{
    // create a file-storage interface
    CvFileStorage* fileStorage = cvOpenFileStorage( filename.c_str(), 0, CV_STORAGE_READ );
    if ( !fileStorage ) {
        LOGE("%s cant't load %s", __func__, filename.c_str());
        return -1;
    }

    // load data
    data.nEigens               =          cvReadIntByName(fileStorage, 0, "nEigens",               0);
    data.nTrainFaces           =          cvReadIntByName(fileStorage, 0, "nTrainFaces",           0);
    data.eigenValMat           = (CvMat*)    cvReadByName(fileStorage, 0, "eigenValMat",           0);
    data.projectedTrainFaceMat = (CvMat*)    cvReadByName(fileStorage, 0, "projectedTrainFaceMat", 0);
    data.pAvgTrainImg          = (IplImage*) cvReadByName(fileStorage, 0, "avgTrainImg",           0);

    LOGD("%s: pAvgTrainImg->(%d, %d)", __func__, data.pAvgTrainImg->width, data.pAvgTrainImg->height);

    // BUGFIX: size the vector to nEigens — only nEigens eigenvectors are
    // stored; the original sized it nTrainFaces, leaving the extra slots
    // as NULLs that were inconsistent with doPCA()'s layout.
    char i_cstr[32] = "\0";
    data.eigenVectVec = vector<IplImage*>(data.nEigens);
    for (int i=0; i<data.nEigens; i++) {
        sprintf(i_cstr, "eigenVect_%d", i);
        data.eigenVectVec[i] = (IplImage*) cvReadByName(fileStorage, 0, i_cstr, 0);
    }

    // release the file-storage interface
    cvReleaseFileStorage( &fileStorage );

    return 0;
}

// loads file with names
// loads file with names
// One line per training image; row order matches the projected-face matrix.
// Returns an empty vector when the filename is empty or unreadable.
vector< string > loadNameFile(const string& filename ){
    vector< string > names;

    if (filename.empty()) {
        LOGE("%s filename  passed in size 0", __func__);
        return names;
    }

    ifstream in( filename.c_str() );
    if ( !in ) {
        LOGE("%s filename %s passed unable to open", __func__, filename.c_str());
        return names;
    }

    string line;
    while ( getline( in, line ) ) {
        names.push_back( line );
    }
    return names;
}

char *doRecognize(IplImage *faceImg, char *name_founded)
{
    Eigenface data;
    if( 0 != loadTrainingData(data, facesdbFile) ) {
        return NULL;
    }

    //test ++
    //storeTrainingData(data, "/sdcard/tmp/traindata.xml");
    //test --

    vector<string> names = loadNameFile(nameFile);
    if((int)names.size() != data.nTrainFaces){
        LOGE("%d entries in namefile, but expected %d", names.size(), data.nTrainFaces);
        return NULL;
    }

    // project the image onto the PCA subspace
    float* projectedTestFace = (float *)cvAlloc( data.nEigens*sizeof(float) );

    //cvSaveImage("/sdcard/abc_753.bmp", faceImg);
    cvEigenDecomposite(faceImg,              // Input object
            data.nEigens,                    // Number of eigen objects. 
            &(*(data.eigenVectVec.begin())), // Array of IplImage input objects
            0, 0,                            // I/O flags, userData
            data.pAvgTrainImg,               // Averaged object
            projectedTestFace);              // Calculated coefficients


    for (int i = 0; i< data.nEigens; i++) {
        if(isnan(projectedTestFace[i])){
            LOGE("Error: Database contains probably one face two times");
            return NULL;
        }
    }

    int iNearest = findNearestNeighbor(data, projectedTestFace, names);

    cvFree(&projectedTestFace);

#if 0
    if(iNearest < 0) {
        //与所有注册的face匹配均不成功
        strcpy(name_founded, name_unknown);
    } else {
        strcpy(name_founded, names[iNearest].c_str());
    }
#endif

    strcpy(name_founded, names[iNearest].c_str());
    string name_founded_string(name_founded);
    if( name_founded_string.find(facesBaseName) == 0 ) {
        LOGD("founded: %s, will not show to caller, so return NULL, at %s:%d", name_founded, __FILE__, __LINE__);
        return NULL;
    } 

    return name_founded;
}

int findNearestNeighbor(const Eigenface& data, float * projectedTestFace, const vector<string> &names) {
    double  leastDistSq = 0;
    bool    firstLoop = true;
    int     iNearest = 0;
    string  orig_name("");

    for (int iTrain=0; iTrain<data.nTrainFaces; iTrain++) {
        double distSq=0;
        for (int i=0; i<data.nEigens; i++) {
            float d_i = projectedTestFace[i] - data.projectedTrainFaceMat->data.fl[iTrain*data.nEigens + i];
            distSq += d_i*d_i / data.eigenValMat->data.fl[i];  // Mahalanobis
            //distSq += d_i*d_i; // Euclidean
        }
        if(orig_name != names[iTrain]) {
            LOGD("\n%s ------>", names[iTrain].c_str());
        }
        LOGD("\t\t distSq/1M = %lf", distSq/1000000);
        orig_name = names[iTrain];
        //__android_log_print(ANDROID_LOG_DEBUG, "GrayCamera", "distSq: %lf", distSq);
        if (firstLoop || distSq < leastDistSq) {
            leastDistSq = distSq;
            iNearest = iTrain;
            firstLoop = false;
        }
    }

    LOGD("\nleastDistSq/1M: =====> [%s] %lf <=====", names[iNearest].c_str(), leastDistSq/1000000);

#if 0
    //实践表明, 当leastDistSq/1M > 200 时,可以认为可信度太低,结果不可靠
    if(leastDistSq/1000000 > 500.0) {
        iNearest = -1;
    }
#endif

    return iNearest;
}
