/*
 * Copyright (c) 2018-2024 北京华恒盛世科技有限公司
 * QUICKPOOL is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
*/

#include "xdrbase.h"
#include "jm.h"

static struct hostNode *sptrLoadReqNode;

static int log_removenode(char *);
static int is_from_known(struct sockaddr_in *);
static void set_tclnode(struct Tcl_Node *, struct hostNode *, struct hostNode *, bool_t);
static unsigned int sizeof_host_reply(struct QuickPoolHostReply *);
static int sort_hosts(struct requirement *, int, char **, int, struct hostNode **, int);

static int filter_resources(struct request *, struct QuickPoolResourceReply *);
static int childjm_get_tmp_tmpdir_jobid(char ***, unsigned int *);

/**
 * @brief socket handler: Handle JS_READY request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode JS ready request
 *
 * @note        JS send this request when it is ready for job status update.
 */
void
handle_jsready(int fd, struct header *hdr, XDR *xdrs)
{
    /* Remember that JS can now accept job status updates, then drop
     * the request channel — the request carries no payload. */
    gboolJSReady = TRUE;
    gf_chan_close(fd);
} // end function handle_jsready

/**
 * @brief socket handler: Handle JS_NEW_JOB request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode new job request
 *
 * @note        create job entry and put it into JM job list.
 *              call fork_2start_job to start the job and send
 *              reply to JS.
 */
void
handle_newjob(int fd, struct header *hdr, XDR *xdrs)
{
    struct header outhdr;
    gf_init_header(&outhdr);

    struct request jobExecReply;
    memset(&jobExecReply, 0, sizeof(struct request));

    /* Decode the job description sent by JS. */
    struct jobInfo jobBase;
    memset(&jobBase, 0, sizeof(struct jobInfo));
    if (!libxdr_jobbase(xdrs, &jobBase, hdr, NULL)) {
        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "libxdr_jobbase");
        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);
        return;
    }

    /*
     * Both variables are read at the sendReply label, which the
     * duplicate-job shortcut below jumps to.  "reply" MUST be
     * initialized here: it was previously read uninitialized
     * (undefined behavior) whenever the job was already known.
     * 0 means success for that re-acknowledge path.
     */
    int reply = 0;
    char strJobId[32];

    /* If the job already sits in the JM job list, do not start it a
     * second time — just re-send the execution reply for it. */
    bool_t found = FALSE;
    struct jobEntry *job;
    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = (struct jobEntry *)job->forw) {

        if (job->job_base.jobid == jobBase.jobid) {
            found = TRUE;
            break;
        }
    }

    if (found) {
        goto sendReply;
    }

    job = (struct jobEntry *)calloc(1, sizeof(struct jobEntry));
    if (NULL == job) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        return;
    }

    /* Shallow copy: the entry takes over the pointers inside jobBase
     * (submit, thresholds, ...), so jobBase must not be freed here. */
    memcpy((char *)&job->job_base, (char *)&jobBase, sizeof(struct jobInfo));

    job->job_base.status &= ~JOB_STATUS_MIG;
    job->job_base.start_time = time(NULL);
    job->job_base.reasons = 0;
    job->job_base.subreasons = 0;

    job->running = 0;
    if (create_jmjob(job, &jobBase, &reply) < 0) {

        FREEUP(job);
        outhdr.reqcode = reply;
        gf_sendback_header(fd, &outhdr);

        return;
    }

    job->execute_flag = 0;

    if (0 > job->running) {
        job->running = 0;
    }

    if (set_execute_user(job) < 0) {

        delete_jmjob(job);
        outhdr.reqcode = ERROR_NO_USER;
        gf_sendback_header(fd, &outhdr);

        return;
    }

    /* A non-empty pre-job command puts the job into the PREJOB phase
     * before the real command runs. */
    if (NULL != job->job_base.submit->prejob_command
        && '\0' != job->job_base.submit->prejob_command[0]) {

        JM_SET_STATE(job, (JOB_STATUS_RUN | JOB_STATUS_PREJOB))
    } else {
        JM_SET_STATE(job, JOB_STATUS_RUN);
    }

    reply = fork_2start_job(job);

    if (0 != reply) {
        log_message(ERR, NOCLASS, "%s: failed to start job <%s> in %s().", __func__, 
                    jobid_2string(job->job_base.jobid, strJobId), "fork_2start_job");
        delete_jmjob(job);

        outhdr.reqcode = reply;
        gf_sendback_header(fd, &outhdr);

        return;
    } else {
        jmlog_newstatus(job);
    }

sendReply:

    /* Build the reply: jobid, job pid and job pgid.
     * NOTE(review): relaunch() is assumed not to return (daemon
     * restart); otherwise the NULL buffers below would be used. */
    jobExecReply.number = 3;
    jobExecReply.keyvalues = (key_value_t *)calloc(jobExecReply.number, sizeof(key_value_t));
    if (NULL == jobExecReply.keyvalues) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    unsigned int num = 0;
    jobExecReply.keyvalues[num].key = REQUEST_KEY_JOBIDS;
    jobExecReply.keyvalues[num].value = (char *)calloc(32, sizeof(char));
    if (NULL == jobExecReply.keyvalues[num].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }
    snprintf(jobExecReply.keyvalues[num].value, 32, "%lld", job->job_base.jobid);
    num ++;

    jobExecReply.keyvalues[num].key = REQUEST_KEY_JOBPID;
    jobExecReply.keyvalues[num].value = (char *)calloc(12, sizeof(char));
    if (NULL == jobExecReply.keyvalues[num].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }
    snprintf(jobExecReply.keyvalues[num].value, 12, "%d", job->job_base.job_pid);
    num ++;

    jobExecReply.keyvalues[num].key = REQUEST_KEY_JOBPGID;
    jobExecReply.keyvalues[num].value = (char *)calloc(12, sizeof(char));
    if (NULL == jobExecReply.keyvalues[num].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }
    snprintf(jobExecReply.keyvalues[num].value, 12, "%d", job->job_base.job_pgid);
    num ++;

    unsigned int len = gf_xdrsize_request(&jobExecReply);

    char *replyBuf;
    replyBuf = (char *)calloc(len, sizeof(char));
    if (NULL == replyBuf) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", len);
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    /* Encode the reply into replyBuf; reply is 0 on success (or the
     * fork_2start_job / duplicate-shortcut result). */
    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, len, XDR_ENCODE);
    outhdr.reqcode = reply;
    if (!gf_xdr_message(&xdrs2, (char *)&jobExecReply, &outhdr, NULL, gf_xdr_request, NULL)) {

        log_message(ERR, NOCLASS, STR_JOB_FAIL_S_M, __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "gf_xdr_request");

        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);

        xdr_destroy(&xdrs2);
        FREEUP(replyBuf);
        gf_xdr_free(gf_xdr_request, &jobExecReply);
        return;
    }

    /* Queue the encoded bytes and ask epoll to tell us when the socket
     * becomes writable. */
    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    gf_xdr_free(gf_xdr_request, &jobExecReply);
    xdr_destroy(&xdrs2);

    return;
} // end function handle_newjob

/**
 * @brief socket handler: Handle JS_SWIT_JOB request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode switch job request
 *
 * @note        Job is switched from one queue to another, its base
 *              information could be changed.
 */
void
handle_switchjob(int fd, struct header *hdr, XDR *xdrs)
{
    struct header outhdr;
    gf_init_header(&outhdr);

    /* Decode the switched job's new base information. */
    struct jobInfo jobBase;
    memset(&jobBase, 0, sizeof(struct jobInfo));
    if (!libxdr_jobbase(xdrs, &jobBase, hdr, NULL)) {
        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "libxdr_jobbase");
        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);
        return;
    }

    /* Find the job in the JM job list. */
    bool_t found = FALSE;
    struct jobEntry *job;
    for (job = (struct jobEntry *)glistJmJobs->back; job != (struct jobEntry *)glistJmJobs;
         job = (struct jobEntry *)job->back) {

        if (job->job_base.jobid == jobBase.jobid) {
            found = TRUE;
            break;
        }
    }

    char strJobId[32];
    if (!found) {
        outhdr.reqcode = ERROR_NO_JOB;
        gf_sendback_header(fd, &outhdr);
        log_message(ERR, NOCLASS, "%s: js trying to switch a non-exist job <%s>",
                    __func__, jobid_2string(jobBase.jobid, strJobId));
        return;
    }

    if (IS_JOB_ENDED(job->job_base.status)) {
        outhdr.reqcode = ERROR_JOB_FINISH;
        gf_sendback_header(fd, &outhdr);
        return;
    }

    /* Rebuild the run window from the NEW queue's description.  The
     * old code tested job->job_base.run_window here, but that pointer
     * was just freed by FREEUP above it, so the window was never
     * re-parsed; the new window string lives in jobBase.run_window.
     * NOTE(review): run_winlist is passed to gf_list_free just before —
     * confirm the list head itself remains valid for parse_time_window. */
    gf_list_free(job->job_base.run_winlist, NULL);
    FREEUP(job->job_base.run_window);

    if (NULL != jobBase.run_window && '\0' != jobBase.run_window[0]) {
        job->job_base.run_window = parse_time_window(jobBase.run_window,
                                                     job->job_base.run_winlist, "queue");
    }
    job->winclose_time = time(NULL);

    /* Take ownership of the new submit record; release the old one. */
    gf_xdr_free(libxdr_jobinfo_submit, job->job_base.submit);
    job->job_base.submit = jobBase.submit;

    job->job_base.nice = jobBase.nice;
    job->job_base.job_attribute = jobBase.job_attribute;

    gf_xdr_free(libxdr_thresholds, &(job->job_base.thresholds));
    dup_thresholds(&(job->job_base.thresholds), &(jobBase.thresholds));

    memcpy((char *)&job->job_base.resource_limits[RESOURCE_LIMIT_RUN],
           (char *)&jobBase.resource_limits[RESOURCE_LIMIT_RUN],
           sizeof(struct resourceLimit));

    /* Re-install resume / stop conditions from the new queue. */
    FREEUP(job->job_base.resume_condition);
    free_requirement(job->resume_cond);
    FREEUP(job->resume_cond);
    if (jobBase.resume_condition && jobBase.resume_condition[0] != '\0') {

        if ((job->resume_cond = check_threshold(jobBase.resume_condition)) == NULL) {
            log_message(ERR, NOCLASS, "%s: Job <%s> failed in %s(%s).", __func__,
                        jobid_2string(job->job_base.jobid, strJobId),
                        "check_threshold", jobBase.resume_condition);
        } else {
            job->job_base.resume_condition = copy_string(jobBase.resume_condition);
        }
    }

    FREEUP(job->job_base.stop_condition);
    free_requirement(job->stop_cond);
    FREEUP(job->stop_cond);
    if (jobBase.stop_condition && jobBase.stop_condition[0] != '\0') {
        if ((job->stop_cond = check_threshold(jobBase.stop_condition)) == NULL) {
            log_message(ERR, NOCLASS, "%s: Job <%s> failed in %s(%s).", __func__,
                        jobid_2string(job->job_base.jobid, strJobId),
                        "check_threshold", jobBase.stop_condition);
        } else {
            job->job_base.stop_condition = copy_string(jobBase.stop_condition);
        }
    }

    if (0 != job->job_base.resource_limits[RESOURCE_LIMIT_RUN].rlim_curh) {
        job->job_base.resource_limits[RESOURCE_LIMIT_RUN].rlim_curl = 0x7fffffff;
    }

    /* Re-insert the job at its new list position with SIGCHLD blocked,
     * so the child reaper cannot walk a half-linked list. */
    sigset_t newmask, oldmask;
    sigemptyset(&newmask);
    sigaddset(&newmask, SIGCHLD);
    sigprocmask(SIG_BLOCK, &newmask, &oldmask);
    gf_list_remove(glistJmJobs, (LIST_T *)job);
    putjob_injmJL(job);
    sigprocmask(SIG_SETMASK, &oldmask, NULL);

    if (renice_job(job) < 0) {
        log_message(DEBUG, EXECUTE, "%s: renice job <%s> failed",
                    __func__, jobid_2string(job->job_base.jobid, strJobId));
    }

    /* Reply carries just the job id. */
    struct request jobExecReply;
    memset(&jobExecReply, 0, sizeof(struct request));

    jobExecReply.number = 1;
    jobExecReply.keyvalues = (key_value_t *)calloc(jobExecReply.number, sizeof(key_value_t));
    if (NULL == jobExecReply.keyvalues) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    unsigned int num = 0;
    jobExecReply.keyvalues[num].key = REQUEST_KEY_JOBIDS;
    jobExecReply.keyvalues[num].value = (char *)calloc(32, sizeof(char));
    if (NULL == jobExecReply.keyvalues[num].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(jobBase.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }
    snprintf(jobExecReply.keyvalues[num].value, 32, "%lld", job->job_base.jobid);
    num ++;

    unsigned int len = gf_xdrsize_request(&jobExecReply);

    char *replyBuf;
    replyBuf = (char *)calloc(len, sizeof(char));
    if (NULL == replyBuf) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", len);
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, len, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, (char *)&jobExecReply, &outhdr, NULL, gf_xdr_request, NULL)) {

        log_message(ERR, NOCLASS, STR_JOB_FAIL_S_M, __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "gf_xdr_request");

        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);

        xdr_destroy(&xdrs2);
        FREEUP(replyBuf);
        gf_xdr_free(gf_xdr_request, &jobExecReply);
        return;
    }

    /* Queue the encoded reply and wait for the socket to be writable. */
    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    gf_xdr_free(gf_xdr_request, &jobExecReply);
    xdr_destroy(&xdrs2);

    return;
} // end function handle_switchjob

/**
 * @brief socket handler: Handle JS_MODIFY_JOB request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode switch job request
 *
 * @note        Job parameter is modified, its base information could be changed.
 */
void
handle_modifyjob(int fd, struct header *hdr, XDR *xdrs)
{
    struct header hdrOut;
    gf_init_header(&hdrOut);

    /* Decode the modified job base information from JS. */
    struct jobInfo newBase;
    memset(&newBase, 0, sizeof(struct jobInfo));
    if (!libxdr_jobbase(xdrs, &newBase, hdr, NULL)) {
        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "libxdr_jobbase");
        hdrOut.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &hdrOut);
        return;
    }

    /* Look the job up in the JM job list, walking backwards. */
    struct jobEntry *entry;
    bool_t isFound = FALSE;
    for (entry = (struct jobEntry *)glistJmJobs->back;
         entry != (struct jobEntry *)glistJmJobs;
         entry = (struct jobEntry *)entry->back) {

        if (entry->job_base.jobid != newBase.jobid) {
            continue;
        }
        isFound = TRUE;
        break;
    }

    char jobIdStr[32];
    if (!isFound) {
        hdrOut.reqcode = ERROR_NO_JOB;
        gf_sendback_header(fd, &hdrOut);
        log_message(ERR, NOCLASS, "%s: js trying to modify a non-existent job <%s>",
                    __func__, jobid_2string(newBase.jobid, jobIdStr));
        return;
    }

    if (IS_JOB_ENDED(entry->job_base.status)) {
        hdrOut.reqcode = ERROR_JOB_FINISH;
        gf_sendback_header(fd, &hdrOut);
        return;
    }

    /* Install the new CPU / RSS / run-time resource limits. */
    memcpy((char *)&entry->job_base.resource_limits[RESOURCE_LIMIT_CPU],
           (char *)&newBase.resource_limits[RESOURCE_LIMIT_CPU],
           sizeof(struct resourceLimit));
    memcpy((char *)&entry->job_base.resource_limits[RESOURCE_LIMIT_RSS],
           (char *)&newBase.resource_limits[RESOURCE_LIMIT_RSS],
           sizeof(struct resourceLimit));
    memcpy((char *)&entry->job_base.resource_limits[RESOURCE_LIMIT_RUN],
           (char *)&newBase.resource_limits[RESOURCE_LIMIT_RUN],
           sizeof(struct resourceLimit));

    if (0 != entry->job_base.resource_limits[RESOURCE_LIMIT_RUN].rlim_curh) {
        entry->job_base.resource_limits[RESOURCE_LIMIT_RUN].rlim_curl = 0x7fffffff;
    }

    /* Swap in the new submit record; free the old one first. */
    gf_xdr_free(libxdr_jobinfo_submit, entry->job_base.submit);
    entry->job_base.submit = newBase.submit;

    /* Build the reply, which carries just the job id. */
    struct request replyReq;
    memset(&replyReq, 0, sizeof(struct request));

    replyReq.number = 1;
    replyReq.keyvalues = (key_value_t *)calloc(replyReq.number, sizeof(key_value_t));
    if (NULL == replyReq.keyvalues) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(newBase.jobid, jobIdStr), "calloc");
        hdrOut.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &hdrOut);
        relaunch();
    }

    unsigned int idx = 0;
    replyReq.keyvalues[idx].key = REQUEST_KEY_JOBIDS;
    replyReq.keyvalues[idx].value = (char *)calloc(32, sizeof(char));
    if (NULL == replyReq.keyvalues[idx].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(newBase.jobid, jobIdStr), "calloc");
        hdrOut.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &hdrOut);
        relaunch();
    }
    snprintf(replyReq.keyvalues[idx].value, 32, "%lld", entry->job_base.jobid);
    idx ++;

    unsigned int bufLen = gf_xdrsize_request(&replyReq);

    char *encodeBuf = (char *)calloc(bufLen, sizeof(char));
    if (NULL == encodeBuf) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", bufLen);
        hdrOut.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &hdrOut);
        relaunch();
    }

    XDR encXdrs;
    xdrmem_create(&encXdrs, encodeBuf, bufLen, XDR_ENCODE);

    if (!gf_xdr_message(&encXdrs, (char *)&replyReq, &hdrOut, NULL, gf_xdr_request, NULL)) {

        log_message(ERR, NOCLASS, STR_JOB_FAIL_S_M, __func__,
                    jobid_2string(entry->job_base.jobid, jobIdStr), "gf_xdr_request");

        hdrOut.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &hdrOut);

        xdr_destroy(&encXdrs);
        FREEUP(encodeBuf);
        gf_xdr_free(gf_xdr_request, &replyReq);

        return;
    }

    /* Queue the reply bytes and let epoll signal writability. */
    gf_add_data(fd, encodeBuf, XDR_GETPOS(&encXdrs));

    struct epoll_event epollEv;
    memset(&epollEv, 0, sizeof(struct epoll_event));
    epollEv.data.fd = fd;
    epollEv.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &epollEv);

    gf_xdr_free(gf_xdr_request, &replyReq);
    xdr_destroy(&encXdrs);
} // end function handle_modifyjob

/**
 * @brief socket handler: Handle JS_SIG_JOB request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode singal job request
 *
 * @note        JS send singal to job. kill, stop, resume or other signals.
 */
void
handle_signaljob(int fd, struct header *hdr, XDR *xdrs)
{
    struct header outhdr;
    gf_init_header(&outhdr);

    struct request sigJobReq;
    memset(&sigJobReq, 0, sizeof(struct request));

    if (!gf_xdr_request(xdrs, &sigJobReq, hdr, NULL)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_request");
        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);
	return;
    }

    JOBID_T jobid=0;
    int sigValue=QUICKPOOL_SIGNAL_NULL;
    unsigned int i;
    for (i = 0; i < sigJobReq.number; i++) {
        switch (sigJobReq.keyvalues[i].key) {
        case REQUEST_KEY_JOBIDS:
            jobid = atoll(sigJobReq.keyvalues[i].value);
            break;
        case REQUEST_KEY_SIGNAL:
            sigValue = atoi(sigJobReq.keyvalues[i].value);
            break;
        }

        if (0 < jobid && QUICKPOOL_SIGNAL_NULL != sigValue) {
            break;
        }
    }

    if (0 == jobid || QUICKPOOL_SIGNAL_NULL == sigValue) {

        outhdr.reqcode = ERROR_ARGUMENT;
        gf_sendback_header(fd, &outhdr);

        gf_xdr_free(gf_xdr_request, &sigJobReq);
        return;
    }

    bool_t found = FALSE;
    struct jobEntry *job;
    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = (struct jobEntry *)job->forw) {

        if (job->job_base.jobid != jobid) {
            continue;
        }

        found = TRUE;
        break;
    }

    if (!found) {

        outhdr.reqcode = ERROR_NO_JOB;
        gf_sendback_header(fd, &outhdr);

        gf_xdr_free(gf_xdr_request, &sigJobReq);
        return;
    }

    int actFlag=0, jobReason=0, jobSubreason=0;
    for (i = 0; i < sigJobReq.number; i++) {
        switch (sigJobReq.keyvalues[i].key) {
        case REQUEST_KEY_CHKPNT_FLAG:
            actFlag = atoi(sigJobReq.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOB_REASON:
            jobReason = atoi(sigJobReq.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOB_SUBREASON:
            jobSubreason = atoi(sigJobReq.keyvalues[i].value);
            break;
        }
    }
    gf_xdr_free(gf_xdr_request, &sigJobReq);

    int savedActReasons=0;
    int savedActSubReasons=0;
    if (jobReason & SUSPEND_REASON_JSLOCK) {

        job->job_base.reasons = jobReason;
        job->job_base.subreasons = jobSubreason;
        savedActReasons = job->susp_reason;
        savedActSubReasons = job->susp_subreason;
        job->susp_reason = jobReason;
        job->susp_subreason = jobSubreason;
    }

    if (job->post_started) {

        if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        goto sendReply;
    }

    if (IS_JOB_ENDED(job->job_base.status)) {

        if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        goto sendReply;
    }

    if (-1 == job->job_base.job_pgid) {

        JM_SET_STATE(job, JOB_STATUS_EXIT);
        jmlog_newstatus(job);

        if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        goto sendReply;
    }

    char strJobId[32];
    if (!JOB_STARTED(job)) {

        if (is_terminate_signal(sigValue)) {

            int ret;
            ret = signal_job_begin(job, sigValue, actFlag, FALSE);
            if (0 > ret) {
                outhdr.reqcode = ERROR_SIGNAL_PENDING;
            }

            jmlog_newstatus(job);

            if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
                job->susp_reason = savedActReasons;
                job->susp_subreason = savedActSubReasons;
            }

            if (0 < outhdr.reqcode) {
                gf_sendback_header(fd, &outhdr);
                return;
            }

            goto sendReply;
        }

        outhdr.reqcode = ERROR_SIGNAL_PENDING;
        gf_sendback_header(fd, &outhdr);

        char sigStr[30];
        qpsignal_2string(sigValue, sigStr);
        log_message(DEBUG, DEBUGSIGNAL, "%s: retry signal %s for job <%s>",
                    __func__, sigStr, jobid_2string(job->job_base.jobid, strJobId));

        if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        return;
    }

    if (IS_JOB_PENDING(job->job_base.status)) {
        outhdr.reqcode = ERROR_SIGNAL_PENDING;
        gf_sendback_header(fd, &outhdr);

        if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        return;
    }

    int ret;
    if (job->job_base.act_pid || (job->job_base.status & JOB_STATUS_MIG)) {

        ret = signal_job_begin(job, sigValue, actFlag, FALSE);
        if (0 > ret) {
            outhdr.reqcode = ERROR_SIGNAL_PENDING;
        } else {
            job->job_base.status &= ~JOB_STATUS_MIG;
        }

        jmlog_newstatus(job);

        if (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        if (0 < outhdr.reqcode) {
            gf_sendback_header(fd, &outhdr);
            return;
        }

        goto sendReply;
    }

    ret = signal_job_begin(job, sigValue, actFlag, FALSE);
    if (0 > ret) {

        outhdr.reqcode = ERROR_SIGNAL_PENDING;
        jmlog_newstatus(job);

        if  (NULL != job && (jobReason & SUSPEND_REASON_JSLOCK)) {
            job->susp_reason = savedActReasons;
            job->susp_subreason = savedActSubReasons;
        }

        gf_sendback_header(fd, &outhdr);

        return;
    }

    jmlog_newstatus(job);

 sendReply:

    gf_init_header(&outhdr);

    struct request jobExecReply;
    memset(&jobExecReply, 0, sizeof(struct request));

    unsigned int num = 2; // jobid, status

    if (0 < job->job_base.act_pid) {
        num ++;
    }

    if (0 < job->actcmd_state) {
        num ++;
    }

    if (0 < job->job_base.reasons) {
        num ++;
    }

    jobExecReply.number = num;
    jobExecReply.keyvalues = (key_value_t *)calloc(jobExecReply.number, sizeof(key_value_t));
    if (NULL == jobExecReply.keyvalues) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    num = 0;
    jobExecReply.keyvalues[num].key = REQUEST_KEY_JOBIDS;
    jobExecReply.keyvalues[num].value = (char *)calloc(32, sizeof(char));
    if (NULL == jobExecReply.keyvalues[num].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }
    snprintf(jobExecReply.keyvalues[num].value, 32, "%lld", job->job_base.jobid);
    num ++;

    jobExecReply.keyvalues[num].key = REQUEST_KEY_JOBSTATUS;
    jobExecReply.keyvalues[num].value = (char *)calloc(12, sizeof(char));
    if (NULL == jobExecReply.keyvalues[num].value) {
        log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }
    snprintf(jobExecReply.keyvalues[num].value, 12, "%d", job->job_base.status);
    num ++;

    if (0 < job->job_base.act_pid) {
        jobExecReply.keyvalues[num].key = REQUEST_KEY_JOB_ACTPID;
        jobExecReply.keyvalues[num].value = (char *)calloc(12, sizeof(char));
        if (NULL == jobExecReply.keyvalues[num].value) {
            log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                        jobid_2string(job->job_base.jobid, strJobId), "calloc");
            outhdr.reqcode = ERROR_MEMORY;
            gf_sendback_header(fd, &outhdr);
            relaunch();
        }
        snprintf(jobExecReply.keyvalues[num].value, 12, "%d", job->job_base.act_pid);
        num ++;
    }

    if (0 < job->actcmd_state) {
        jobExecReply.keyvalues[num].key = REQUEST_KEY_JOB_ACTSTATUS;
        jobExecReply.keyvalues[num].value = (char *)calloc(12, sizeof(char));
        if (NULL == jobExecReply.keyvalues[num].value) {
            log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                        jobid_2string(job->job_base.jobid, strJobId), "calloc");
            outhdr.reqcode = ERROR_MEMORY;
            gf_sendback_header(fd, &outhdr);
            relaunch();
        }
        snprintf(jobExecReply.keyvalues[num].value, 12, "%d", job->actcmd_state);
        num ++;
    }

    if (0 < job->job_base.reasons) {
        jobExecReply.keyvalues[num].key = REQUEST_KEY_JOB_REASON;
        jobExecReply.keyvalues[num].value = (char *)calloc(12, sizeof(char));
        if (NULL == jobExecReply.keyvalues[num].value) {
            log_message(ERR, NOCLASS, "%s: job <%s> failed in %s(), %m.", __func__,
                        jobid_2string(job->job_base.jobid, strJobId), "calloc");
            outhdr.reqcode = ERROR_MEMORY;
            gf_sendback_header(fd, &outhdr);
            relaunch();
        }
        snprintf(jobExecReply.keyvalues[num].value, 12, "%d", job->job_base.reasons);
        num ++;
    }

    unsigned int size;
    size = gf_xdrsize_request(&jobExecReply);

    char *replyBuf;
    replyBuf = (char *)calloc(size, sizeof(char));
    if (NULL == replyBuf) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", size);
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, size, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, (char *)&jobExecReply, &outhdr, NULL, gf_xdr_request, NULL)) {
        log_message(ERR, NOCLASS, STR_JOB_FAIL_S_M, __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "gf_xdr_request");

        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);

        xdr_destroy(&xdrs2);
        FREEUP(replyBuf);
        gf_xdr_free(gf_xdr_request, &jobExecReply);
        return;
    }

    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    if (NULL != job) {
        job->actcmd_state = ACT_NO;
    }

    gf_xdr_free(gf_xdr_request, &jobExecReply);
    xdr_destroy(&xdrs2);

    return;
} // end function handle_signaljob

/**
 * @brief socket handler: Handle CMD_TASK_START request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode job task request
 * @param[in]   from         #4: node who send me start task request
 *
 * @note fork a child to start job task. The connection is kept open
 *       (CHANNEL_LONGCONN) so the task can report back over it.
 *       Ownership of all strings decoded from the request transfers to
 *       the jobTask entry inserted into glistJobTask.
 */
void
handle_start_jobtask(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    char addrbuf[24+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    struct request taskRequest;
    memset(&taskRequest, 0, sizeof(struct request));

    if (!gf_xdr_request(xdrs, &taskRequest, hdr, NULL)) {
        log_message(ERR, NOCLASS, "%s: gf_xdr_request() failed, request comes from %s.",
                    __func__, gf_sockaddr_str(from, addrbuf));

        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);

        return;
    }

    /* Pull the task attributes out of the decoded key/value list. */
    int options=0, numArgc=0, nodeRank=0, numTask=0, nodePort=0;
    JOBID_T jobId = 0;
    char **taskArgv=NULL, *strExecUser=NULL, *strExecCwd=NULL, *strFromNode=NULL;
    char *strJobFile=NULL;
    unsigned int i;
    for (i=0; i<taskRequest.number; i++) {
        switch (taskRequest.keyvalues[i].key) {
        case REQUEST_KEY_OPTIONS:
            options = atoi(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOBIDS:
            jobId = atoll(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_NODE_RANK:
            nodeRank = atoll(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_NUM_TASK:
            numTask = atoi(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_USERNAME:
            strExecUser = copy_string(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_EXECUTE_CWD:
            strExecCwd = copy_string(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOB_FILE:
            strJobFile = copy_string(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_HOSTNAME:
            strFromNode = copy_string(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_FIRSTNODE_PORT:
            nodePort = atoi(taskRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_TASK_ARGC:
            numArgc = atoi(taskRequest.keyvalues[i].value);
            taskArgv = (char **)calloc(numArgc+1, sizeof(char *));
            if (NULL == taskArgv) {
                log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                            "calloc", (numArgc+1)*sizeof(char *));
                outhdr.reqcode = ERROR_MEMORY;
                gf_sendback_header(fd, &outhdr);
                relaunch();
            }
            numArgc = 0;    /* reused as the fill cursor for REQUEST_KEY_TASK_ARGV */
            break;
        case REQUEST_KEY_TASK_ARGV:
            /* Guard against a malformed request that sends ARGV before ARGC;
             * previously this dereferenced a NULL taskArgv. */
            if (NULL != taskArgv) {
                taskArgv[numArgc++] = copy_string(taskRequest.keyvalues[i].value);
                taskArgv[numArgc] = NULL;
            }
            break;
        }
    }

    gf_xdr_free(gf_xdr_request, &taskRequest);

    /* Resolve the sender BEFORE allocating the task structure so a failed
     * lookup cannot leak it (the original allocated first and returned
     * without freeing). */
    struct hostNode *hNode;
    hNode = find_node_byaddr(from, (char *)__func__);
    if (NULL == hNode) {
        log_message(ERR, NOCLASS, "%s: cannot find node <%s> when trying to start job task.",
                    __func__, gf_sockaddr_str(from, addrbuf));

        /* Release everything decoded from the request (fixes a leak on
         * this error path). */
        if (NULL != taskArgv) {
            int k;
            for (k = 0; NULL != taskArgv[k]; k++) {
                FREEUP(taskArgv[k]);
            }
            FREEUP(taskArgv);
        }
        FREEUP(strExecUser);
        FREEUP(strExecCwd);
        FREEUP(strJobFile);
        FREEUP(strFromNode);

        outhdr.reqcode = ERROR_NO_HOST;
        gf_sendback_header(fd, &outhdr);

        return;
    }

    struct jobTask *task;
    task = (struct jobTask *)calloc(1, sizeof(struct jobTask));
    if (NULL == task) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", sizeof(struct jobTask));
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    task->options = options;
    task->jobid = jobId;
    task->start_time = time(NULL);
    task->conn_fd = fd;
    gf_chan_state(fd, CHANNEL_LONGCONN);

    /* Decide which node the task came from: explicit request key first,
     * then the QP_TASK_FIRSTNODE environment, then the sender itself. */
    if (NULL != strFromNode) {
        task->from_node = strFromNode;
    } else {
        strFromNode = getenv("QP_TASK_FIRSTNODE");
        if (NULL != strFromNode) {
            task->from_node = copy_string(strFromNode);
        } else {
            task->from_node = copy_string(hNode->name);
        }
    }

    /* Port: explicit request key wins over the QP_TASK_PORT environment. */
    if (0 < nodePort) {
        task->port = nodePort;
    } else {
        char *strPort;
        strPort = getenv("QP_TASK_PORT");
        if (NULL != strPort) {
            task->port = atoi(strPort);
        }
    }

    task->node_rank = nodeRank;
    task->num_task = numTask;

    task->task_usage = (struct jobResourceUsage *)calloc(1, sizeof(struct jobResourceUsage));
    if (NULL == task->task_usage) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                    "calloc", sizeof(struct jobResourceUsage));
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    /* Ownership of the decoded strings transfers to the task entry. */
    task->execute_user = strExecUser;
    task->cwd = strExecCwd;
    task->script = strJobFile;
    task->task_argv = taskArgv;

    gf_list_insert(glistJobTask, glistJobTask->forw, (LIST_T *)task);

    int ret;
    ret = fork_2start_jobtask(task);
    if (0 != ret) {

        char strJobId[32];
        log_message(ERR, NOCLASS, "%s: failed to start job <%s> task on node %s.", __func__, 
                    jobid_2string(jobId, strJobId), gptrMyHost->name);
    }

    outhdr.reqcode = ret;
    gf_sendback_header(fd, &outhdr);

    return;
} // end function handle_start_jobtask

/**
 * @brief socket handler: Handle CMD_SIGNAL_TASK request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode job task request
 * @param[in]   from         #4: node who send me signal task request
 *
 * @note Looks up the first task in glistJobTask with a matching job id
 *       and signals its process group. No reply is sent on any path.
 */
void
handle_signal_jobtask(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    char addrbuf[24+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    struct request signalRequest;
    memset(&signalRequest, 0, sizeof(struct request));

    if (!gf_xdr_request(xdrs, &signalRequest, hdr, NULL)) {

        log_message(ERR, NOCLASS, "%s: gf_xdr_request() failed, request comes from %s.",
                    __func__, gf_sockaddr_str(from, addrbuf));

        return;
    }

    int signalValue=-1;
    JOBID_T jobId=0;
    unsigned int i;
    for (i=0; i<signalRequest.number; i++) {
        switch (signalRequest.keyvalues[i].key) {
        case REQUEST_KEY_JOBIDS:
            jobId = atoll(signalRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_SIGNAL:
            signalValue = atoi(signalRequest.keyvalues[i].value);
            break;
        default:
            break;
        }
    }

    /* Release the decoded request now that the fields were extracted;
     * the original leaked it on every path (compare handle_start_jobtask). */
    gf_xdr_free(gf_xdr_request, &signalRequest);

    /* Only classic POSIX signal numbers 0..31 are accepted. */
    if (0 > signalValue || 32 <= signalValue) {
        log_message(ERR, NOCLASS, "%s: unknown signal value <%d> request comes from %s.",
                    __func__, signalValue, gf_sockaddr_str(from, addrbuf));
        return;
    }

    /* Signal the process group of the first task matching the job id. */
    struct jobTask *task;
    for (task=(struct jobTask *)glistJobTask->forw; task!=(struct jobTask *)glistJobTask;
         task=(struct jobTask *)task->forw) {

        if (task->jobid != jobId) {
            continue;
        }

        kill_task_pg(task, signalValue);

        break;
    }

    return;
} // end function handle_signal_jobtask

/**
 * @brief socket handler: Handle JS_PROBE request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode probe request
 *
 * @note        Encode a bare success header and queue it on the channel,
 *              then switch the fd to write-readiness so the reply is
 *              flushed; this tells JS that this JM is alive.
 */
void
handle_probe(int fd, struct header *hdr, XDR * xdrs)
{
    struct header replyHdr;
    gf_init_header(&replyHdr);

    unsigned int bufLen = HEADER_LEN+10;

    char *encodeBuf = (char *)calloc(bufLen, sizeof(char));
    if (NULL == encodeBuf) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", bufLen);
        replyHdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &replyHdr);
        relaunch();
    }

    XDR encodeXdr;
    xdrmem_create(&encodeXdr, encodeBuf, bufLen, XDR_ENCODE);

    /* Only the header is encoded — a probe reply carries no payload. */
    if (!gf_xdr_message(&encodeXdr, NULL, &replyHdr, NULL, NULL, NULL)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_message");

        replyHdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &replyHdr);

        xdr_destroy(&encodeXdr);
        FREEUP(encodeBuf);
        return;
    }

    /* Hand the buffer to the channel layer; it owns encodeBuf from here. */
    gf_add_data(fd, encodeBuf, XDR_GETPOS(&encodeXdr));

    /* Re-arm the fd for edge-triggered writability so epoll drains it. */
    struct epoll_event writeEvent;
    memset(&writeEvent, 0, sizeof(struct epoll_event));
    writeEvent.data.fd = fd;
    writeEvent.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &writeEvent);

    xdr_destroy(&encodeXdr);

    return;
} // end function handle_probe

/**
 * @brief       Got startup reply from JS and create my running job list.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: communication header
 * @param[in]   xdrs         #3: XDR data
 *
 * @note        On reqcode 0 the master's jmPackage is decoded, every job
 *              it reports is merged into glistJmJobs (creating entries
 *              for jobs not already present), stale child-JM jobs found
 *              in the tmp directory are killed and their status/acct
 *              files removed, and global timing parameters are refreshed
 *              from the package. Other reqcodes are fatal or logged.
 */
void
handle_restart_reply(int fd, struct header *hdr, XDR *xdrs)
{
    switch (hdr->reqcode) {
    case 0:
    {
        struct jmPackage package;
        memset(&package, 0, sizeof(struct jmPackage));

        if (!xdr_jm_package(xdrs, &package, hdr, NULL)) {

            log_message(ERR, NOCLASS, "%s: failed to decode jmPackage from %s",
                        __func__, gstrBossNode);
            xdr_destroy(xdrs);
            return;
        }

        /* Merge every job reported by the master into our job list. */
        unsigned int i;
        for (i = 0; i < package.num_job; i++) {
            struct jobInfo *jobBase = &(package.base_jobinfo[i]);

            /* Do we already know this job? */
            bool_t found = FALSE;
            struct jobEntry *job;
            for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
                 job = (struct jobEntry *)job->forw) {

                if (job->job_base.jobid != jobBase->jobid) {
                    continue;
                }

                found = TRUE;
                break;
            }

            if (!found) {
                job = (struct jobEntry *)calloc (1, sizeof(struct jobEntry));
                if (NULL == job) {
                    log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                                "calloc", sizeof(struct jobEntry));
                    relaunch();
                }
            }

            memcpy(&(job->job_base), jobBase, sizeof(struct jobInfo));

            /* Resolve the execution user; a job whose user cannot be
             * resolved is skipped (and a freshly created entry is
             * released before continuing). */
            char strJobId[32];
            if (NULL == jobBase->execute_user || '\0' == jobBase->execute_user[0]) {
                job->execute_gid = 0;
                job->job_base.execute_uid = -1;
            } else {

                struct passwd *pw;
                pw = get_user_byname(jobBase->execute_user);
                if (NULL == pw || NULL == pw->pw_name) {
                    log_message(ERR, NOCLASS, STR_JOB_FAIL_S_S_M, __func__,
                                jobid_2string(jobBase->jobid, strJobId),
                                "get_user_byname", jobBase->execute_user);

                    if (!found) {
                        FREEUP(job);
                    }

                    continue;
                }

                job->execute_gid = pw->pw_gid;
                job->job_base.execute_uid = pw->pw_uid;
            }

            /* A non-empty execute_home means the job already started. */
            if (job->job_base.execute_home[0] != '\0') {
                job->execute_flag = JOB_EXEC_STARTED;
            } else {
                job->execute_flag = 0;
            }

            if (job->job_base.submit->prejob_command && job->job_base.submit->prejob_command[0] != '\0'
                && (job->job_base.execute_home[0] != '\0' || job->job_base.execute_cwd[0] != '\0')) {

                job->execute_flag |= JOB_EXEC_QPRE_OK | JOB_EXEC_QPRE_KNOWN;
            }

            job->job_base.act_signal = jobBase->act_signal;

            if (!found) {
                int reply;

                if (create_jmjob(job, jobBase, &reply) < 0) {
                    jm_die(DAEMON_QUIT_FATAL);
                }
            }

            reset_jobstatus(job);
            if (!(jmParams[RENICE_NEVER_AT_RESTART].bool_value)) {
                if (renice_job(job) < 0) {
                    log_message(ERR, NOCLASS, "%s: renice job <%s> failed",
                                __func__, jobid_2string(job->job_base.jobid, strJobId));
                }
            }

            job->running = jobBase->running;
            if (0 > job->running) {
                job->running = 0;
            }
        }

        /*get child jm tmp directory .****.tmpdir. directory name jobid values*/
        char **childJobID = NULL;
        unsigned int childJobNum = 0;
        /* ret must be signed: the helpers below report errors as negative
         * values, and the original `unsigned int ret` made every
         * `ret < 0` test dead code. */
        int ret;
        unsigned int j = 0;
        bool_t  jobFlag = FALSE;
        struct jobEntry childJmJob = {0};
        /*delete file and directory*/
        char tmpDirName[PATH_MAX+1] = {0};
        char strJobId[32] = {0};
        char fileBuf[PATH_MAX+1] = {0};

        ret = childjm_get_tmp_tmpdir_jobid(&childJobID, &childJobNum);
        if (ret < 0) {
            log_message(ERR, NOCLASS, "%s: get child jm tmp/.****.tmpdir. file name job id error.",
                        __func__);
        }

        /*if child jm job is UNKWN status, kill it*/
        for (j = 0; j < childJobNum; j++) {

            struct jobEntry *subJob;
            jobFlag = FALSE;
            for (subJob = (struct jobEntry *)glistJmJobs->forw; subJob != (struct jobEntry *)glistJmJobs;
                 subJob = (struct jobEntry *)subJob->forw) {

                /* atoll: job ids are 64-bit elsewhere (see the
                 * REQUEST_KEY_JOBIDS handling); atoi would truncate. */
                if (subJob->job_base.jobid == atoll(childJobID[j])) {
                    jobFlag = TRUE;
                    break;
                }

            }

            if (!jobFlag) {

                childJmJob.job_base.jobid = atoll(childJobID[j]);
                ret = jmread_jobstatus(&childJmJob);
                if (ret < 0) {
                    continue;
                }

                if (!childJmJob.job_base.number_node) {
                    continue;
                }

                childJmJob.inst_usage = (struct jobResourceUsage *)calloc(childJmJob.job_base.number_node,
                                                                          sizeof(struct jobResourceUsage));
                if (NULL == childJmJob.inst_usage) {
                    log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc",
                                childJmJob.job_base.number_node*sizeof(struct jobResourceUsage));
                    relaunch();
                }

                childJmJob.max_usage = (struct jobResourceUsage *)calloc(childJmJob.job_base.number_node,
                                                                         sizeof(struct jobResourceUsage));
                if (NULL == childJmJob.max_usage) {
                    log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc",
                                childJmJob.job_base.number_node*sizeof(struct jobResourceUsage));
                    relaunch();
                }

                ret = send_signal_2job(&childJmJob, SIGKILL, TRUE);
                if (ret < 0) {
                    log_message(ERR, NOCLASS, "%s: send_signal_2job kill %d fail.",
                        __func__, childJmJob.job_base.job_pgid);

                }

                /*delete tmp/.****.tmpdir. file and directory*/
                get_tmpdir_4job(tmpDirName, childJmJob.job_base.jobid,
                                childJmJob.job_base.execute_uid);
                snprintf(fileBuf, PATH_MAX, "%s/jobstatus.%s", tmpDirName,
                         jobid_2longstring(childJmJob.job_base.jobid, strJobId));

                if (unlink(fileBuf) < 0 && errno != ENOENT) {
                    log_message(ERR, NOCLASS, STR_JOB_FAIL_S_S_M, __func__,
                            jobid_2string(childJmJob.job_base.jobid, strJobId), "unlink", fileBuf);
                }

                snprintf(fileBuf, PATH_MAX, "%s/.%s.acct",
                         tmpDirName, childJmJob.job_base.submit->script);
                if (unlink(fileBuf) < 0 && errno != ENOENT) {
                    log_message(ERR, NOCLASS, STR_JOB_FAIL_S_S_M, __func__,
                                jobid_2string(childJmJob.job_base.jobid, strJobId),
                                "unlink", fileBuf);
                }

                if (rmdir(tmpDirName) < 0 && errno != ENOENT) {
                    log_message(ERR, NOCLASS, STR_JOB_FAIL_S_S_M, __func__,
                                jobid_2string(childJmJob.job_base.jobid, strJobId),
                                "rmdir", tmpDirName);
                }

                log_message(DEBUG, TRACER, "%s: kill UNKWN jobid:[%s], unlink fileBuf:[%s], rmdir tmpDirName:[%s].",
                        __func__, childJobID[j], fileBuf, tmpDirName);

                /* The usage buffers were only needed for the kill above;
                 * releasing them here fixes a per-iteration leak. */
                FREEUP(childJmJob.inst_usage);
                FREEUP(childJmJob.max_usage);
            }
        }

        /* NOTE(review): childJobID entries are presumably heap-allocated
         * by childjm_get_tmp_tmpdir_jobid; confirm whether each element
         * must also be freed before the array itself. */
        FREEUP(childJobID);

        /* Refresh globals from the master's parameter table. */
        gintJMSleepTime = package.jm_params[PARAM_KEY_JM_SLEEP_TIME].int_value;
        gintUpdateUsage = package.jm_params[PARAM_KEY_RUSAGE_UPDATE_RATE].int_value;
        gintUpdateUsagePercent = package.jm_params[PARAM_KEY_RUSAGE_UPDATE_PERCENT].int_value;
        gintJobTerminateInterval = package.jm_params[PARAM_KEY_JOB_TERMINATE_INTERVAL].int_value;

        gf_xdr_free(xdr_jm_package, &package);
        log_message(DEBUG, EXECUTE, "%s: got configuration from master %s all right",
                    __func__, gstrBossNode);

        return;
    }
    case ERROR_NO_HOST:

        log_message(ERR, NOCLASS, "%s: This host is not yet used by the batch system.",
                    __func__);
        jm_die(DAEMON_QUIT_FATAL);
    case ERROR_XDR:

        log_message(ERR, NOCLASS, "%s: xdr_jm_package failed", __func__);

        break;
    default:

        log_message(ERR, NOCLASS, "%s: Invalid return code %d from js on %s",
                    __func__, hdr->reqcode, gstrBossNode);
        relaunch();
    }

    return;
} // end function handle_restart_reply

/**
 * @brief socket handler: Handle CMD_JM_REBOOT request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode probe request
 * @param[in]   auth         #4: who issue the command
 *
 * @note        Only cluster admin or root could reboot JM. On success the
 *              restart flag is raised and the main loop does the restart.
 */
void
handle_reboot(int fd, struct header *hdr, XDR *xdrs, struct auth *auth)
{
    struct header replyHdr;
    char message[MAXMESSAGELENGTH+1];

    log_message(INFO, NOCLASS, "JM daemon rebooting...");

    gf_init_header(&replyHdr);

    /* Permission gate: cluster administrators and root only. */
    bool_t permitted = is_clusteradmin(auth->user_name) || (0 == auth->uid);
    if (!permitted) {

        log_message(ERR, NOCLASS, "%s: uid <%d> not allowed to perform control operation",
                    __func__, auth->uid);

        replyHdr.reqcode = ERROR_PERMISSION;
        snprintf(message, MAXMESSAGELENGTH, "%s: user[%s] has no permission to issue jm restart.",
                 __func__, auth->user_name);
        gf_errorback(fd, &replyHdr, message);

        return;
    }

    gf_sendback_header2(fd, &replyHdr);

    /* The main loop polls this flag and performs the actual restart. */
    gboolJmRestart = TRUE;

    return;
} // end function handle_reboot

/**
 * @brief socket handler: Handle JM_STATIC_INFO request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode node static information request
 * @param[in]   from         #4: node who send me its static information
 *
 * @note        Got node static information from slave JM, update it
 *              to cluster node table. On success, ownership of the
 *              decoded strings and GPU tables transfers to the node
 *              entry; on any failure after decoding they are released
 *              before returning (the original leaked them).
 */
void
handle_static_info(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    struct hostNode hostInfo;
    struct hostNode *node;
    struct header outhdr;
    char message[MAXMESSAGELENGTH+1];
    memset(&hostInfo, 0, sizeof(hostInfo));
    gf_init_header(&outhdr);

    if (!is_from_known(from)) {
        outhdr.reqcode = ERROR_PERMISSION;
        snprintf(message, MAXMESSAGELENGTH, "%s: from unknown node.", __func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    if (!gboolMaster) {
        log_message(WARNING, NOCLASS, "%s: I am not the master!", __func__);
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH,
                 "%s: send node static information to a wrong node <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    if (!jm_xdr_staticinfo(xdrs, (void *)&hostInfo, hdr, NULL)) {
        log_message(ERR, NOCLASS, "%s: jm_xdr_staticinfo failed", __func__);
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: decode static info failed.",__func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    node = find_node_byaddr(from, __func__);
    if (NULL == node) {
        outhdr.reqcode = ERROR_NO_HOST;
        snprintf(message, MAXMESSAGELENGTH, "%s: Cannot find the node by addr.", __func__);
        gf_errorback(fd, &outhdr, message);
        goto free_decoded;
    }

    if (NULL == gf_hash_find(gptrMyCluster->node_table, node->name)) {
        char addrbuf[24+1];

        outhdr.reqcode = ERROR_NO_HOST;
        snprintf(message, MAXMESSAGELENGTH, "%s: Got info from client-only host [%s]",
                 __func__, node->name);
        gf_errorback(fd, &outhdr, message);
        log_message(ERR, NOCLASS, "%s: Got info from client-only host %s %s",
                    __func__, gf_sockaddr_str(from, addrbuf), node->name);
        goto free_decoded;
    }

    if (hostInfo.static_info.maxcpu <= 0 || hostInfo.static_info.maxmem < 0) {
        outhdr.reqcode = ERROR_ARGUMENT;
        snprintf(message, MAXMESSAGELENGTH, "%s: Invalid info received: maxcpu %d maxmem %f", 
                 __func__, hostInfo.static_info.maxcpu, hostInfo.static_info.maxmem);
        gf_errorback(fd, &outhdr, message);
        log_message(ERR, NOCLASS, "%s: Invalid info received: maxcpu %d maxmem %f",
                    __func__, hostInfo.static_info.maxcpu, hostInfo.static_info.maxmem);
        goto free_decoded;
    }

    node->static_info.maxcpu = hostInfo.static_info.maxcpu;
    node->static_info.maxmem  = hostInfo.static_info.maxmem;
    node->static_info.maxswap = hostInfo.static_info.maxswap;
    node->static_info.maxtmp  = hostInfo.static_info.maxtmp;

    /* Replace the string fields, releasing the previous copies. */
    if (NULL != node->static_info.host_type) {
        FREEUP(node->static_info.host_type);
    }
    node->static_info.host_type = hostInfo.static_info.host_type;

    if (NULL != node->static_info.host_arch) {
        FREEUP(node->static_info.host_arch);
    }
    node->static_info.host_arch = hostInfo.static_info.host_arch;

#if defined (HAVE_HWLOC_H)
    if (NULL != node->static_info.numa) {
        free_numanode(node->static_info.numa);
    }
    node->static_info.numa = hostInfo.static_info.numa;
#else
    node->static_info.numa = NULL;
#endif

    /* GPU parameters: replace this node's previous contribution in the
     * cluster-wide GPU count instead of accumulating it on every update
     * (the original += double-counted on re-registration). */
    gptrMyCluster->num_GPU -= node->num_GPU;
    node->num_GPU = hostInfo.num_GPU;
    gptrMyCluster->num_GPU += node->num_GPU;

    /* Free the old GPU tables before overwriting them; the original left
     * these if-bodies empty, leaking the arrays on every update.
     * NOTE(review): entries of gpuinfo_dynamic may own a status string —
     * confirm whether a deep free is required here. */
    if (NULL != node->gpuinfo_static) {
        FREEUP(node->gpuinfo_static);
    }
    node->gpuinfo_static = hostInfo.gpuinfo_static;

    if (NULL != node->gpuinfo_dynamic) {
        FREEUP(node->gpuinfo_dynamic);
    }
    node->gpuinfo_dynamic = hostInfo.gpuinfo_dynamic;

    /* Map the host type string to its index, registering new types. */
    if (NULL != node->static_info.host_type) {
        int typeIdx = get_hosttype_index(node->static_info.host_type);

        if (typeIdx >= 0) {
            node->type_index = typeIdx;
        } else {

            typeIdx = add_hosttype(node->static_info.host_type);
            if (typeIdx >= 0) {
                node->type_index = typeIdx;
            }
        }
    }

    log_message(DEBUG, CHANNEL, "%s: host <%s> maxcpu %d maxmem %f.",
                __func__, node->name, node->static_info.maxcpu,
                node->static_info.maxmem);

    gf_sendback_header(fd, &outhdr);

    return;

free_decoded:
    /* Failure after a successful decode: drop everything the XDR layer
     * allocated into hostInfo so it does not leak. */
    FREEUP(hostInfo.static_info.host_type);
    FREEUP(hostInfo.static_info.host_arch);
#if defined (HAVE_HWLOC_H)
    if (NULL != hostInfo.static_info.numa) {
        free_numanode(hostInfo.static_info.numa);
    }
#endif
    FREEUP(hostInfo.gpuinfo_static);
    FREEUP(hostInfo.gpuinfo_dynamic);

    return;
} // end function handle_static_info

/**
 * @brief       Handle slave JM load update request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode load update request
 * @param[in]   from         #4: node who send me load update request
 *
 * @note        Got node load from slave JM, update its values.
 *              The request is a flat key/value list; several keys act as
 *              implicit cursors (loadIndex, numGPU, resInst) that later
 *              keys in the same list depend on, so the scan order of the
 *              keyvalues array is significant. On success the reply is
 *              the master's boot time, queued via gf_add_data and flushed
 *              by re-arming the fd for EPOLLOUT.
 */
void
handle_load_update(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    /* Rate-limits the config-checksum warning to five occurrences per
     * process lifetime (persists across calls). */
    static int checkSumMismatch;

    struct header outhdr;
    gf_init_header(&outhdr);

    struct request req;
    memset(&req, 0, sizeof(struct request));

    char addrbuf[24+1];

    if (!gf_xdr_request(xdrs, &req, hdr, NULL)) {

        log_message(ERR, NOCLASS, "%s: gf_xdr_request failed from %s",
                    __func__, gf_sockaddr_str(from, addrbuf));

        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);

        return;
    }

    if (!gboolMaster) { // I am not master.

        log_message(WARNING, NOCLASS, "%s: <%s> send load update to a wrong master node <%s>.",
                    __func__, gf_sockaddr_str(from, addrbuf), gptrMyHost->name);

        outhdr.reqcode = ERROR_WRONG_MASTER;
        gf_sendback_header(fd, &outhdr);
        gf_xdr_free(gf_xdr_request, &req);

        return;
    }

    /* The sender must be a node we know about... */
    struct hostNode *hNode;
    hNode = find_node_byaddr(from, (char *)__func__);
    if (NULL == hNode) {

        log_message(ERR, NOCLASS, "%s: Received load update from unknown host %s",
                    __func__, gf_sockaddr_str(from, addrbuf));

        outhdr.reqcode = ERROR_NO_HOST;
        gf_sendback_header(fd, &outhdr);
        gf_xdr_free(gf_xdr_request, &req);

        return;
    }

    /* ... and a member of the cluster node table (not client-only). */
    if (NULL == gf_hash_find(gptrMyCluster->node_table, hNode->name)) {

        log_message(ERR, NOCLASS, "%s: Got load update from client-only host %s.",
                    __func__, hNode->name);

        outhdr.reqcode = ERROR_NO_HOST;
        gf_sendback_header(fd, &outhdr);
        gf_xdr_free(gf_xdr_request, &req);

        return;
    }

    /* Scan the key/value list. numStatus/loadIndex/numRes/numGPU are
     * running cursors advanced by repeated occurrences of their keys;
     * resInst carries state from REQUEST_KEY_RESOURCE into the following
     * REQUEST_KEY_RESOURCE_VALUE. */
    unsigned int i;
    int checkSum=0;
    unsigned int numIndex=0, numUsrIndex=0, numStatus=0, seqNo=0, loadIndex=0;
    unsigned int numRes=0, numGPU=0;
    struct sharedResource *resource=NULL;
    struct resourceInstance *resInst=NULL;
    for (i=0; i<req.number; i++) {
        switch (req.keyvalues[i].key) {
        case REQUEST_KEY_NUM_LOADINDEX:
            numIndex = atoi(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_NUM_USER_LOADINDEX:
            numUsrIndex = atoi(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_NODE_STATUS:
            /* Each occurrence fills the next status slot.
             * NOTE(review): numStatus is not bounds-checked against the
             * status array size — confirm the sender caps the count. */
            hNode->status[numStatus] = atoi(req.keyvalues[i].value);
            numStatus ++;
            break;
        case REQUEST_KEY_LOAD_SEQNO:
            seqNo = atoi(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_CLUSTER_CHECKSUM:
            checkSum = atoi(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_NUM_LOAD_VALUE:
            /* Store only indices the master knows; extra values from a
             * differently-configured slave are skipped but still counted. */
            if (loadIndex < gptrSharedConf->qpinfo->num_index) {
                hNode->load[loadIndex] = atof(req.keyvalues[i].value);
            }

            loadIndex ++;
            break;
        case REQUEST_KEY_NUM_RESOURCE:
            break;
        case REQUEST_KEY_RESOURCE:
            /* Resolve the named shared resource to the instance that this
             * sender belongs to; the value arrives in the next
             * REQUEST_KEY_RESOURCE_VALUE entry. */
            resource = (struct sharedResource *)gf_hash_find(ghashResource, req.keyvalues[i].value);
            if (NULL != resource) {

                if (0 == LIST_NUM_ENTS(resource->inst_list)) {
                    break;
                }

                struct resourceInstance *inst;
                for (inst = (struct resourceInstance *)resource->inst_list->forw;
                     inst != (struct resourceInstance *)resource->inst_list;
                     inst = (struct resourceInstance *)inst->forw) {

                    if (0 == HASH_NUM_ENTS(inst->node_table)) {
                        continue;
                    }

                    struct hostNode *node;
                    node = (struct hostNode *)gf_hash_find(inst->node_table, hNode->name);
                    if (NULL == node) {
                        continue;
                    }

                    resInst = inst;
                    break;
                }
            }

            break;
        case REQUEST_KEY_RESOURCE_VALUE:
            /* Update the instance resolved above, unless the value is
             * unset ("-"), the sender is not the designated update host,
             * or the value is unchanged. */
            if (NULL != resInst) {

                if (!strcmp(resInst->org_value, "-") && !strcmp(req.keyvalues[i].value, "-")) {
                    numRes ++;
                    break;
                }

                if (NULL == resInst->update_host) {
                    resInst->update_host = hNode;
                } else {
                    struct hostNode *instHost;

                    instHost = (struct hostNode *)gf_hash_find(resInst->node_table, hNode->name);
                    if (NULL == instHost) {
                        numRes ++;
                        break;
                    }

                    if (instHost != resInst->update_host) {
                        numRes ++;
                        break;
                    }

                    if (0 == strcmp(resInst->value, req.keyvalues[i].value)) {
                        numRes ++;
                        break;
                    }
                }

                FREEUP(resInst->value);
                FREEUP(resInst->org_value);
                resInst->value = copy_string(req.keyvalues[i].value);
                resInst->org_value = copy_string(req.keyvalues[i].value);
            }
            numRes ++;

            resInst = NULL;
            break;
        case REQUEST_KEY_NUM_GPU:
            /* (Re)size the dynamic GPU table, then reset numGPU to act as
             * the per-GPU cursor for the GPU_* keys that follow.
             * NOTE(review): the realloc branch does not update
             * hNode->num_GPU — confirm whether that is intentional. */
            numGPU = atoi(req.keyvalues[i].value);
            if (0 < numGPU && NULL == hNode->gpuinfo_dynamic) {
                hNode->gpuinfo_dynamic = (struct hostGPULoad *)calloc(numGPU, sizeof(struct hostGPULoad));
                if (NULL == hNode->gpuinfo_dynamic) {
                    log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                                "calloc", numGPU*sizeof(struct hostGPULoad));
                    outhdr.reqcode = ERROR_MEMORY;
                    gf_sendback_header(fd, &outhdr);
                    relaunch();
                }
            } else if (0 < numGPU && hNode->num_GPU < numGPU) {
                struct hostGPULoad *newLoad;
                newLoad = (struct hostGPULoad *)realloc(hNode->gpuinfo_dynamic, numGPU*sizeof(struct hostGPULoad));
                if (NULL == newLoad) {
                    log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                                "realloc", numGPU*sizeof(struct hostGPULoad));
                    outhdr.reqcode = ERROR_MEMORY;
                    gf_sendback_header(fd, &outhdr);
                    relaunch();
                }

                hNode->gpuinfo_dynamic = newLoad;
            }

            numGPU = 0;
            break;
        case REQUEST_KEY_GPU_MEMORY:
            if (NULL == hNode->gpuinfo_dynamic) {
                break;
            }

            hNode->gpuinfo_dynamic[numGPU].avail_gpu_mem = atof(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_GPU_STATUS:
            if (NULL == hNode->gpuinfo_dynamic) {
                break;
            }

            /* NOTE(review): overwrites any previous status string without
             * freeing it — looks like a leak on every update; confirm. */
            hNode->gpuinfo_dynamic[numGPU].status = copy_string(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_GPU_MODE:
            if (NULL == hNode->gpuinfo_dynamic) {
                break;
            }

            hNode->gpuinfo_dynamic[numGPU].gpu_mode = atoi(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_GPU_UT:
            if (NULL == hNode->gpuinfo_dynamic) {
                break;
            }

            hNode->gpuinfo_dynamic[numGPU].gpu_ut = atof(req.keyvalues[i].value);
            break;
        case REQUEST_KEY_GPU_TEMPERATURE:
            /* Temperature is the last per-GPU key, so it advances the GPU
             * cursor even when the table is absent. */
            if (NULL == hNode->gpuinfo_dynamic) {
                numGPU ++;
                break;
            }

            hNode->gpuinfo_dynamic[numGPU].gpu_temperature = atoi(req.keyvalues[i].value);
            numGPU ++;
            break;
        }
    }

    /* NOTE(review): uses &&, so the update is rejected only when BOTH the
     * system and user load-index counts mismatch — confirm whether ||
     * was intended here. */
    if (numIndex != gptrSharedConf->qpinfo->num_index
        && numUsrIndex != gptrSharedConf->qpinfo->num_user_index) {

        log_message(WARNING, NOCLASS, "%s: node (%s) have different load index, ignore the update.",
                    __func__, hNode->name);

        outhdr.reqcode = ERROR_CONF_WARNING;
        gf_sendback_header(fd, &outhdr);
        gf_xdr_free(gf_xdr_request, &req);

        return;
    }

    if (gptrMyCluster->checksum != checkSum
        && checkSumMismatch < 5 && !jmParams[IGNORE_CHECKSUM].bool_value) {
        char addrbuf[24+1];

        log_message(WARNING, NOCLASS, "%s: node (%s) have different config with master",
                    __func__, gf_sockaddr_str(from, addrbuf));
        checkSumMismatch++;
    }

    /* A load report arrived, so reset the no-load watchdog counter. */
    hNode->noload_count = 0;

    /* Warn about a gap in the sequence numbers (missed updates).
     * NOTE(review): seqNo - hNode->seq_number is unsigned arithmetic; the
     * wrap case is masked by the seqNo > seq_number term — confirm. */
    if (seqNo - hNode->seq_number > 2 && seqNo > hNode->seq_number
        && 0 != hNode->seq_number) {

        log_message(ERR, NOCLASS, "%s: host %s seq_number %d not match with seqNo %d.",
                    __func__, hNode->name, hNode->seq_number, seqNo);
    }

    hNode->seq_number = seqNo;

    gf_xdr_free(gf_xdr_request, &req);

    unsigned int size = 32;

    // To encode master boot time.
    char *replyBuf;
    replyBuf = (char *)calloc(size, sizeof(char));
    if (NULL == replyBuf) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__, "calloc", size);
        outhdr.reqcode = ERROR_MEMORY;
        gf_sendback_header(fd, &outhdr);
        relaunch();
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, size, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, (void *)&gtimeJmStart, &outhdr, NULL, gf_xdr_long, NULL)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_message/gf_xdr_long");

        outhdr.reqcode = ERROR_XDR;
        gf_sendback_header(fd, &outhdr);

        xdr_destroy(&xdrs2);
        FREEUP(replyBuf);
        return;
    }

    /* Queue the encoded reply; the channel layer owns replyBuf from here. */
    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    /* Re-arm the fd for edge-triggered writability so epoll flushes it. */
    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    xdr_destroy(&xdrs2);

    return;
} // end function handle_load_update

/**
 * @brief socket handler: Handle CMD_JM_DEBUG request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode debug JM request
 * @param[in]   auth         #4: who issue the command
 *
 * @note        Only cluster admin or root could debug JM.
 */
void
handle_jm_debug(int fd, struct header *hdr, XDR *xdrs, struct auth *auth)
{
    char message[MAXMESSAGELENGTH+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    /* Decode the debug request; on XDR failure reply with an error. */
    struct request debugRequest;
    memset(&debugRequest, 0, sizeof(struct request));
    if (!gf_xdr_request(xdrs, &debugRequest, hdr, NULL)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_request");
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "Decode debug request failed.");
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* Only a cluster administrator or root may change JM debug settings. */
    if (!is_clusteradmin(auth->user_name) && auth->uid != 0 ) {

        log_message(ERR, NOCLASS, "%s: uid <%d> not allowed to perform control operation",
                    __func__, auth->uid);
        snprintf(message, MAXMESSAGELENGTH, "%s: user[%s] has no permission to set debug.",
                 __func__, auth->user_name);
        outhdr.reqcode = ERROR_PERMISSION;
        gf_errorback(fd, &outhdr, message);

        /* Fix: release the decoded request (was leaked on every path). */
        gf_xdr_free(gf_xdr_request, &debugRequest);
        return;
    }

    /* Extract the debug parameters from the key/value list. */
    int reqCode=0, options=0, level=0, logclass=0;
    char *strFileName=NULL;   /* aliases debugRequest storage; released via gf_xdr_free */
    unsigned int i;
    for (i=0; i<debugRequest.number; i++) {
        switch (debugRequest.keyvalues[i].key) {
        case REQUEST_KEY_REQUEST_CODE:
            reqCode = atoi(debugRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_OPTIONS:
            options = atoi(debugRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_LOGLEVEL:
            level = atoi(debugRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_LOGCLASS:
            logclass = atoi(debugRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_LOGFILE:
            strFileName = debugRequest.keyvalues[i].value;
            break;
        }
    }

    /* Split the requested log file into directory and base name;
     * fall back to "jm" in the configured log directory. */
    char logFileName[PATH_MAX+1];
    char jmLogDir[PATH_MAX+1];

    memset(logFileName, 0, sizeof(logFileName));
    memset(jmLogDir, 0, sizeof(jmLogDir));

    if (NULL != strFileName && '\0' != strFileName[0]) {

        char *dir;
        if (((dir = strrchr(strFileName, '/')) != NULL)
            || ((dir = strrchr(strFileName,'\\')) != NULL)) {

            dir++;
            snprintf(logFileName, PATH_MAX-1, "%s", dir);
            dir--;
            /* Temporarily terminate at the separator to copy the directory part. */
            char tmp = *dir;
            *dir = '\0';
            snprintf(jmLogDir, PATH_MAX-1, "%s", strFileName);
            *dir = tmp;
        } else {
            snprintf(logFileName, PATH_MAX-1, "%s", strFileName);
            if (baseParams[BASE_LOGDIR].string_value
                && strlen(baseParams[BASE_LOGDIR].string_value) > 0) {

                snprintf(jmLogDir, PATH_MAX-1, "%s", baseParams[BASE_LOGDIR].string_value);
            } else {
                jmLogDir[0] = '\0';
            }
        }
    } else {
        snprintf(logFileName, PATH_MAX-1, "%s", "jm");
        if (baseParams[BASE_LOGDIR].string_value
            && strlen(baseParams[BASE_LOGDIR].string_value) > 0) {
            snprintf(jmLogDir, PATH_MAX-1, "%s", baseParams[BASE_LOGDIR].string_value);
        } else {
            jmLogDir[0] = '\0';
        }
    }

    if (1 == options) {
        /* options==1: reset and reload the jm parameters, then reopen the log. */
        struct config_param *plp;
        for (plp = jmParams; plp->param_name != NULL; plp++) {
            switch(plp->param_type) {
            case STRING_PARAM:
                FREEUP(plp->string_value);
                break;
            case INT_PARAM:
                plp->int_value = 0;
                break;
            case FLOAT_PARAM:
                plp->float_value = 0.0;
                break;
            case BOOL_PARAM:
                plp->bool_value = FALSE;
                break;
            }
        }

        if (init_config_param(jmParams, gstrEnvDir) < 0){
            log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "init_config_param");

            jm_die(DAEMON_QUIT_FATAL);
        }

        gf_set_logclass(baseParams[BASE_JM_DEBUG].string_value);

        close_log();

        /* NOTE(review): log file is named "js.%s.log" although this is JM — confirm. */
        char logfile[PATH_MAX+1];
        snprintf(logfile, PATH_MAX, "%s/js.%s.log",
                 baseParams[BASE_LOGDIR].string_value, gptrMyHost->name);
        open_log(logfile, baseParams[BASE_LOG_MASK].string_value,
                 baseParams[BASE_JM_TIME_DEBUG].int_value);
    } else if (JM_DEBUG == reqCode) {

        log_message(DEBUG, TRACER, 
                    "debug jm request: filename= %s: newclass=%x, level = %d",
                    logFileName, logclass, level);

        if (0 < logclass) {
            gf_set_intlogclass(logclass);
        }

        if (0 <= level){

            close_log();

            /* Fix: strncat's bound is the max bytes to APPEND, not the total
             * buffer size; PATH_MAX-1 could overflow logFileName when it was
             * already close to full. Use the remaining space instead. */
            strncat(logFileName, ".", sizeof(logFileName) - strlen(logFileName) - 1);
            strncat(logFileName, gptrMyHost->name, sizeof(logFileName) - strlen(logFileName) - 1);

            char strLogLevel[20];
            loglevel2string(level, strLogLevel);

            char logfile[PATH_MAX+1];
            snprintf(logfile, PATH_MAX, "%s/%s.log", jmLogDir, logFileName);
            open_log(logfile, strLogLevel, baseParams[BASE_JM_TIME_DEBUG].int_value);
        }
    } else if (JM_TIMING == reqCode) {

        if (NULL != strFileName && '\0' != strFileName[0]) {

            close_log();

            /* Same strncat bound fix as the JM_DEBUG branch above. */
            strncat(logFileName, ".", sizeof(logFileName) - strlen(logFileName) - 1);
            strncat(logFileName, gptrMyHost->name, sizeof(logFileName) - strlen(logFileName) - 1);

            char logfile[PATH_MAX+1];
            snprintf(logfile, PATH_MAX, "%s/js.%s.log", jmLogDir, logFileName);
            open_log(logfile, baseParams[BASE_LOG_MASK].string_value, level);
        }

        if (0 < level) {
            gf_set_timinglog(level);
        }
    } else {

        snprintf(message, MAXMESSAGELENGTH, "%s: unknown debug code[%d].", __func__, reqCode);
        outhdr.reqcode = ERROR_ARGUMENT;
        gf_errorback(fd, &outhdr, message);

        /* Fix: release the decoded request (was leaked). */
        gf_xdr_free(gf_xdr_request, &debugRequest);
        return;
    }

    /* Fix: release the decoded request; strFileName is no longer used here. */
    gf_xdr_free(gf_xdr_request, &debugRequest);

    gf_sendback_header(fd, &outhdr);

    return;
} // end function handle_jm_debug

/**
 * @brief socket handler: Handle CMD_JM_SHUTDOWN request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode shutdown node request
 * @param[in]   auth         #4: who issue the command
 *
 * @note        Only cluster admin or root could shutdown JM.
 */
void
handle_shutdown(int fd, struct header *hdr, XDR *xdrs, struct auth *auth)
{
    struct header outhdr;
    char message[MAXMESSAGELENGTH+1];

    gf_init_header(&outhdr);

    /* Only a cluster administrator or root may stop the JM daemon. */
    if (!is_clusteradmin(auth->user_name) && auth->uid != 0 ) {

        log_message(ERR, NOCLASS, "%s: uid <%d> not allowed to perform control operation",
                    __func__, auth->uid);

        outhdr.reqcode = ERROR_PERMISSION;
        snprintf(message, MAXMESSAGELENGTH, "%s: user[%s] has no permission to issue jm stop.",
                 __func__, auth->user_name);
        gf_errorback(fd, &outhdr, message);

        return;
    }

    /* Fix: log the shutdown only after the request is authorized; previously
     * a rejected request still logged "shutting down", polluting the audit trail. */
    log_message(INFO, NOCLASS, "JM daemon shutting down...");

    /* Acknowledge the caller, then let the main loop observe the quit flag. */
    gf_sendback_header2(fd, &outhdr);
    gintJmQuit = DAEMON_SHUTDOWN;

    return;
} // end function handle_shutdown

/**
 * @brief socket handler: Handle JM_JOB_SETUP request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode job setup request
 *
 * @note        Got job setup from child JM, update job execution information
 *              and job status to JS.
 */
void
handle_job_setup(int fd, struct header *hdr, XDR *xdrs)
{
    /* Decode the setup report sent by the child JM. */
    struct request jobRequest;
    memset(&jobRequest, 0, sizeof(struct request));
    if (!gf_xdr_request(xdrs, &jobRequest, hdr, NULL)) {
        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_request");
        return;
    }

    /* Extract the job execution attributes from the key/value list.
     * The char* values below alias jobRequest's storage and remain valid
     * until gf_xdr_free() is called at the end of this function. */
    JOBID_T jobid=0;
    pid_t jobPid=0, jobPGid=0;
    uid_t jobExecGid=0, jobExecUid=0;
    int jobStatus = 0, pendReason=0, jobExecFlag=0, exitStatus=0;
    char *execUserName=NULL, *execCWD=NULL, *execHome=NULL;
    float cpuTime = 0.0;
    unsigned int i;
    for (i=0; i<jobRequest.number; i++) {
        switch (jobRequest.keyvalues[i].key) {
        case REQUEST_KEY_JOBIDS:
            jobid = atoll(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOBSTATUS:
            jobStatus = atoi(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_PENDREASON:
            pendReason = atoi(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOBPID:
            jobPid = atol(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_JOBPGID:
            jobPGid = atol(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_EXECUTE_GID:
            jobExecGid = atoi(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_EXECUTE_UID:
            jobExecUid = atoi(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_EXECUTE_FLAG:
            jobExecFlag = atoi(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_USERNAME:
            execUserName = jobRequest.keyvalues[i].value;
            break;
        case REQUEST_KEY_EXECUTE_CWD:
            execCWD = jobRequest.keyvalues[i].value;
            break;
        case REQUEST_KEY_EXECUTE_HOME:
            execHome = jobRequest.keyvalues[i].value;
            break;
        case REQUEST_KEY_CPUTIME:
            cpuTime = atof(jobRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_EXITSTATUS:
            exitStatus = atoi(jobRequest.keyvalues[i].value);
            break;
        }
    }

    /* Look the job up in the JM job list by jobid (linear scan of the
     * circular doubly-linked list headed by glistJmJobs). */
    struct jobEntry *job;
    bool_t found = FALSE;
    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = (struct jobEntry *)job->forw) {

        if (job->job_base.jobid != jobid) {
            continue;
        }

        found = TRUE;
        break;
    }

    struct header outhdr;
    gf_init_header(&outhdr);

    char strJobId[32];
    if (!found) {
        /* Unknown job: report ERROR_NO_JOB back to the child JM. */
        log_message(ERR, NOCLASS, "%s: job <%s> is not found in jm",
                    __func__, jobid_2string(jobid, strJobId));

        gf_xdr_free(gf_xdr_request, &jobRequest);

        outhdr.reqcode = ERROR_NO_JOB;
        gf_sendback_header2(fd, &outhdr);

        return;
    }

    /* A non-zero act_pid means the job was already set up; reject duplicates. */
    if (job->job_base.act_pid) {

        gf_xdr_free(gf_xdr_request, &jobRequest);

        outhdr.reqcode = ERROR_JOB_STARTED;
        gf_sendback_header2(fd, &outhdr);

        return;
    }

    /* Shallow snapshot of the entry for rollback if status_job/job_finish fail.
     * NOTE(review): the snapshot copies raw pointers; the rollback memcpy below
     * restores execute_user/cwd/home pointers that may have been FREEUP'd and
     * replaced by copy_string in the meantime — potential dangling pointer and
     * leak of the replacement strings on the failure paths. Confirm intent. */
    struct jobEntry oldJob;
    memcpy((char *)&oldJob, (char *)job, sizeof(struct jobEntry));

    /* Record that the pre-exec outcome is now known, and whether it succeeded. */
    job->execute_flag |= JOB_EXEC_QPRE_KNOWN;
    if (jobExecFlag & JOB_EXEC_QPRE_OK) {
        job->execute_flag |= JOB_EXEC_QPRE_OK;
    }

    /* Apply the execution identity/process details reported by the child JM. */
    job->job_base.job_pid = jobPid;
    job->job_base.job_pgid = jobPGid;
    job->job_base.execute_uid = jobExecUid;
    job->execute_gid = jobExecGid;

    if (NULL != execUserName) {
        FREEUP(job->job_base.execute_user);
        job->job_base.execute_user = copy_string(execUserName);
    }

    if (NULL != execCWD) {
        FREEUP(job->job_base.execute_cwd);
        job->job_base.execute_cwd = copy_string(execCWD);
    }

    if (NULL != execHome) {
        FREEUP(job->job_base.execute_home);
        job->job_base.execute_home = copy_string(execHome);
    }

    if (jobStatus & JOB_STATUS_RUN) {
        /* Job is running: clear the pre-job phase bit if the child left it. */

        if (!(jobStatus & JOB_STATUS_PREJOB)) {
            job->job_base.status &= ~JOB_STATUS_PREJOB;
        }

	if (status_job(BATCH_STATUS_JOB, job, job->job_base.status, 0) < 0) {
            /* JS could not be updated: roll back and report a comms error. */

            gf_xdr_free(gf_xdr_request, &jobRequest);
            memcpy((char *)job, (char *)&oldJob, sizeof(struct jobEntry));

            outhdr.reqcode = ERROR_COMMUNICATE;
            gf_sendback_header2(fd, &outhdr);

            return;
        }

        job->execute_flag |= JOB_EXEC_STARTED;
    } else {
        /* Job did not reach the running state: record the final outcome. */
        job->job_base.reasons = pendReason;
        job->execute_flag |= JOB_EXEC_FINISH;
        job->comm_failcnt = 0;
        job->update_usage = FALSE;
        job->job_base.status = jobStatus;
        job->w_status = exitStatus;

        job->cpu_time = cpuTime;

        if (job_finish(job) < 0) {
            /* Finalization failed: roll back and report it. */

            gf_xdr_free(gf_xdr_request, &jobRequest);
            memcpy((char *)job, (char *)&oldJob, sizeof(struct jobEntry));

            outhdr.reqcode = ERROR_JOB_FINISH;
            gf_sendback_header2(fd, &outhdr);

            return;
        }
    }

    /* Success path: acknowledge, then trace the full set of applied values. */
    if (gf_sendback_header2(fd, &outhdr) < 0) {
        log_message(ERR, NOCLASS, "%s: reply header failed for job <%s>",
                    __func__, jobid_2string(jobid, strJobId));
    }

    log_message(DEBUG, EXECUTE, "%s: JobId %s jstatus %d reason %x  job_pid %d job_pgid %d uid %d execute_gid <%d> execute_user <%s> execute_home <%s> execute_cwd <%s> execute_flag %x cputime %.2f w_status %d",
                __func__, jobid_2string(jobid, strJobId), jobStatus, pendReason,
                jobPid, jobPGid, jobExecUid, jobExecGid,
                execUserName, execHome, execCWD,
                jobExecFlag, cpuTime, exitStatus);

    gf_xdr_free(gf_xdr_request, &jobRequest);

    return;
} // end function handle_job_setup

/**
 * @brief socket handler: Handle JM_GET_INFO request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query quickpool information request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the quickpool information.
 */
void
handle_info_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    char message[MAXMESSAGELENGTH];
    struct header outhdr;

    gf_init_header(&outhdr);

    /* Only the master node can answer quickpool-information queries. */
    if (!gboolMaster) {
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH,
                 "%s: send quickpool information request to a wrong node <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* Size the reply buffer: the fixed struct, one resource_definition per
     * resource, and each resource's XDR-encoded name/description strings. */
    unsigned int idx;
    unsigned int bufLen = sizeof(struct quickpoolInfo)
        + gptrSharedConf->qpinfo->num_resources * sizeof(struct resource_definition);
    for (idx = 0; idx < gptrSharedConf->qpinfo->num_resources; idx++) {
        bufLen += strlen(gptrSharedConf->qpinfo->resource_table[idx].name) + 1 + XDR_OFFSET;
        bufLen += strlen(gptrSharedConf->qpinfo->resource_table[idx].des) + 1 + XDR_OFFSET;
    }

    char *encodeBuf = (char *)calloc(bufLen, sizeof(char));
    if (NULL == encodeBuf) {
        snprintf(message, MAXMESSAGELENGTH, "%s: calloc(%d) reply buf failed due to %m.",
                 __func__, bufLen);
        outhdr.reqcode = ERROR_MEMORY;
        gf_errorback(fd, &outhdr, message);
        return;
    }

    XDR encXdrs;
    xdrmem_create(&encXdrs, encodeBuf, bufLen, XDR_ENCODE);

    if (!gf_xdr_message(&encXdrs, gptrSharedConf->qpinfo, &outhdr, NULL, libxdr_quickpoolinfo, NULL)) {
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: encode quickpool information failed.", __func__);
        gf_errorback(fd, &outhdr, message);

        xdr_destroy(&encXdrs);
        FREEUP(encodeBuf);
        return;
    }

    /* Queue the encoded reply and arm the fd for write readiness. */
    gf_add_data(fd, encodeBuf, XDR_GETPOS(&encXdrs));

    struct epoll_event wev = { .events = EPOLLOUT | EPOLLET };
    wev.data.fd = fd;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &wev);

    xdr_destroy(&encXdrs);
    return;
} // end function handle_info_req

/**
 * @brief socket handler: Handle JM_GET_CLUSINFO request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query cluster information request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the cluster information.
 */
void
handle_clusterinfo_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    char message[MAXMESSAGELENGTH+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    struct QuickPoolClusterReply clusterInfoReply;
    memset(&clusterInfoReply, 0, sizeof(struct QuickPoolClusterReply));

    /* Decode the cluster query request. */
    struct request cInfoReq;
    memset(&cInfoReq, 0, sizeof(struct request));
    if (!gf_xdr_request(xdrs, &cInfoReq, hdr, NULL)) {
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: decode cluster request failed.", __func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* Only the master node answers cluster-information queries. */
    if (!gboolMaster){
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH,
                 "%s: send cluster information request to a wrong node <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);

        gf_xdr_free(gf_xdr_request, (void *) &cInfoReq);
        return;
    }

    gf_xdr_free(gf_xdr_request, (void *) &cInfoReq);

    clusterInfoReply.num_clusters = 1;
    clusterInfoReply.clusters = (struct QuickPoolClusterInfo *)calloc(1, sizeof(struct QuickPoolClusterInfo));
    if (NULL == clusterInfoReply.clusters) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                    "calloc", sizeof(struct QuickPoolClusterInfo));
        outhdr.reqcode = ERROR_MEMORY;
        snprintf(message, MAXMESSAGELENGTH, "%s: not enough memory to handle the request.",
                 __func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    strcpy(clusterInfoReply.clusters[0].cluster_name, gptrMyCluster->name);
    /* Publish full cluster details only when the information is available and
     * boss_node is known. Fix: the old condition let a master with a NULL
     * boss_node fall through to the boss_node->name dereference below. */
    if ((gptrMyCluster->status & CLUST_INFO_AVAIL)
        && gptrMyCluster->boss_node != NULL) {
        clusterInfoReply.clusters[0].status = CLUST_STAT_OK;
        strcpy(clusterInfoReply.clusters[0].master_name, gptrMyCluster->boss_node->name);
        strcpy(clusterInfoReply.clusters[0].manager_name, gptrMyCluster->main_admin);
        clusterInfoReply.clusters[0].manager_id = gptrMyCluster->main_adminid;
        /* Servers = all nodes in the hash table minus pure client nodes. */
        clusterInfoReply.clusters[0].num_servers = HASH_NUM_ENTS(gptrMyCluster->node_table)-gptrMyCluster->num_clients;
        clusterInfoReply.clusters[0].num_clients = gptrMyCluster->num_clients;
        clusterInfoReply.clusters[0].num_admins = gptrMyCluster->num_admins;
        clusterInfoReply.clusters[0].admin_ids = gptrMyCluster->adminids;
        clusterInfoReply.clusters[0].admins = gptrMyCluster->admins;

        clusterInfoReply.clusters[0].num_resources = gptrMyCluster->num_numeric_resources;
        clusterInfoReply.clusters[0].resource_names = gptrMyCluster->numeric_resource_names;

        /* Collect the host types enabled in this cluster's type bitmap.
         * The array holds borrowed pointers into qpinfo; only the array
         * itself is freed later. */
        clusterInfoReply.clusters[0].host_types = (char **)calloc(gptrSharedConf->qpinfo->num_types,
                                                                  sizeof(char *));
        if (NULL == clusterInfoReply.clusters[0].host_types) {
            log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                        "calloc", gptrSharedConf->qpinfo->num_types*sizeof(char *));
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(message, MAXMESSAGELENGTH, "%s: not enough memory to handle the request.",
                     __func__);
            gf_errorback(fd, &outhdr, message);
            FREEUP(clusterInfoReply.clusters);   /* fix: was leaked */
            return;
        }

        unsigned int i, j;
        for (i=0,j=0; i<gptrSharedConf->qpinfo->num_types; i++) {
            int isSet;

            TEST_BIT(i, gptrMyCluster->type_bitmap, isSet);
            if (isSet == 1) {
                clusterInfoReply.clusters[0].host_types[j++] = gptrSharedConf->qpinfo->host_types[i];
            }
        }
        clusterInfoReply.clusters[0].num_types = j;

        /* Same for the host models enabled in the model bitmap. */
        clusterInfoReply.clusters[0].host_models = (char **)calloc(gptrSharedConf->qpinfo->num_models,
                                                                   sizeof(char *));
        if (NULL == clusterInfoReply.clusters[0].host_models) {
            log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                        "calloc", gptrSharedConf->qpinfo->num_models*sizeof(char *));
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(message, MAXMESSAGELENGTH, "%s: not enough memory to handle the request.",
                     __func__);
            gf_errorback(fd, &outhdr, message);
            FREEUP(clusterInfoReply.clusters[0].host_types);   /* fix: was leaked */
            FREEUP(clusterInfoReply.clusters);                 /* fix: was leaked */
            return;
        }

        for (i=0,j=0; i<gptrSharedConf->qpinfo->num_models; i++) {
            int isSet;

            TEST_BIT(i, gptrMyCluster->model_bitmap, isSet);
            if (isSet == 1) {
                clusterInfoReply.clusters[0].host_models[j++] = gptrSharedConf->qpinfo->host_models[i];
            }
        }
        clusterInfoReply.clusters[0].num_models = j;

    } else {
        /* Cluster information unavailable: report unavailable with empty names. */
        clusterInfoReply.clusters[0].status = CLUST_STAT_UNAVAIL;
        clusterInfoReply.clusters[0].master_name[0] = '\0';
        clusterInfoReply.clusters[0].manager_name[0] = '\0';
    }

    char *replyBuf;
    replyBuf = (char *)calloc(MAXMESSAGELENGTH*2, sizeof(char));
    if (NULL == replyBuf) {
        snprintf(message, MAXMESSAGELENGTH, "%s: calloc(%d) reply buf failed due to %m.",
                 __func__, MAXMESSAGELENGTH*2);
        outhdr.reqcode = ERROR_MEMORY;
        gf_errorback(fd, &outhdr, message);
        /* fix: reply structures were leaked on this path (FREEUP(NULL) is safe). */
        FREEUP(clusterInfoReply.clusters[0].host_types);
        FREEUP(clusterInfoReply.clusters[0].host_models);
        FREEUP(clusterInfoReply.clusters);
        return;
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, MAXMESSAGELENGTH*2, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, (char *)&clusterInfoReply, &outhdr, NULL,
                        libxdr_clusterinfo_reply, NULL)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_message(libxdr_clusterinfo_reply)");
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: encode cluster reply failed.", __func__);
        gf_errorback(fd, &outhdr, message);

        if (clusterInfoReply.clusters != NULL) {
            /* fix: also release the per-cluster pointer arrays (were leaked). */
            FREEUP(clusterInfoReply.clusters[0].host_types);
            FREEUP(clusterInfoReply.clusters[0].host_models);
            FREEUP(clusterInfoReply.clusters);
        }
        xdr_destroy(&xdrs2);
        FREEUP(replyBuf);

        return;
    }

    /* Queue the encoded reply and arm the fd for write readiness. */
    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    if (clusterInfoReply.clusters != NULL) {
        unsigned int i;

        for (i=0; i<clusterInfoReply.num_clusters; i++) {
            FREEUP(clusterInfoReply.clusters[i].host_types);
            FREEUP(clusterInfoReply.clusters[i].host_models);
        }

        FREEUP(clusterInfoReply.clusters);
    }

    xdr_destroy(&xdrs2);

    return;
} // end function handle_clusterinfo_req

/**
 * @brief socket handler: Handle JM_GET_CLUSTERNAME request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query cluster name request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the cluster name.
 */
void
handle_clustername_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    struct header outhdr;
    char message[MAXMESSAGELENGTH+1];

    gf_init_header(&outhdr);

    /* Only the master node answers cluster-name queries. */
    if (!gboolMaster) {
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH, "%s: send cluster name request to a wrong node <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* Room for the XDR-encoded name plus length word, header and padding. */
    unsigned int bufLen =
        strlen(gptrMyCluster->name) + sizeof(int) + HEADER_LEN + 5 + XDR_OFFSET;

    char *nameBuf = (char *)calloc(bufLen, sizeof(char));
    if (NULL == nameBuf) {
        snprintf(message, MAXMESSAGELENGTH, "%s: calloc(%d) reply buf failed due to %m.", __func__, bufLen);
        outhdr.reqcode = ERROR_MEMORY;
        gf_errorback(fd, &outhdr, message);
        return;
    }

    XDR encXdrs;
    xdrmem_create(&encXdrs, nameBuf, bufLen, XDR_ENCODE);

    if (!gf_xdr_message(&encXdrs, &(gptrMyCluster->name), &outhdr, NULL, gf_xdr_string, NULL)) {

        log_message(ERR, NOCLASS, "%s: failed to encode cluster name <%s>",
                    __func__, gptrMyCluster->name);
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: encode cluster name failed.", __func__);
        gf_errorback(fd, &outhdr, message);

        xdr_destroy(&encXdrs);
        FREEUP(nameBuf);
        return;
    }

    /* Hand the encoded bytes to the channel layer and wait for EPOLLOUT. */
    gf_add_data(fd, nameBuf, XDR_GETPOS(&encXdrs));

    struct epoll_event wev;
    memset(&wev, 0, sizeof(wev));
    wev.data.fd = fd;
    wev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &wev);

    xdr_destroy(&encXdrs);

    return;
} // end function handle_clustername_req

/**
 * @brief socket handler: Handle JM_GET_MASTINFO request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query master information request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the master node name.
 */
void
handle_masterinfo_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    char message[MAXMESSAGELENGTH+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    /* Only the master node answers master-information queries. */
    if (!gboolMaster){
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH, "%s: send master name request to a wrong node <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    struct hostNode *masterNode;
    masterNode = gptrMyCluster->boss_node;

    /* Fix: boss_node may be NULL (handle_clusterinfo_req already guards it);
     * the original code dereferenced it unconditionally below. */
    if (NULL == masterNode) {
        log_message(ERR, NOCLASS, "%s: master node information not available yet", __func__);
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH, "%s: master node information is not available on <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* Room for the XDR-encoded name plus length word, header and padding. */
    unsigned int size;
    size = strlen(masterNode->name) + sizeof(int) + HEADER_LEN + 5 + XDR_OFFSET;

    char *replyBuf;
    replyBuf = (char*)calloc(size, sizeof(char));
    if (NULL == replyBuf) {
        snprintf(message, MAXMESSAGELENGTH, "%s: calloc(%d) reply buf failed due to %m.",
                 __func__, size);
        outhdr.reqcode = ERROR_MEMORY;
        gf_errorback(fd, &outhdr, message);
        return;
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, size, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, &(masterNode->name), &outhdr, NULL, gf_xdr_string, NULL)) {

        log_message(ERR, NOCLASS, "%s: failed encode master host name[%s]",
                    __func__, masterNode->name);
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: encode master name failed.", __func__);
        gf_errorback(fd, &outhdr, message);

        xdr_destroy(&xdrs2);
        FREEUP(replyBuf);
        return;
    }

    /* Queue the encoded reply and arm the fd for write readiness. */
    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    xdr_destroy(&xdrs2);

    return;
} // end function handle_masterinfo_req

/**
 * @brief socket handler: Handle JM_GET_HOSTINFO request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query node information request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the host node information.
 */
void 
handle_hostinfo_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    char message[MAXMESSAGELENGTH+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    /* Only the master node can answer a cluster-wide host-info query. */
    if (!gboolMaster){
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH, "%s: send master name request to a wrong node <%s>.",
                 __func__, gptrMyHost->name);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    struct request hostInfoRequest;
    memset(&hostInfoRequest, 0, sizeof(struct request));

    struct QuickPoolHostReply hostReply;
    memset(&hostReply, 0, sizeof(struct QuickPoolHostReply));

    if (!gf_xdr_request(xdrs, &hostInfoRequest, hdr, NULL)) {

        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: decode node info request failed.", __func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /*
     * Walk the decoded key/value pairs and extract the query options, the
     * preferred host list (REQUEST_KEY_NUMBER then REQUEST_KEY_NAMES) and
     * the resource requirement string.
     */
    unsigned int i, options=0, num=0, numHosts=0;
    /* FIX: 'names' was previously left uninitialized; it is read below when
     * building the "None of host[...] find." error message even when the
     * request carries no REQUEST_KEY_NAMES pair (undefined behavior). */
    char *names = NULL, **preferredHosts = NULL, *strResReq = NULL;
    for (i=0; i<hostInfoRequest.number; i++) {
        switch (hostInfoRequest.keyvalues[i].key) {
        case REQUEST_KEY_OPTIONS:
            options = atoi(hostInfoRequest.keyvalues[i].value);
            break;
        case REQUEST_KEY_NUMBER:
            num = atoi(hostInfoRequest.keyvalues[i].value);
            if (num > 0) {
                preferredHosts = (char **)calloc(num, sizeof(char *));
                if (NULL == preferredHosts) {
                    snprintf(message, MAXMESSAGELENGTH, "%s: calloc(%zu) preferred nodes failed due to %m.",
                             __func__, num*sizeof(char *));
                    outhdr.reqcode = ERROR_MEMORY;
                    gf_errorback(fd, &outhdr, message);
                    /* FIX: the decoded request was leaked on this path. */
                    gf_xdr_free(gf_xdr_request, &hostInfoRequest);
                    return;
                }
            }
            break;
        case REQUEST_KEY_NAMES:
        {
            names = hostInfoRequest.keyvalues[i].value;

            /* FIX: if REQUEST_KEY_NUMBER did not precede this pair (or was
             * zero), 'preferredHosts' is still NULL and the token loop
             * below would write through a NULL pointer. Keep 'names' set
             * for error reporting and skip the tokenizing. */
            if (NULL == preferredHosts || 0 == num) {
                break;
            }

            unsigned int len = strlen(names);
            char *host = (char *)calloc(len+1, sizeof(char));
            if (NULL == host) {
                snprintf(message, MAXMESSAGELENGTH, "%s: calloc(%u) nodes failed due to %m.",
                         __func__, len+1);
                outhdr.reqcode = ERROR_MEMORY;
                gf_errorback(fd, &outhdr, message);
                /* FIX: both the collected names and the decoded request
                 * were leaked on this path. */
                free_string_array(preferredHosts, numHosts);
                gf_xdr_free(gf_xdr_request, &hostInfoRequest);
                return;
            }

            /* Split the space-separated host list; collect at most 'num'
             * names. get_string_token() advances 'names' as it consumes. */
            char *token;
            while (NULL != (token = get_string_token(&names, " ", host, len+1))) {
                preferredHosts[numHosts++] = copy_string(token);
                len = strlen(names);

                if (numHosts >= num) {
                    break;
                }
            }
            /* Restore 'names' to the full original list for later use in
             * error messages. */
            names = hostInfoRequest.keyvalues[i].value;
            FREEUP(host);

            break;
        }
        case REQUEST_KEY_RESREQ:
            strResReq = hostInfoRequest.keyvalues[i].value;
            break;
        default:
            /* Unknown keys are ignored. */
            break;
        }
    }

    struct requirement req;
    memset(&req, 0, sizeof(struct requirement));

    /* Worst case every configured node matches the query, so size the
     * reply array by the node-table population. */
    hostReply.num_hosts = 0;
    hostReply.hosts = (struct QuickPoolHostInfo *)calloc(HASH_NUM_ENTS(gptrMyCluster->node_table),
                                                         sizeof(struct QuickPoolHostInfo));
    if (NULL == hostReply.hosts) {

        gf_xdr_free(gf_xdr_request, &hostInfoRequest);
        free_string_array(preferredHosts, numHosts);

        outhdr.reqcode = ERROR_MEMORY;
        snprintf(message, MAXMESSAGELENGTH,
                 "%s: not enough memory to handle the node info request.", __func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* If the caller named no host, or named only me, slot 0 is filled with
     * my own static information. */
    if (0 == numHosts || (1 == numHosts && 0 == strcmp(preferredHosts[0], gptrMyHost->name))) {
        struct QuickPoolHostInfo *host = &(hostReply.hosts[0]);

        host->host = gptrMyHost->name;

        host->ncpus = gptrMyHost->static_info.maxcpu;
        host->maxmem  = gptrMyHost->static_info.maxmem;
        host->maxswap = gptrMyHost->static_info.maxswap;
        host->maxtmp  = gptrMyHost->static_info.maxtmp;
        host->res_bitmap = gptrMyHost->res_bitmap;

        host->host_type  = gptrSharedConf->qpinfo->host_types[gptrMyHost->type_index];
        host->host_model = gptrSharedConf->qpinfo->host_models[gptrMyHost->model_index];
        host->server = gptrMyHost->server;

        host->load_threshold = gptrMyHost->busy_threshold;
        host->flags = HOSTINFO_JM_INFO;

        host->max_jobs = gptrMyHost->max_jobs;
        host->num_GPU = gptrMyHost->num_GPU;
        host->gpuinfo_static = gptrMyHost->gpuinfo_static;
        host->numa = gptrMyHost->static_info.numa;

        hostReply.num_hosts ++;
    }

    struct hostNode *fromHost;
    if (1 == numHosts && 0 == strcmp(preferredHosts[0], gptrMyHost->name)) {
        /* Caller wanted only my information; skip the cluster walk. */
        goto sendback;
    }

    fromHost = find_node_byaddr(from, __func__);

    if (NULL != strResReq) {

        /* Pre-validate the requirement string by evaluating it once
         * against my own node before applying it cluster-wide. */
        struct Tcl_Node tclNode;
        memset(&tclNode, 0, sizeof(struct Tcl_Node));
        set_tclnode(&tclNode, gptrMyHost, fromHost, TRUE);
        tclNode.ignDedicatedResource = TRUE;

        int ret;
        ret = set_requirement(strResReq, gptrSharedConf->qpinfo, REQ_SELECT, &req);
        if (0 != ret || evaluate(req.select, &tclNode, options & DFT_FROMTYPE) < 0) {

            if (ret == PARSE_BAD_VAL) {
                outhdr.reqcode = ERROR_PARSE_VALUE;
                snprintf(message, MAXMESSAGELENGTH,
                         "%s: bad value in select section.", __func__);
            } else if (ret == PARSE_BAD_NAME) {
                outhdr.reqcode = ERROR_PARSE_RESOURCE;
                snprintf(message, MAXMESSAGELENGTH,
                         "%s: bad resource name in selection section.", __func__);
            } else {
                outhdr.reqcode = ERROR_PARSE_REQUIREMENT;
                /* FIX: typo "sytax" in the user-facing message. */
                snprintf(message, MAXMESSAGELENGTH,
                         "%s: syntax error in select section.", __func__);
            }

            gf_hash_free(tclNode.respair_table, free);

            gf_xdr_free(gf_xdr_request, &hostInfoRequest);
            free_requirement(&req);
            free_string_array(preferredHosts, numHosts);
            /* FIX: the reply array was leaked on this error path. */
            FREEUP(hostReply.hosts);

            gf_errorback(fd, &outhdr, message);

            return;
        }

        gf_hash_free(tclNode.respair_table, free);
    }

    /* Walk every node in the cluster; append each node that survives the
     * static-resource thresholds, the select expression and the preferred
     * host filter. */
    struct Tcl_Node tclNode;
    HASH_WALK_T walk;
    char *key;
    struct hostNode *node;
    gf_hash_walk_start(gptrMyCluster->node_table, &walk);
    while (NULL != (node = (struct hostNode *)gf_hash_walk(&walk, &key))) {

        if (NULL != req.val) {
            /* Static thresholds: a finite requested value must not exceed
             * the node's configured maximum. */
            if (fabs(req.val[MEM]) < INFINIT_LOAD && req.val[MEM] > node->static_info.maxmem) {
                continue;
            }

            if (fabs(req.val[SWP]) < INFINIT_LOAD && req.val[SWP] > node->static_info.maxswap) {
                continue;
            }

            if (fabs(req.val[TMP]) < INFINIT_LOAD && req.val[TMP] > node->static_info.maxtmp) {
                continue;
            }

            memset(&tclNode, 0, sizeof(struct Tcl_Node));
            set_tclnode(&tclNode, node, fromHost, FALSE);
            if (evaluate(req.select, &tclNode, options & DFT_FROMTYPE) != 1) {
                gf_hash_free(tclNode.respair_table, free);
                continue;
            }

            gf_hash_free(tclNode.respair_table, free);
        }

        for (i = 0; i < numHosts; i++) {
            if (issame_host(node->name, preferredHosts[i])) {
                break;
            }
        }

        /* Skip nodes not on a non-empty preferred list; with no preferred
         * list skip myself (slot 0 already holds my information). */
        if (i == numHosts && numHosts > 0) {
            continue;
        } else if (0 == numHosts && issame_host(node->name, gptrMyHost->name)) {
            continue;
        }

        struct QuickPoolHostInfo *host;
        host = &(hostReply.hosts[hostReply.num_hosts]);
        host->ncpus = node->static_info.maxcpu;
        host->maxmem  = node->static_info.maxmem;
        host->maxswap = node->static_info.maxswap;
        host->maxtmp  = node->static_info.maxtmp;
        host->res_bitmap = node->res_bitmap;

        host->host_type  = gptrSharedConf->qpinfo->host_types[node->type_index];
        host->host_model = gptrSharedConf->qpinfo->host_models[node->model_index];
        host->host = node->name;
        host->server = node->server;

        host->load_threshold = node->busy_threshold;

        host->flags = HOSTINFO_JM_INFO;
        host->max_jobs = node->max_jobs;

        host->num_GPU = node->num_GPU;
        host->gpuinfo_static = node->gpuinfo_static;
        host->numa = node->static_info.numa;

        hostReply.num_hosts ++;
    }
    gf_hash_walk_end(&walk);

 sendback:
    free_requirement(&req);
    free_string_array(preferredHosts, numHosts);

    if (0 == hostReply.num_hosts) {
        outhdr.reqcode = ERROR_NO_HOST;
        /* 'names' may still be NULL when the request carried no
         * REQUEST_KEY_NAMES pair. */
        snprintf(hostReply.error_message, MAXMESSAGELENGTH, "None of host[%s] find.",
                 names ? names : "");
        gf_errorback(fd, &outhdr, hostReply.error_message);

        gf_xdr_free(gf_xdr_request, &hostInfoRequest);
        FREEUP(hostReply.hosts);
        return;
    }

    gf_xdr_free(gf_xdr_request, &hostInfoRequest);

    unsigned int size;
    size = sizeof_host_reply(&hostReply);

    char *replyBuf;
    replyBuf = (char *)calloc(size, sizeof(char));
    if (NULL == replyBuf) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "calloc");
        outhdr.reqcode = ERROR_MEMORY;
        snprintf(hostReply.error_message, MAXMESSAGELENGTH,
                 "%s: calloc(%u) reply buf failed due to %m.", __func__, size);
        gf_errorback(fd, &outhdr, hostReply.error_message);

        FREEUP(hostReply.hosts);
        return;
    }

    /* XDR-encode the reply into replyBuf; ownership of replyBuf passes to
     * gf_add_data() on success (same pattern as the other handlers). */
    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, size, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, &hostReply, &outhdr, NULL, libxdr_host_reply,
                        &gptrSharedConf->qpinfo->num_index)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "libxdr_host_reply");
        outhdr.reqcode = ERROR_XDR;
        snprintf(hostReply.error_message, MAXMESSAGELENGTH,
                 "%s: gf_xdr_message(libxdr_host_reply) failed.", __func__);
        gf_errorback(fd, &outhdr, hostReply.error_message);

        xdr_destroy(&xdrs2);
        FREEUP(hostReply.hosts);

        return;
    }

    FREEUP(hostReply.hosts);

    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    /* Switch the connection to write-ready notification so the queued
     * reply gets flushed. */
    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    xdr_destroy(&xdrs2);

    return;
} // end function handle_hostinfo_req

/**
 * @brief socket handler: Handle JM_LOAD_REQ request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query node information request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the host load information.
 */
void
handle_load_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    struct QuickPoolHostLoadReply reply;
    memset(&reply, 0, sizeof(struct QuickPoolHostLoadReply));

    struct header outhdr;
    gf_init_header(&outhdr);

    struct request loadReq;
    if (!gf_xdr_request(xdrs, &loadReq, hdr, NULL)) {

        outhdr.reqcode = ERROR_XDR;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "Decode decision request xdr error.");
        gf_errorback(fd, &outhdr, reply.error_message);
        return;
    }

    unsigned int i, options=0, num=0, numHosts=0, numNeed=0;
    char *names, **preferredHosts = NULL, *strResReq=(char *)"-";
    for (i=0; i<loadReq.number; i++) {
        switch (loadReq.keyvalues[i].key) {
        case REQUEST_KEY_OPTIONS:
            options = atoi(loadReq.keyvalues[i].value);
            break;
        case REQUEST_KEY_NUMBER:
            num = atoi(loadReq.keyvalues[i].value);
            if (num > 0) {
                preferredHosts = (char **)calloc(num, sizeof(char *));
                if (NULL == preferredHosts) {
                    snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) preferred nodes failed due to %m.",
                             __func__, num*sizeof(char *));
                    outhdr.reqcode = ERROR_MEMORY;
                    gf_errorback(fd, &outhdr, reply.error_message);
                    return;
                }
            }
            break;
        case REQUEST_KEY_NAMES:
        {
            names = loadReq.keyvalues[i].value;

            unsigned int len = strlen(names);
            char *host = (char *)calloc(len+1, sizeof(char));
            if (NULL == host) {
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%d) preferred nodes failed due to %m.",
                         __func__, len+1);
                outhdr.reqcode = ERROR_MEMORY;
                gf_errorback(fd, &outhdr, reply.error_message);
                return;
            }

            char *token;
            while (NULL != (token = get_string_token(&names, " ", host, len+1))) {
                preferredHosts[numHosts++] = copy_string(token);
                len = strlen(names);

                if (numHosts >= num) {
                    break;
                }
            }
            FREEUP(host);
            break;
        }
        case REQUEST_KEY_NUMBER_NEED:
            numNeed = atoi(loadReq.keyvalues[i].value);
            break;
        case REQUEST_KEY_RESREQ:
            strResReq = loadReq.keyvalues[i].value;
            break;
        }
    }

    struct requirement req;
    memset(&req, 0, sizeof(struct requirement));

    int propt;
    propt = REQ_SELECT | REQ_ORDER | REQ_FILTER;
    if (options & DFT_FROMTYPE)
        propt |= REQ_TYPE;

    sptrLoadReqNode = find_node_byaddr(from, __func__);
    if (NULL == sptrLoadReqNode) {
        outhdr.reqcode = ERROR_NO_HOST;

        char addrbuf[24+1];
        snprintf(reply.error_message, MAXMESSAGELENGTH, "Load request come from an unknown Host[%s].",
                 gf_sockaddr_str(from, addrbuf));
        gf_errorback(fd, &outhdr, reply.error_message);

        gf_xdr_free(gf_xdr_request, &loadReq);

        return;
    }

    struct Tcl_Node tclNode;
    memset(&tclNode, 0, sizeof(struct Tcl_Node));
    set_tclnode(&tclNode, gptrMyHost, sptrLoadReqNode, TRUE);
    tclNode.ignDedicatedResource = FALSE;

    int ret = set_requirement(strResReq, gptrSharedConf->qpinfo, propt, &req);
    if (0 != ret || (evaluate(req.select, &tclNode, options & DFT_FROMTYPE) < 0)) {
        if (ret == PARSE_BAD_VAL) {
            outhdr.reqcode = ERROR_PARSE_VALUE;
            snprintf(reply.error_message, MAXMESSAGELENGTH,
                     "Bad resource requirement: resource value is not valid.");
        } else if (ret == PARSE_BAD_NAME) {
            outhdr.reqcode = ERROR_PARSE_RESOURCE;
            snprintf(reply.error_message, MAXMESSAGELENGTH,
                     "Bad resource requirement: resource key is not valid.");
        } else if (ret == PARSE_BAD_FILTER) {
            outhdr.reqcode = ERROR_PARSE_FILTER;
            snprintf(reply.error_message, MAXMESSAGELENGTH,
                     "Bad resource requirement: resource filter is not valid.");
        } else {
            outhdr.reqcode = ERROR_PARSE_REQUIREMENT;
            snprintf(reply.error_message, MAXMESSAGELENGTH,
                     "Bad resource requirement: cannot evaluate.");
        }
        gf_errorback(fd, &outhdr, reply.error_message);

        free_requirement(&req);
        gf_hash_free(tclNode.respair_table, free);
        gf_xdr_free(gf_xdr_request, &loadReq);

        return;
    }
    gf_hash_free(tclNode.respair_table, free);

    struct hostNode **candidates=NULL;
    unsigned int ncandidates=0;

    memset(&reply, 0, sizeof(struct QuickPoolHostLoadReply));
    if (1 == numHosts && 0 == strcmp(preferredHosts[0], gptrMyHost->name)) {

        reply.num_hosts = 1;
        reply.num_index = req.num_index;

        reply.host_load = (struct QuickPoolHostLoad *)calloc(reply.num_hosts,
                                                             sizeof(struct QuickPoolHostLoad));
        if (NULL == reply.host_load) {
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                     __func__, reply.num_hosts*sizeof(struct QuickPoolHostLoad));
            gf_errorback(fd, &outhdr, reply.error_message);

            free_requirement(&req);
            gf_xdr_free(gf_xdr_request, &loadReq);

            return;
        }

        unsigned int j;

        reply.indicies = (char **) calloc(req.num_index, sizeof(char *));
        if (NULL == reply.indicies) {
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                     __func__, req.num_index*sizeof(char *));
            gf_errorback(fd, &outhdr, reply.error_message);

            free_requirement(&req);
            gf_xdr_free(gf_xdr_request, &loadReq);

            return;
        }

        for (j = 0; j < req.num_index; j++) {
            int k;
            k = req.indicies[j];
            reply.indicies[j] = copy_string(gptrSharedConf->qpinfo->resource_table[k].name);
        }

        reply.host_load[0].host_name = copy_string(gptrMyHost->name);
        reply.host_load[0].status = (int *)calloc(1 + GET_INTNUM(reply.num_index), sizeof(int));
        if (NULL == reply.host_load[0].status) {
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                     __func__, (1 + GET_INTNUM(reply.num_index))*sizeof(int));
            gf_errorback(fd, &outhdr, reply.error_message);

            free_requirement(&req);
            gf_xdr_free(gf_xdr_request, &loadReq);

            return;
        }

        reply.host_load[0].load_values = (double *)calloc(reply.num_index, sizeof(double));
        if (NULL == reply.host_load[0].load_values) {
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                     __func__, reply.num_index*sizeof(double));
            gf_errorback(fd, &outhdr, reply.error_message);

            free_requirement(&req);
            gf_xdr_free(gf_xdr_request, &loadReq);

            return;
        }

        reply.host_load[0].status[0] = gptrMyHost->status[0];

        if (0 < gptrMyHost->num_GPU && NULL != gptrMyHost->gpuinfo_dynamic) {

            reply.host_load[0].gpu_load = (struct QuickPoolHostGPUload *)calloc(1, sizeof(struct QuickPoolHostGPUload));
            if (NULL == reply.host_load[0].gpu_load) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, sizeof(struct QuickPoolHostGPUload));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            reply.host_load[0].gpu_load->avail_gpu_mem = (double *)calloc(gptrMyHost->num_GPU,
                                                                          sizeof(double));
            if (NULL == reply.host_load[0].gpu_load->avail_gpu_mem) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(double));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            reply.host_load[0].gpu_load->gpu_id = (int *)calloc(gptrMyHost->num_GPU, sizeof(int));
            if (NULL == reply.host_load[0].gpu_load->gpu_id) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(int));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            reply.host_load[0].gpu_load->status = (char **)calloc(gptrMyHost->num_GPU, sizeof(char *));
            if (NULL == reply.host_load[0].gpu_load->status) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(char *));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            reply.host_load[0].gpu_load->gpu_mode = (int *)calloc(gptrMyHost->num_GPU, sizeof(int));
            if (NULL == reply.host_load[0].gpu_load->gpu_mode) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(int));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            reply.host_load[0].gpu_load->gpu_temperature = (int *)calloc(gptrMyHost->num_GPU,
                                                                         sizeof(int));
            if (NULL == reply.host_load[0].gpu_load->gpu_temperature) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(int));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            reply.host_load[0].gpu_load->gpu_ut = (float *)calloc(gptrMyHost->num_GPU, sizeof(float));
            if (NULL == reply.host_load[0].gpu_load->gpu_ut) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(float));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);

               return;
            }

            unsigned int n;
            for (n = 0; n < gptrMyHost->num_GPU; n++) {
                reply.host_load[0].gpu_load->avail_gpu_mem[n] = gptrMyHost->gpuinfo_dynamic[n].avail_gpu_mem;
                reply.host_load[0].gpu_load->status[n] = copy_string(gptrMyHost->gpuinfo_dynamic[n].status);
                reply.host_load[0].gpu_load->gpu_id[n] = gptrMyHost->gpuinfo_dynamic[n].gpu_id;
                reply.host_load[0].gpu_load->gpu_mode[n] = gptrMyHost->gpuinfo_dynamic[n].gpu_mode;
                reply.host_load[0].gpu_load->gpu_ut[n] = gptrMyHost->gpuinfo_dynamic[n].gpu_ut;
                reply.host_load[0].gpu_load->gpu_temperature[n] = gptrMyHost->gpuinfo_dynamic[n].gpu_temperature;
                
            }
        }
        
        if (STATUS_ISUNAVAIL(gptrMyHost->status)) {

            for (j=0; j < req.num_index; j++)
                reply.host_load[0].load_values[j] = INFINIT_LOAD;
            for (j=0; j < GET_INTNUM(req.num_index); j++)
                reply.host_load[0].status[j + 1] = 0;

            goto sendback;
        }

        for (j = 0; j < reply.num_index; j++) {
            int indx;

            indx =  req.indicies[j];
            if (NODE_ISBUSYON(gptrMyHost->status, indx)) {
                SET_BIT(INTEGER_BITS + j, reply.host_load[0].status);
            }

            reply.host_load[0].load_values[j] = gptrMyHost->load[indx];
        }

        goto sendback;
    }

    if (!gboolMaster) {

        gf_xdr_free(gf_xdr_request, &loadReq);
        free_string_array(preferredHosts, numHosts);

        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "[%s]: I am not the quickpool master.",
                 gptrMyHost->name);
        gf_errorback(fd, &outhdr, reply.error_message);
        return;
    }

    candidates = (struct hostNode **)calloc(HASH_NUM_ENTS(gptrMyCluster->node_table),
                                            sizeof(struct hostNode *));
    if (NULL == candidates) {

        gf_xdr_free(gf_xdr_request, &loadReq);
        free_string_array(preferredHosts, numHosts);

        outhdr.reqcode = ERROR_MEMORY;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "Not enough memoery to handle load request.");
        gf_errorback(fd, &outhdr, reply.error_message);
        return;
    }

    HASH_WALK_T walk;
    char *key;
    gf_hash_walk_start(gptrMyCluster->node_table, &walk);
    struct hostNode *host;
    while (NULL != (host = (struct hostNode *)gf_hash_walk(&walk, &key))) {

        if (host->server == CLIENT) {
            continue;
        }

        if ((fabs(req.val[MEM]) < INFINIT_LOAD) && (req.val[MEM] > host->static_info.maxmem)) {
            continue;
        }

        if ((fabs(req.val[SWP]) < INFINIT_LOAD) && (req.val[SWP] > host->static_info.maxswap)) {
            continue;
        }

        if ((fabs(req.val[TMP]) < INFINIT_LOAD) && (req.val[TMP] > host->static_info.maxtmp)) {
            continue;
        }

        for (i = 0; i < numHosts; i++) {
            if (issame_host(host->name, preferredHosts[i])) {
                break;
            }
        }

        if (i == numHosts && numHosts > 0) {
            continue;
        }

        memset(&tclNode, 0, sizeof(struct Tcl_Node));
        set_tclnode(&tclNode, host, sptrLoadReqNode, FALSE);
        if (evaluate(req.select, &tclNode, options & DFT_FROMTYPE) != 1) {
            gf_hash_free(tclNode.respair_table, free);
            continue;
        }
        gf_hash_free(tclNode.respair_table, free);

        candidates[ncandidates++] = host;
        if (0 < numNeed && ncandidates >= numNeed) {
            break;
        }
    }
    gf_hash_walk_end(&walk);

    if (ncandidates <= 0 ) {
        outhdr.reqcode = ERROR_NO_HOST;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "No valid host is found in my cluster.");
        gf_errorback(fd, &outhdr, reply.error_message);

        free_requirement(&req);
        gf_xdr_free(gf_xdr_request, &loadReq);
        free_string_array(preferredHosts, numHosts);
        FREEUP(candidates);

        return;
    }

    if (numHosts > 0 && ncandidates < numHosts) {
        outhdr.reqcode = ERROR_NO_HOST;
        snprintf(reply.error_message, MAXMESSAGELENGTH,
                 "Not all request host are found in my cluster.");
        gf_errorback(fd, &outhdr, reply.error_message);

        free_requirement(&req);
        gf_xdr_free(gf_xdr_request, &loadReq);
        free_string_array(preferredHosts, numHosts);
        FREEUP(candidates);

        return;
    }

    sort_hosts(&req, numHosts, preferredHosts, ncandidates, candidates, options);
    free_string_array(preferredHosts, numHosts);

    reply.num_hosts = numHosts > 0 ? numHosts : ncandidates;
    reply.num_index = req.num_index;

    reply.host_load = (struct QuickPoolHostLoad *)calloc(reply.num_hosts,
                                                         sizeof(struct QuickPoolHostLoad));
    if (NULL == reply.host_load) {
        outhdr.reqcode = ERROR_MEMORY;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%d) failed: %m",
                 __func__, reply.num_hosts);
        gf_errorback(fd, &outhdr, reply.error_message);

        free_requirement(&req);
        gf_xdr_free(gf_xdr_request, &loadReq);
        FREEUP(candidates);

        return;
    }

    reply.indicies=(char **)calloc(gptrSharedConf->qpinfo->num_index, sizeof(char *));
    if (NULL == reply.indicies) {
        outhdr.reqcode = ERROR_MEMORY;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%d) failed: %m",
                 __func__, gptrSharedConf->qpinfo->num_index);
        gf_errorback(fd, &outhdr, reply.error_message);

        free_requirement(&req);
        gf_xdr_free(gf_xdr_request, &loadReq);
        FREEUP(candidates);

        return;
    }

    unsigned int j;
    for (j = 0; j < req.num_index; j++) {
        int k;
        k = req.indicies[j];
        reply.indicies[j] = copy_string(gptrSharedConf->qpinfo->resource_table[k].name);
    }

    for (i = 0; i < reply.num_hosts; i++) {

        reply.host_load[i].host_name = copy_string(candidates[i]->name);
        reply.host_load[i].status = (int *)calloc(1 + GET_INTNUM(reply.num_index), sizeof(int));
        if (NULL == reply.host_load[i].status) {
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                     __func__, (1 + GET_INTNUM(reply.num_index))*sizeof(int));
            gf_errorback(fd, &outhdr, reply.error_message);

            free_requirement(&req);
            gf_xdr_free(gf_xdr_request, &loadReq);
            FREEUP(candidates);

            return;
        }

        reply.host_load[i].load_values = (double *)calloc(reply.num_index, sizeof(double));
        if (NULL == reply.host_load[i].load_values) {
            outhdr.reqcode = ERROR_MEMORY;
            snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                     __func__, reply.num_index*sizeof(double));
            gf_errorback(fd, &outhdr, reply.error_message);

            free_requirement(&req);
            gf_xdr_free(gf_xdr_request, &loadReq);
            FREEUP(candidates);

            return;
        }

        reply.host_load[i].status[0] = candidates[i]->status[0];
        if (STATUS_ISUNAVAIL(candidates[i]->status)) {

            for (j=0; j < req.num_index; j++) {
                reply.host_load[i].load_values[j] = INFINIT_LOAD;
            }

            for (j=0; j < GET_INTNUM(req.num_index); j++) {
                reply.host_load[i].status[j + 1] = 0;
            }

            continue;
        }

        for (j = 0; j < reply.num_index; j++) {
            int indx;

            indx =  req.indicies[j];
            if (NODE_ISBUSYON(candidates[i]->status, indx)) {
                SET_BIT(INTEGER_BITS + j, reply.host_load[i].status);
            }

            reply.host_load[i].load_values[j] = candidates[i]->load[indx];
        }

        if (0 < candidates[i]->num_GPU && NULL != candidates[i]->gpuinfo_dynamic) {

            reply.host_load[i].gpu_load = (struct QuickPoolHostGPUload *)calloc(1, sizeof(struct QuickPoolHostGPUload));
            if (NULL == reply.host_load[i].gpu_load) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, sizeof(struct QuickPoolHostGPUload));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->avail_gpu_mem = (double *)calloc(candidates[i]->num_GPU,
                                                                          sizeof(double));
            if (NULL == reply.host_load[i].gpu_load->avail_gpu_mem) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(double));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->status = (char **)calloc(candidates[i]->num_GPU,
                                                                  sizeof(char *));
            if (NULL == reply.host_load[i].gpu_load->status) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(char *));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->gpu_id = (int *)calloc(candidates[i]->num_GPU, sizeof(int));
            if (NULL == reply.host_load[i].gpu_load->gpu_id) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(int));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->gpu_mode = (int *)calloc(candidates[i]->num_GPU, sizeof(int));
            if (NULL == reply.host_load[i].gpu_load->gpu_mode) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(int));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->gpu_temperature = (int *)calloc(candidates[i]->num_GPU,
                                                                         sizeof(int));
            if (NULL == reply.host_load[i].gpu_load->gpu_temperature) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(int));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->gpu_ut = (float *)calloc(candidates[i]->num_GPU,
                                                                  sizeof(float));
            if (NULL == reply.host_load[i].gpu_load->gpu_ut) {
                outhdr.reqcode = ERROR_MEMORY;
                snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%ld) failed: %m",
                         __func__, gptrMyHost->num_GPU*sizeof(float));
                gf_errorback(fd, &outhdr, reply.error_message);

                free_requirement(&req);
                gf_xdr_free(gf_xdr_request, &loadReq);
                FREEUP(candidates);

               return;
            }

            reply.host_load[i].gpu_load->num_GPU = candidates[i]->num_GPU;

            unsigned int n;
            for (n = 0; n < candidates[i]->num_GPU; n++) {
                reply.host_load[i].gpu_load->avail_gpu_mem[n] = candidates[i]->gpuinfo_dynamic[n].avail_gpu_mem;
                reply.host_load[i].gpu_load->status[n] = copy_string(candidates[i]->gpuinfo_dynamic[n].status);
                reply.host_load[i].gpu_load->gpu_id[n] = candidates[i]->gpuinfo_dynamic[n].gpu_id;
                reply.host_load[i].gpu_load->gpu_mode[n] = candidates[i]->gpuinfo_dynamic[n].gpu_mode;
                reply.host_load[i].gpu_load->gpu_ut[n] = candidates[i]->gpuinfo_dynamic[n].gpu_ut;
                reply.host_load[i].gpu_load->gpu_temperature[n] = candidates[i]->gpuinfo_dynamic[n].gpu_temperature;
            }
        }        
    }

 sendback:
    gf_xdr_free(gf_xdr_request, &loadReq);
    free_requirement(&req);
    FREEUP(candidates);

    unsigned int size;
    size = reply.num_hosts * ((reply.num_index + 1) * 20 + MAXNAMELEN);

    char *replyBuf;
    replyBuf = (char *)calloc(size, sizeof(char));
    if (NULL == replyBuf) {

        outhdr.reqcode = ERROR_MEMORY;
        snprintf(reply.error_message, MAXMESSAGELENGTH, "%s: calloc(%d) failed: %m",
                 __func__, MAXMESSAGELENGTH);
        gf_errorback(fd, &outhdr, reply.error_message);

        gf_xdr_free(libxdr_load_reply, &reply);
        return;
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, size, XDR_ENCODE);
    if (!gf_xdr_message(&xdrs2, &reply, &outhdr, NULL, libxdr_load_reply, NULL)) {
        outhdr.reqcode = ERROR_XDR;
        snprintf(reply.error_message, MAXMESSAGELENGTH,
                 "%s: gf_xdr_message(libxdr_load_reply) failed.", __func__);
        gf_errorback(fd, &outhdr, reply.error_message);

        xdr_destroy(&xdrs2);
        gf_xdr_free(libxdr_load_reply, &reply);
        FREEUP(replyBuf);

        return;
    }
    gf_xdr_free(libxdr_load_reply, &reply);

    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    xdr_destroy(&xdrs2);

    return;
} // end function handle_load_req

/**
 * @brief socket handler: Handle JM_GET_RESOUINFO request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode query resource information request
 * @param[in]   from         #4: node who send me the request
 *
 * @note        Reply the cluster resource information.
 */
void
handle_resourceinfo_req(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from)
{
    struct QuickPoolResourceReply resourceInfoReply;
    memset(&resourceInfoReply, 0, sizeof(struct QuickPoolResourceReply));

    struct header outhdr;
    gf_init_header(&outhdr);

    if (!gboolMaster) {

        /* Resource queries are only answered by the master. */
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(resourceInfoReply.error_message, MAXMESSAGELENGTH, "I am not the quickpool master.");
    } else {
        struct request resourceRequest;
        memset(&resourceRequest, 0, sizeof(struct request));

        if (!gf_xdr_request(xdrs, &resourceRequest, hdr, NULL)) {

            log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_request");
            outhdr.reqcode = ERROR_XDR;
            snprintf(resourceInfoReply.error_message, MAXMESSAGELENGTH,
                     "Decode resource information request failed.");
        } else {
            /* Fill the reply; filter_resources() supplies the reply code. */
            outhdr.reqcode = filter_resources(&resourceRequest, &resourceInfoReply);
        }

        gf_xdr_free(gf_xdr_request, (void *)&resourceRequest);
    }

    unsigned int size = sizeof_resource_reply(&resourceInfoReply);

    char *replyBuf;
    replyBuf = (char *)calloc(size, sizeof(char));
    if (NULL == replyBuf) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "calloc");

        /* BUGFIX: the original fell through here and handed a NULL buffer to
         * xdrmem_create().  Report the failure to the peer and bail out. */
        outhdr.reqcode = ERROR_MEMORY;
        snprintf(resourceInfoReply.error_message, MAXMESSAGELENGTH,
                 "jm does not have enough memory to handle the request.");
        gf_errorback(fd, &outhdr, resourceInfoReply.error_message);

        gf_xdr_free(libxdr_resource_reply, &resourceInfoReply);
        return;
    }

    XDR xdrs2;
    xdrmem_create(&xdrs2, replyBuf, size, XDR_ENCODE);

    if (!gf_xdr_message(&xdrs2, &resourceInfoReply, &outhdr, NULL, libxdr_resource_reply, NULL)) {

        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_message");
        FREEUP(replyBuf);
        xdr_destroy(&xdrs2);
        /* BUGFIX: the reply structure was leaked on this path. */
        gf_xdr_free(libxdr_resource_reply, &resourceInfoReply);

        return;
    }

    /* Queue the encoded buffer on the channel and switch the fd to
     * write mode so epoll flushes the reply. */
    gf_add_data(fd, replyBuf, XDR_GETPOS(&xdrs2));

    struct epoll_event ev;
    memset(&ev, 0, sizeof(struct epoll_event));
    ev.data.fd = fd;
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(gf_get_epfd(), EPOLL_CTL_MOD, fd, &ev);

    xdr_destroy(&xdrs2);
    gf_xdr_free(libxdr_resource_reply, &resourceInfoReply);

    return;
} // end function handle_resourceinfo_req

/**
 * @brief socket handler: Handle JM_ADD_HOST request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode addming dynamice node request
 * @param[in]   from         #4: node from where receive the request
 * @param[in]   auth         #5: who issue the command
 *
 * @note        Only cluster admin or root could add dynamic node.
 */
void
handle_add_dynamic(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from,
                   struct auth *auth)
{
    char errMsg[MAXMESSAGELENGTH+1];

    struct header replyHdr;
    gf_init_header(&replyHdr);

    /* Only the master honours add-host requests. */
    if (!gboolMaster) {
        replyHdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(errMsg, MAXMESSAGELENGTH, "%s: I am not the master.", __func__);
        gf_errorback(fd, &replyHdr, errMsg);
        return;
    }

    /* Only a cluster administrator or root may add a dynamic host. */
    if (!is_clusteradmin(auth->user_name) && auth->uid != 0 ) {
        log_message(ERR, NOCLASS, "%s: uid <%d> not allowed to perform control operation",
                    __func__, auth->uid);

        replyHdr.reqcode = ERROR_PERMISSION;
        snprintf(errMsg, MAXMESSAGELENGTH, "%s: user[%s] has no permission to add host.",
                 __func__, auth->user_name);
        gf_errorback(fd, &replyHdr, errMsg);
        return;
    }

    char peerBuf[24+1];

    struct request addReq;
    memset(&addReq, 0, sizeof(struct request));
    if (!gf_xdr_request(xdrs, &addReq, hdr, NULL)) {
        log_message(ERR, NOCLASS, STR_FUNC_FAIL, __func__, "gf_xdr_request");

        replyHdr.reqcode = ERROR_XDR;
        snprintf(errMsg, MAXMESSAGELENGTH, "%s: gf_xdr_request failed, request from %s.",
                 __func__, gf_sockaddr_str(from, peerBuf));
        gf_errorback(fd, &replyHdr, errMsg);
        return;
    }

    /* Reject the request if any host it names is already configured. */
    struct hostNode *node;
    unsigned int kv;
    for (kv = 0; kv < addReq.number; kv++) {

        if (REQUEST_KEY_HOSTNAME != addReq.keyvalues[kv].key) {
            continue;
        }

        node = (struct hostNode *)gf_hash_find(gptrMyCluster->node_table,
                                               addReq.keyvalues[kv].value);
        if (NULL == node) {
            continue;
        }

        log_message(WARNING, NOCLASS, "%s: trying to add already configured node %s from %s",
                    __func__, addReq.keyvalues[kv].value,
                    gf_sockaddr_str(from, peerBuf));

        replyHdr.reqcode = ERROR_HOST_DEFINED;
        snprintf(errMsg, MAXMESSAGELENGTH,
                 "%s: node <%s> is already configured in the cluster.",
                 __func__, addReq.keyvalues[kv].value);
        gf_errorback(fd, &replyHdr, errMsg);

        gf_xdr_free(gf_xdr_request, &addReq);
        return;
    }

    /* Create the node; on failure the error code is in quickpool_errno
     * and errMsg has been filled in by add_hostnode(). */
    node = add_hostnode(&addReq, errMsg);
    if (NULL == node) {
        replyHdr.reqcode = quickpool_errno;
        gf_errorback(fd, &replyHdr, errMsg);

        gf_xdr_free(gf_xdr_request, &addReq);
        return;
    }

    /* Persist the request so the node is re-added on JM restart. */
    log_addnode(&addReq);
    gf_xdr_free(gf_xdr_request, &addReq);

    node->server = DYNAMIC_SERVER;  /* mark as dynamic so it can be removed later */

    gf_sendback_header(fd, &replyHdr);

    return;
} // end function handle_add_dynamic

/**
 * @brief socket handler: Handle JM_RM_HOST request.
 *
 * @param[in]   fd           #1: socket fd
 * @param[in]   hdr          #2: XDR header
 * @param[in]   xdrs         #3: xdrs for decode deleting dynamice node request
 * @param[in]   from         #4: node from where receive the request
 * @param[in]   auth         #5: who issue the command
 *
 * @note        Only cluster admin or root could delete dynamic node.
 */
void
handle_delete_dynamic(int fd, struct header *hdr, XDR *xdrs, struct sockaddr_in *from,
                      struct auth *auth)
{
    char message[MAXMESSAGELENGTH+1];

    struct header outhdr;
    gf_init_header(&outhdr);

    /* Only the master services host-removal requests. */
    if (!gboolMaster) {
        outhdr.reqcode = ERROR_WRONG_MASTER;
        snprintf(message, MAXMESSAGELENGTH, "%s: I am not the master.", __func__);
        gf_errorback(fd, &outhdr, message);
        return;
    }

    /* Only a cluster administrator or root may remove a dynamic host. */
    if (!is_clusteradmin(auth->user_name) && auth->uid != 0 ) {

        log_message(ERR, NOCLASS, "%s: uid <%d> not allowed to perform control operation",
                    __func__, auth->uid);

        outhdr.reqcode = ERROR_PERMISSION;
        /* BUGFIX: message previously said "add host" in the remove handler. */
        snprintf(message, MAXMESSAGELENGTH, "%s: user[%s] has no permission to remove host.",
                 __func__, auth->user_name);
        gf_errorback(fd, &outhdr, message);

        return;
    }

    char *hostName=NULL;
    if (!gf_xdr_string(xdrs, &hostName, hdr, NULL)) {
        outhdr.reqcode = ERROR_XDR;
        snprintf(message, MAXMESSAGELENGTH, "%s: decode host name failed.", __func__);
        gf_errorback(fd, &outhdr, message);

        /* BUGFIX: release anything the decoder allocated before failing
         * (FREEUP is a no-op on NULL). */
        FREEUP(hostName);
        return;
    }

    struct hostNode *node;
    node = (struct hostNode *)gf_hash_find(gptrMyCluster->node_table, hostName);
    if (NULL == node) {
        char addrbuf[24+1];

        log_message(WARNING, NOCLASS, "%s: trying to remove unknown host %s from %s",
                    __func__, hostName, gf_sockaddr_str(from, addrbuf));

        outhdr.reqcode = ERROR_NO_HOST;
        snprintf(message, MAXMESSAGELENGTH, "%s: node <%s> not found in the cluster.",
                 __func__, hostName);
        gf_errorback(fd, &outhdr, message);

        FREEUP(hostName);

        return;
    }

    /* Statically configured hosts cannot be removed at runtime. */
    if (DYNAMIC_SERVER != node->server) {

        outhdr.reqcode = ERROR_DYNAMIC_HOST;
        snprintf(message, MAXMESSAGELENGTH, "%s: node <%s> is not a dynamic server.",
                 __func__, hostName);
        gf_errorback(fd, &outhdr, message);

        FREEUP(hostName);

        return;
    }

    /* Persist the removal so the node stays gone across JM restarts. */
    log_removenode(node->name);

    gf_hash_remove(gptrMyCluster->node_table, node->name);
    free_hostnode(node);
    FREEUP(hostName);

    gf_sendback_header(fd, &outhdr);

    return;
} // end function handle_delete_dynamic

/**
 * @brief       Add node to the quickpool cluster.
 *
 * @param[in]   addnode      #1: add node request
 * @param[out]  message      #2: error message, set when something is wrong
 *
 * @retval      not NULL     #1: succeed, node that is added
 * @retval      NULL         #2: failed, error message is set
 *
 * @note        Create node structure and add it to the cluster node table.
 *              For dynamic node, its flag should be set after calling this 
 *              function.
 */
struct hostNode *
add_hostnode(struct request *addnode, char *message)
{
    char *strNode=NULL, *strModel=NULL, *strType=NULL, *strResource=NULL;
    int maxJobs=-1;

    /* Pull the attributes we understand out of the key/value request. */
    unsigned int i;
    for (i=0; i<addnode->number; i++) {

        switch (addnode->keyvalues[i].key) {
        case REQUEST_KEY_HOSTNAME:
            strNode = addnode->keyvalues[i].value;
            break;
        case REQUEST_KEY_HOST_MODEL:
            strModel = addnode->keyvalues[i].value;
            break;
        case REQUEST_KEY_HOST_TYPE:
            strType = addnode->keyvalues[i].value;
            break;
        case REQUEST_KEY_RESOURCE:
            strResource = addnode->keyvalues[i].value;
            break;
        case REQUEST_KEY_MAXJOB:
            maxJobs = gf_atoi(addnode->keyvalues[i].value, INFINIT_INT, -1);
            if (INFINIT_INT == maxJobs) {
                maxJobs = -1;   /* "infinite" and "invalid" both mean unlimited */
            }
            break;
        default:
            break;   /* other keys are not used when creating the node */
        }
    }

    if (NULL == strNode) {
        log_message(ERR, NOCLASS, "%s: input node name is NULL.", __func__);

        snprintf(message, MAXMESSAGELENGTH, "%s: input node name is NULL.", __func__);
        quickpool_errno = ERROR_NO_HOST;
        return NULL;
    }

    /* The node must be resolvable before it can join the cluster. */
    struct hostent *hp;
    hp = gf_gethostbyname(strNode);
    if (NULL == hp) {
        log_message(ERR, NOCLASS, "%s: cannot get node <%s> entry in system.",
                    __func__, strNode);

        snprintf(message, MAXMESSAGELENGTH, "%s: cannot get node <%s> entry.", __func__, strNode);
        quickpool_errno = ERROR_NO_HOST;
        return NULL;
    }

    /* Already configured: keep the previous definition. */
    struct hostNode *host;
    host = (struct hostNode *)gf_hash_find(gptrMyCluster->node_table, strNode);
    if (NULL != host) {
        log_message(WARNING, NOCLASS, "%s: host %s redefined, using previous definition",
                    __func__, strNode);

        return host;
    }

    host = create_hostnode();
    if (NULL == host) {
        log_message(ERR, NOCLASS, "%s: create_hostnode() failed.", __func__);

        snprintf(message, MAXMESSAGELENGTH, "%s: not enough memory to create node <%s>.",
                 __func__, strNode);
        quickpool_errno = ERROR_MEMORY;
        return NULL;
    }

    host->name = copy_string(strNode);

    /* Map the "name1 name2 ..." resource string onto the node's resource
     * bitmap; unknown names are logged and skipped. */
    if (NULL != strResource) {

        struct nameList *nodelist;
        nodelist = longstring_2namelist(strResource);
        if (NULL != nodelist) {

            for (i=0; i<nodelist->num_name; i++) {
                char *resName;

                /* A leading '!' is stripped before the lookup. */
                if ('!' == nodelist->names[i][0]) {
                    resName = nodelist->names[i];
                    resName ++;
                } else {
                    resName = nodelist->names[i];
                }

                int resIndex;
                resIndex = get_resource_index_fromhash(resName);
                if (0 > resIndex) {
                    log_message(ERR, NOCLASS, "%s: unknown resource name <%s> is specified when adding node <%s>.",
                                __func__, resName, strNode);

                    continue;
                }

                SET_BIT(resIndex, host->res_bitmap);
            }

            free_namelist(nodelist);
        }
    }

    if (NULL == strModel) {
        host->model_index = 0;
    } else if ((host->model_index = get_hostmodel_index(strModel)) < 0) {
        log_message(ERR, NOCLASS, "%s: unknown model <%s> is specified when adding node <%s>.",
                    __func__, strModel, host->name);

        /* BUGFIX: message and quickpool_errno were left unset on this path, so
         * the caller replied with an uninitialized buffer and a stale code. */
        snprintf(message, MAXMESSAGELENGTH, "%s: unknown model <%s> is specified when adding node <%s>.",
                 __func__, strModel, strNode);
        quickpool_errno = ERROR_NO_HOST; /* NOTE(review): no model-specific error code visible here -- confirm */
        free_hostnode(host);
        return NULL;
    }

    if (NULL == strType) {
        /* The migrant host gets the first host type
         * configured in the shared file if not specified
         * otherwise at the command line.
         */
        host->type_index = 0;
    } else if ((host->type_index = get_hosttype_index(strType)) < 0) {
        log_message(ERR, NOCLASS, "%s: unknown node type <%s> is specified when adding node <%s>.",
                    __func__, strType, host->name);

        /* BUGFIX: same as the model path above -- report the failure. */
        snprintf(message, MAXMESSAGELENGTH, "%s: unknown node type <%s> is specified when adding node <%s>.",
                 __func__, strType, strNode);
        quickpool_errno = ERROR_NO_HOST; /* NOTE(review): no type-specific error code visible here -- confirm */
        free_hostnode(host);
        return NULL;
    }

    host->index = HASH_NUM_ENTS(gptrMyCluster->node_table);
    host->max_jobs = maxJobs;

    host->noload_count = 0;
    host->server = STATIC_SERVER;   /* caller flips this for dynamic nodes */

    gf_hash_install(gptrMyCluster->node_table, host->name, (void *)host);

    return host;
} // end function add_hostnode

/**
 * @brief       Write add node request to file.
 *
 * @param[in]   addnode      #1: add node request
 *
 * @retval      0            #1: succeed
 * @retval      -1           #2: failed, open file or write record failure
 *
 * @note        Log request into file for loading when JM restart.
 */
int
log_addnode(struct request *addnode)
{
    char strDynNodeFile[PATH_MAX+1];
    snprintf(strDynNodeFile, PATH_MAX, "%s/dynamic.nodes", baseParams[BASE_WORKDIR].string_value);

    FILE *fileHandle;
    fileHandle = fopen(strDynNodeFile, "a+");
    if (NULL == fileHandle) {
        log_message(ERR, NOCLASS, "%s: fopen(%s) failed due to %m", __func__, strDynNodeFile);
        return -1;
    }

    struct record nodeRecord;
    snprintf(nodeRecord.version, MAXVERSIONLEN, "%d", QP_XDR_VERSION);
    nodeRecord.type = RECORD_TYPE_ADDNODE;
    nodeRecord.log_time = time(NULL);

    nodeRecord.numKVs = addnode->number;
    nodeRecord.KVs = (key_value_t *)calloc(nodeRecord.numKVs, sizeof(key_value_t));
    if (NULL == nodeRecord.KVs) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                    "calloc", nodeRecord.numKVs*sizeof(key_value_t));
        return -1;
    }

    unsigned int i;
    for (i=0; i<addnode->number; i++) {
        switch (addnode->keyvalues[i].key) {
        case REQUEST_KEY_HOSTNAME:
            nodeRecord.KVs[i].key = RECORD_KEY_HOST_NAME;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        case REQUEST_KEY_HOST_MODEL:
            nodeRecord.KVs[i].key = RECORD_KEY_HOST_MODEL;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        case REQUEST_KEY_HOST_TYPE:
            nodeRecord.KVs[i].key = RECORD_KEY_HOST_TYPE;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        case REQUEST_KEY_RUN_WINDOW:
            nodeRecord.KVs[i].key = RECORD_KEY_RUN_WINDOW;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        case REQUEST_KEY_DISPATCH_WINDOW:
            nodeRecord.KVs[i].key = RECORD_KEY_DISPATCH_WINDOW;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        case REQUEST_KEY_RESOURCE:
            nodeRecord.KVs[i].key = RECORD_KEY_RESOURCE;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        case REQUEST_KEY_MAXJOB:
            nodeRecord.KVs[i].key = RECORD_KEY_MAX_PROCESSOR;
            nodeRecord.KVs[i].value = copy_string(addnode->keyvalues[i].value);
            break;
        }
    }

    if (write_record(fileHandle, &nodeRecord) == -1) {
        fclose(fileHandle);
        return -1;
    }

    fclose(fileHandle);

    return (0);    
} // end function log_addnode

/* ------------------------------------------------
 *
 *  below are static functions used in this file
 *
 * ------------------------------------------------ */

/**
 * @brief       Write delete node request to file.
 *
 * @param[in]   nodename     #1: name of the node to be deleted
 *
 * @retval      0            #1: succeed
 * @retval      -1           #2: failed, open file or write record failure
 *
 * @note        Log request into file for loading when JM restart.
 */
static int
log_removenode(char *nodename)
{
    char strDynNodeFile[PATH_MAX+1];
    snprintf(strDynNodeFile, PATH_MAX, "%s/dynamic.nodes", baseParams[BASE_WORKDIR].string_value);

    FILE *fileHandle;
    fileHandle = fopen(strDynNodeFile, "a+");
    if (NULL == fileHandle) {
        log_message(ERR, NOCLASS, "%s: cannot fopen() the %s file; %m", __func__, strDynNodeFile);
        return -1;
    }

    struct record nodeRecord;
    snprintf(nodeRecord.version, MAXVERSIONLEN, "%d", QP_XDR_VERSION);
    nodeRecord.type = RECORD_TYPE_DELNODE;
    nodeRecord.log_time = time(NULL);
    
    unsigned int num = 1; /* hostname */

    nodeRecord.KVs = (key_value_t *)calloc(num, sizeof(key_value_t));
    if (NULL == nodeRecord.KVs) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                    "calloc", num*sizeof(key_value_t));
        return -1;
    }

    nodeRecord.numKVs = 0;   

    nodeRecord.KVs[nodeRecord.numKVs].key = RECORD_KEY_HOST_NAME;
    nodeRecord.KVs[nodeRecord.numKVs].value = copy_string(nodename);
    nodeRecord.numKVs ++;
    
    if (write_record(fileHandle, &nodeRecord) == -1) {
        fclose(fileHandle);
        return -1;
    }

    fclose(fileHandle); 

    return 0;
} // end function log_removenode

/* Populate a Tcl_Node view of `node` (with `from` as the requesting host)
 * for evaluation by the Tcl resource-requirement machinery. */
static void
set_tclnode(struct Tcl_Node *tclnode, struct hostNode *node,
            struct hostNode *from, bool_t checksyntax)
{
    /* Static capacity attributes. */
    tclnode->host_name = node->name;
    tclnode->max_cpu = node->static_info.maxcpu;
    tclnode->max_mem = node->static_info.maxmem;
    tclnode->max_swp = node->static_info.maxswap;
    tclnode->max_tmp = node->static_info.maxtmp;

    /* Dynamic state shared by reference, not copied. */
    tclnode->status = node->status;
    tclnode->load = node->load;

    /* Type/model names, falling back to "unknown" for invalid indices. */
    tclnode->host_type = (node->type_index>=0) ? gptrSharedConf->qpinfo->host_types[node->type_index] : (char *)"unknown";
    tclnode->host_model = (node->model_index>=0) ? gptrSharedConf->qpinfo->host_models[node->model_index] : (char *)"unknown";
    tclnode->from_type = (from->type_index>=0) ? gptrSharedConf->qpinfo->host_types[from->type_index] : (char *)"unknown";
    tclnode->from_model = (from->model_index>=0) ? gptrSharedConf->qpinfo->host_models[from->model_index] : (char *)"unknown";

    tclnode->ignDedicatedResource = TRUE;
    tclnode->res_bitmap = node->res_bitmap;

    /* Copy every resource-instance name/value pair into a fresh table. */
    tclnode->respair_table = gf_hash_make(HASH_NUM_ENTS(node->inst_table));

    HASH_WALK_T iter;
    gf_hash_walk_start(node->inst_table, &iter);

    char *entryKey;
    struct resourceInstance *inst;
    for (inst = (struct resourceInstance *)gf_hash_walk(&iter, &entryKey);
         NULL != inst;
         inst = (struct resourceInstance *)gf_hash_walk(&iter, &entryKey)) {
        gf_hash_install(tclnode->respair_table, inst->resource->name,
                        (void *)copy_string(inst->value));
    }

    tclnode->check_flag = checksyntax ? TCL_CHECK_SYNTAX : TCL_CHECK_EXPRESSION;

    return;
} // end function set_tclnode

/* Recursively compute the encoded size of a NUMA topology tree: each node
 * contributes its struct plus one extra int (presumably XDR framing --
 * confirm against the encoder). */
static unsigned int
sizeof_numa(numa_obj_t *obj)
{
    unsigned int total = sizeof(numa_obj_t) + sizeof(int);

    numa_obj_t *child;
    for (child = obj->child; NULL != child; child = child->back) {
        total += sizeof_numa(child) + sizeof(int);
    }

    return total;
}

/* Compute an upper-bound byte count for XDR-encoding `hostReply`, so the
 * caller can size the xdrmem buffer.  Field-by-field accounting: every
 * optional pointer that is non-NULL adds its encoded size.  Keep this in
 * sync with the libxdr encoder for QuickPoolHostReply. */
static unsigned int 
sizeof_host_reply(struct QuickPoolHostReply *hostReply)
{
    unsigned int i, size=0;

    struct QuickPoolHostInfo *host = NULL;
    /* Fixed header of the reply itself. */
    size += sizeof(struct QuickPoolHostReply);

    for (i = 0; i < hostReply->num_hosts; i++) {
        host = &(hostReply->hosts[i]);
        size += sizeof(struct QuickPoolHostInfo);
        /* Strings: content + length word + XDR padding/overhead. */
        if (host->host != NULL) {
            size += strlen(host->host) + sizeof(int) + XDR_OFFSET;
        }
        if (host->host_type != NULL) {
            size += strlen(host->host_type) + sizeof(int) + XDR_OFFSET;
        }
        if (host->host_model != NULL) {
            size += strlen(host->host_model) + sizeof(int) + XDR_OFFSET;
        }
        /* Per-index load vectors (one slot per configured load index). */
        if (host->load_threshold != NULL) {
            size += gptrSharedConf->qpinfo->num_index * sizeof(double);
        }
        if (host->load != NULL) {
            size += gptrSharedConf->qpinfo->num_index * sizeof(double);
        }       
        if (host->load_schedule != NULL) {
            size += gptrSharedConf->qpinfo->num_index * sizeof(float);
        }
        if (host->load_stop != NULL) {
            size += gptrSharedConf->qpinfo->num_index * sizeof(float);
        } 
        /* Bitmaps: GET_INTNUM converts a bit count to an int count. */
        if (host->busy_schedule != NULL) {
            size += GET_INTNUM(gptrSharedConf->qpinfo->num_index) * sizeof(int);
        }
        if (host->busy_stop != NULL) {
            size += GET_INTNUM(gptrSharedConf->qpinfo->num_index) * sizeof(int);
        }  
        if (host->res_bitmap != NULL) {
            size += GET_INTNUM(gptrSharedConf->qpinfo->num_index) * sizeof(int);
        }  
        if (host->real_load != NULL) {
            size += gptrSharedConf->qpinfo->num_index * sizeof(double);
        }          
        if (host->dispatch_window != NULL) {
            size += strlen(host->dispatch_window) + sizeof(int) + XDR_OFFSET;
        }        
        /* NUMA topology is sized recursively over the whole tree. */
        if (host->numa != NULL) {
            size += sizeof_numa(host->numa) + sizeof(int);
        }

        /* Per-GPU static info always present; dynamic info may be absent. */
        unsigned int j;
        for (j=0; j<host->num_GPU; j++) {
            size += sizeof(struct hostGPUList) + sizeof(struct hostGPULoad) + 2 * sizeof(int);
            size += strlen(host->gpuinfo_static[j].gpu_model) + sizeof(int) + XDR_OFFSET;
            size += sizeof(int);
            if (NULL == host->gpuinfo_dynamic) {
                continue;
            }
            size += strlen(host->gpuinfo_dynamic[j].status) + sizeof(int) + XDR_OFFSET;
            size += strlen(host->gpuinfo_dynamic[j].error) + sizeof(int) + XDR_OFFSET;
        }
    }

    return size;
} // end function sizeof_host_reply

/* Accept only IPv4 peers; anything else is logged and rejected. */
static int
is_from_known(struct sockaddr_in *from)
{
    if (AF_INET == from->sin_family) {
        return TRUE;
    }

    char addrbuf[24+1];
    log_message(ERR, NOCLASS, "%s: %s sin_family != AF_INET",
                __func__, gf_sockaddr_str(from, addrbuf));

    return FALSE;
}

/* Flag each candidate whose name appears in the preferred-host list
 * (conStatus = TRUE) and return how many candidates matched. */
static int
find_preferred_hosts(int ncandidates, struct hostNode **hosts, int numprefs, char **preferredhosts)
{
    int matched = 0;

    if (numprefs <= 0) {
        return 0;
    }

    int h;
    for (h = 0; h < ncandidates; h++) {

        hosts[h]->conStatus = FALSE;

        int p;
        for (p = 0; p < numprefs; p++) {
            if (issame_host(preferredhosts[p], hosts[h]->name)) {
                hosts[h]->conStatus = TRUE;
                matched++;
                break;
            }
        }
    }

    return matched;
}

/* Compare hosts[j-1] and hosts[j] by status during the sort pass.
 * Returns 0 after swapping (hosts[j] was clearly better), 1 when the pair
 * is already ordered (or both unavailable), and 2 when status alone cannot
 * decide and the caller must compare by load. */
static int
order_bystatus(int j, struct hostNode **hosts)
{
    int *prev = hosts[j-1]->status;
    int *curr = hosts[j]->status;

    if ((STATUS_ISOK(curr) && !STATUS_ISOK(prev))
        || (!STATUS_ISUNAVAIL(curr) && STATUS_ISUNAVAIL(prev))) {

        struct hostNode *hold = hosts[j];
        hosts[j] = hosts[j-1];
        hosts[j-1] = hold;
        return 0;
    }

    if ((STATUS_ISOK(prev) && !STATUS_ISOK(curr))
        || (!STATUS_ISUNAVAIL(prev) && STATUS_ISUNAVAIL(curr))
        || (STATUS_ISUNAVAIL(prev) && STATUS_ISUNAVAIL(curr))) {
        return 1;
    }

    return 2;
} // end function order_bystatus

/* Compute the "extra load" penalty for a pair of hosts: a preferred host
 * (conStatus TRUE) gets 0; otherwise the host's load at index `lidx`
 * scaled by `coef`, negated when the index is a decreasing metric. */
static void
mkexld(struct hostNode *hn1, struct hostNode *hn2, int lidx, double *exld1, double *exld2, float coef)
{
    struct hostNode *pair[2];
    double *out[2];

    pair[0] = hn1;
    out[0] = exld1;
    pair[1] = hn2;
    out[1] = exld2;

    int k;
    for (k = 0; k < 2; k++) {

        if (pair[k]->conStatus != FALSE) {
            *out[k] = 0;
            continue;
        }

        double penalty = pair[k]->load[lidx] * coef;
        *out[k] = gptrMyLoad[lidx].increasing ? penalty : -penalty;
    }

    return;
} // end function mkexld

#define NOTORDERED(inc,a,b)   ((inc) ? ((a) > (b)) : ((a) < (b)))

static int
sort_hosts(struct requirement *req, int numprefs, char **preferredhosts,
           int ncandidates, struct hostNode **hosts, int options)
{
    int i, j;
    int cc;
    int nec;
    float exld;
    float f;

    nec = find_preferred_hosts(ncandidates, hosts, numprefs, preferredhosts);

    for (i = req->num_order - 1; i >= 0; i--) {
        bool_t daoxu;

        int idx = req->order[i];
        if (idx < 0) {
            daoxu = TRUE;
        } else {
            daoxu = FALSE;
        }

        idx = abs(idx) - 1;

        int shrink;
        if (idx == R1M || idx == R5M || idx == R15M) {
            shrink = 5;
        } else {
            shrink = 8;
        }

        int cutoffs;
        if (i > 0) {
            int residual;

            residual = ncandidates - numprefs;
            if (residual < 1) {
                cutoffs = 0;
            } else {
                cutoffs = (residual - 1)/shrink + 1;
            }
        } else {
            if (numprefs > 0 && ncandidates >= numprefs) {
                cutoffs = numprefs;
            } else {
                cutoffs = ncandidates;
            }
        }

        bool_t incr;
        incr = gptrMyLoad[idx].increasing;
        if (daoxu) {
            incr = !incr;
        }

        float coef;
        if (numprefs > 0) {
            coef = 0.05 * nec/numprefs;
        } else {
            coef = 0.05;
        }

        bool_t swap;
        double exld1, exld2;
        if (i > 0) {
            double bestload = hosts[0]->load[idx];

            for (j = 1; j < ncandidates; j++) {
                if (NOTORDERED(incr, bestload, hosts[j]->load[idx]))
                    bestload = hosts[j]->load[idx];
            }

            swap = TRUE;
            j = 0;
            while (swap && (j < ncandidates-cutoffs)) {
                swap = FALSE;

                int k, order;
                for (k = ncandidates - 2; k >= j; k--) {
                    order = order_bystatus(k+1, hosts);
                    if (order == 0) {
                        swap = TRUE;
                        continue;
                    }

                    if (order == 1) {
                        continue;
                    }

                    mkexld(hosts[k], hosts[k+1], idx, &exld1, &exld2, coef);

                    if (NOTORDERED(incr, hosts[k]->load[idx] + exld1,
                                   hosts[k+1]->load[idx] + exld2)) {
                        struct hostNode *tmp;
                        swap = TRUE;
                        tmp = hosts[k];
                        hosts[k] = hosts[k+1];
                        hosts[k+1] = tmp;
                    }
                }

                j++;
            }

            for (j = ncandidates-cutoffs; j < ncandidates; j++) {
                if (fabs(hosts[j]->load[idx] - bestload) >= gptrMyLoad[idx].sigdiff) {
                    if (j == numprefs) {
                        if (i > 1) {
                            i = 1;
                        }
                    }
                    ncandidates = j;
                    break;
                }
            }
            if (j < ncandidates) {
                continue;
            }

            if (ncandidates == numprefs) {
                if (i > 1) {
                    i = 1;
                }
            }

            continue;
        }

        swap = TRUE;
        j = 0;
        while (swap && (j < cutoffs)) {
            swap = FALSE;

            int k, order;
            for (k=ncandidates-2; k>=j; k--) {
                order = order_bystatus(k+1, hosts);
                if (order == 0) {
                    swap = TRUE;
                    continue;
                }

                if (order == 1) {
                    continue;
	        }

                mkexld(hosts[k], hosts[k+1], idx, &exld1, &exld2, coef);

                if (NOTORDERED(incr, hosts[k]->load[idx] + exld1,
                               hosts[k+1]->load[idx] + exld2)) {
                    struct hostNode *tmp;
                    swap = TRUE;
                    tmp = hosts[k];
                    hosts[k] = hosts[k+1];
                    hosts[k+1] = tmp;
                }
            }
            j++;
        }

        if (ncandidates == numprefs) {
            if (i > 1) {
                i = 1;
            }
        }

        ncandidates = cutoffs;
    }

    if (NULL == sptrLoadReqNode) {
        return(ncandidates);
    }

    for (i = 0; i < ncandidates; i++) {
        if (hosts[i] == sptrLoadReqNode) {
            return(ncandidates);
        }
    }

    for (i = req->num_order - 1; i >= 0; i--) {
        double a, b;
        int lidx;

        lidx = req->order[i];
        if (hosts[ncandidates-1]->conStatus == TRUE) {
            cc = 1;
            exld = 0;
        } else {

            cc = 0;
            exld = hosts[ncandidates-1]->load[lidx] * nec /(ncandidates*25.0);
            if(!gptrMyLoad[lidx].increasing) {
                exld = -exld;
            }
        }
        a = sptrLoadReqNode->load[lidx];
        b = hosts[ncandidates-1]->load[lidx]+exld;

        if (lidx == R1M || lidx == R5M || lidx == R15M) {
            float cpuf;

            cpuf = (hosts[ncandidates-1]->model_index >= 0) ?
                gptrSharedConf->qpinfo->cpu_factor[hosts[ncandidates-1]->model_index]:1.0;

            if (0 <= sptrLoadReqNode->model_index) {
                f = gptrSharedConf->qpinfo->cpu_factor[sptrLoadReqNode->model_index]/cpuf;
            } else {
                f = 1.0 / cpuf;
            }

            f = f * gptrMyLoad[lidx].delta[cc] / cpuf;
        } else {
            f = gptrMyLoad[lidx].delta[cc];
        }

        if (gptrMyLoad[lidx].increasing ? (a - b > f) : (b - a > f)) {
            break;
        }
    }

    return ncandidates;
} // end function sort_hosts

/**
 * @brief Append one shared resource (and its per-instance host lists) to the
 *        resource reply.
 *
 * @param[in,out]  reply      #1: reply being built; resources[num_resources]
 *                                is filled in, but num_resources itself is
 *                                incremented by the caller
 * @param[in]      sharedres  #2: shared resource to serialize
 *
 * @return 0 on success, ERROR_MEMORY on allocation failure (error_message
 *         is set in the reply).
 */
static int
fill_resource_info(struct QuickPoolResourceReply *reply, struct sharedResource *sharedres)
{
    unsigned int num, numInstances;
    unsigned int nInstSlots;

    num = reply->num_resources;
    reply->resources[num].resource_name = copy_string(sharedres->resource->name);
    reply->resources[num].flags = sharedres->flags;

    /* FIX: calloc(0, ...) may legally return NULL; the original treated an
     * empty instance list as an out-of-memory failure. */
    nInstSlots = LIST_NUM_ENTS(sharedres->inst_list);
    reply->resources[num].instances = NULL;
    if (nInstSlots > 0) {
        if ((reply->resources[num].instances = (struct QuickPoolResourceMap *)
             calloc(nInstSlots, sizeof(struct QuickPoolResourceMap))) == NULL) {

            log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "calloc");
            snprintf(reply->error_message, MAXMESSAGELENGTH,
                     "jm does not have enough memory to handle the request.");
            return (ERROR_MEMORY);
        }
    }

    numInstances = 0;

    struct resourceInstance *inst;
    for (inst = (struct resourceInstance *)sharedres->inst_list->forw;
         inst != (struct resourceInstance *)sharedres->inst_list;
         inst = (struct resourceInstance *)inst->forw) {

        struct QuickPoolResourceMap *resMap;
        resMap = &(reply->resources[num].instances[numInstances]);
        resMap->total_value = copy_string(inst->value);
        resMap->reserve_value = copy_string("-");

        if (HASH_NUM_ENTS(inst->node_table) > 0) {

            resMap->hosts = (char **)calloc(HASH_NUM_ENTS(inst->node_table), sizeof(char *));
            if (NULL == resMap->hosts) {

                log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "calloc");
                snprintf(reply->error_message, MAXMESSAGELENGTH,
                         "jm does not have enough memory to handle the request.");
                return (ERROR_MEMORY);
            }
        }

        /* Walk the per-instance host table; with zero entries the walk
         * yields nothing, so the NULL hosts array is never dereferenced. */
        HASH_WALK_T walkNode;
        char *key;
        struct hostNode *instHost;
        unsigned int j=0;
        gf_hash_walk_start(inst->node_table, &walkNode);
        while (NULL != (instHost=(struct hostNode *)gf_hash_walk(&walkNode, &key))) {
            resMap->hosts[j++] = copy_string(instHost->name);
        }
        gf_hash_walk_end(&walkNode);

        resMap->num_hosts = HASH_NUM_ENTS(inst->node_table);

        numInstances++;
    }

    reply->resources[num].num_instances = numInstances;

    return 0;
} // end function fill_resource_info

/**
 * @brief Build the resource reply for a resource-info request, optionally
 *        filtered to the resource names carried in the request key/values.
 *
 * @param[in]   request  #1: request carrying REQUEST_KEY_NUMBER (count) and
 *                           REQUEST_KEY_NAMES (space-separated names)
 * @param[out]  reply    #2: reply filled with matching resources
 *
 * @return 0 on success, ERROR_NO_RESOURCE or ERROR_MEMORY otherwise
 *         (error_message is set in the reply).
 */
static int
filter_resources(struct request *request, struct QuickPoolResourceReply *reply)
{
    int baseReplyCode;

    if (0 == HASH_NUM_ENTS(ghashResource)) {
        snprintf(reply->error_message, MAXMESSAGELENGTH,
                 "No share resource in the quickpool cluster.");
        return ERROR_NO_RESOURCE;
    }

    reply->num_resources = 0;

    if ((reply->resources = (struct QuickPoolResourceInfo *)
         calloc(HASH_NUM_ENTS(ghashResource), sizeof(struct QuickPoolResourceInfo))) == NULL) {
        log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "calloc");
        snprintf(reply->error_message, MAXMESSAGELENGTH,
                 "jm does not have enough memory to handle the request.");
        return (ERROR_MEMORY);
    }

    unsigned int i;
    unsigned int num=0, numres=0;
    char *names = NULL, **resnames=NULL;
    for (i=0; i<request->number; i++) {
        switch (request->keyvalues[i].key) {
        case REQUEST_KEY_NUMBER:
            num = atoi(request->keyvalues[i].value);
            if (num > 0) {
                resnames = (char **)calloc(num, sizeof(char *));
                if (NULL == resnames) {
                    log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "calloc");
                    snprintf(reply->error_message, MAXMESSAGELENGTH,
                             "jm does not have enough memory to handle the request.");
                    return (ERROR_MEMORY);
                }
            }
            break;
        case REQUEST_KEY_NAMES:
            /* FIX: without a preceding REQUEST_KEY_NUMBER there is no name
             * table; the original dereferenced the NULL resnames pointer. */
            if (NULL == resnames || 0 == num) {
                break;
            }

            names = request->keyvalues[i].value;

            unsigned int len = strlen(names);
            char *resource = (char *)calloc(len+1, sizeof(char));
            if (NULL == resource) {
                log_message(ERR, NOCLASS, STR_FUNC_FAIL_M, __func__, "calloc");
                snprintf(reply->error_message, MAXMESSAGELENGTH,
                         "jm does not have enough memory to handle the request.");
                free_string_array(resnames, numres);  /* FIX: leaked before */
                return (ERROR_MEMORY);
            }

            char *token;
            while (NULL != (token = get_string_token(&names, " ", resource, len+1))) {
                resnames[numres++] = copy_string(token);
                len = strlen(names);

                if (numres >= num) {
                    break;
                }
            }
            FREEUP(resource);
            break;
        default:
            break;
        }
    }

    struct sharedResource *res;
    if (0 == num) {
        /* No explicit name filter: dump every shared resource. */
        HASH_WALK_T walk;
        char *key;
        gf_hash_walk_start(ghashResource, &walk);
        while (NULL != (res=(struct sharedResource *)gf_hash_walk(&walk, &key))) {

            if ((baseReplyCode = fill_resource_info(reply, res)) != 0) {

                free_string_array(resnames, numres);
                gf_hash_walk_end(&walk);

                return baseReplyCode;
            }

            reply->num_resources++;
        }
        gf_hash_walk_end(&walk);
    } else {
        for (i = 0; i < numres; i++) {

            res = (struct sharedResource *)gf_hash_find(ghashResource, resnames[i]);
            if (NULL != res) {

                if ((baseReplyCode = fill_resource_info(reply, res)) != 0) {
                    free_string_array(resnames, numres);
                    return baseReplyCode;
                }

                reply->num_resources++;
            } else {
                snprintf(reply->error_message, MAXMESSAGELENGTH,
                         "Resource <%s> is not used by cluster <%s>.",
                         resnames[i], gptrMyCluster->name);
                free_string_array(resnames, numres);

                return ERROR_NO_RESOURCE;
            }
        }
    }

    free_string_array(resnames, numres);

    return 0;
} // end function filter_resources

/**
 * @brief       get child jm service tmp directory .****.tmpdir directory file name jobid values
 *
 * @param[in]   childJobID           #1: job id value
 * @param[in]   jobIDNum             #2: job id num
 */
/**
 * @brief       get child jm service tmp directory .****.tmpdir directory file name jobid values
 *
 * Scans gstrTmpDir for entries named ".<cluster>.tmpdir.<jobid>" and collects
 * the <jobid> suffixes.
 *
 * @param[out]  childJobID    #1: receives a malloc'ed array of job-id strings
 *                                (caller frees); set even on failure so the
 *                                entries collected so far can be released
 * @param[out]  jobIDNum      #2: incremented once per entry found
 *                                NOTE(review): assumes the caller pre-initializes
 *                                it (to 0) — confirm at the call site
 *
 * @return 0 on success (including an unreadable directory), -1 on allocation
 *         failure.
 */
static int
childjm_get_tmp_tmpdir_jobid(char ***childJobID, unsigned int *jobIDNum)
{
    char dirName[PATH_MAX+1] = {0};
    char **strID = NULL;
    char *pos = NULL;
    int len = 0;
    DIR *dp = NULL;

    struct dirent *dirInfo = NULL;

    snprintf(dirName, PATH_MAX, ".%s.tmpdir.", gptrMyCluster->name);

    dp = opendir(gstrTmpDir);
    if (NULL == dp) {
        /* FIX: the original fell through and called closedir(NULL) — UB. */
        *childJobID = NULL;
        return 0;
    }

    while ((dirInfo = readdir(dp))) {
        if (memcmp(dirName, dirInfo->d_name, strlen(dirName)) != 0) {
            continue;
        }

        /* The id buffer is sized to hold the whole entry name, so the
         * sscanf of the ".tmpdir." suffix below cannot overflow it. */
        char *id = (char *) malloc(strlen(dirInfo->d_name) + 1);
        if (NULL == id) {
            log_message(ERR, NOCLASS, "%s: malloc failed, errmsg:[%s]",
                    __func__, strerror(errno));
            closedir(dp);           /* FIX: DIR handle leaked on this path */
            *childJobID = strID;
            return -1;
        }

        /* FIX: default to "" — the original left the slot uninitialized
         * when the ".tmpdir." marker was not found. */
        id[0] = '\0';
        pos = strstr(dirInfo->d_name, ".tmpdir.");
        if (NULL != pos) {
            len = strlen(".tmpdir.");
            sscanf(pos + len, "%s", id);
        }

        /* FIX: grow via a temporary; strID = realloc(strID, ...) loses the
         * old array (and every collected string) on failure. */
        char **grown = (char **)realloc(strID, sizeof(char *) * (*jobIDNum + 1));
        if (NULL == grown) {
            log_message(ERR, NOCLASS, "%s: realloc failed, errmsg:[%s]",
                    __func__, strerror(errno));
            free(id);
            closedir(dp);
            *childJobID = strID;
            return -1;
        }
        strID = grown;
        strID[*jobIDNum] = id;
        (*jobIDNum)++;
    }

    *childJobID = strID;
    closedir(dp);

    return 0;
} // end function childjm_get_tmp_tmpdir_jobid
