/*
 * Copyright (c) 2018-2023 北京华恒盛世科技有限公司
 * QUICKPOOL is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
*/

#include "tclinfo.h"
#include "jm.h"

static HASH_TABLE_T *shashNodeLoad;
static time_t stimeCheckJobs;

static void chkpnt_migrate_job(time_t);
static void suspend_job(struct jobEntry *, int, int, int);
static int resume_job(struct jobEntry *, int, int);
static void finish_signal_job(struct jobEntry *);

static int set_tclnode_load(struct QuickPoolHostInfo *, struct Tcl_Node *);
static void check_job_limits(struct jobEntry *);

static bool_t window_ok(struct jobEntry *);

/**
 * @brief       Go through JM job list and do check one by one.
 *
 * @param[in]   session      #1: session time
 *
 * @note        Do check for following cases:
 *              1. if job is finished or not.
 *              2. if job usage exceeds its setting limit.
 *              3. if job is running more than its runtime limit.
 *              4. if it is time to terminate a job.
 *              5. if run window is closed or opened, for suspend or resume a job.
 *              6. if to migrate a job, if check point a job.
 *              7. if resume a job when node load is ok.
 *              8. if suspend a job when node load is higher than load_stop.
 */
void
check_jmjobs(time_t session)
{
    if (0 == stimeCheckJobs) {
        stimeCheckJobs = session;
    }

    if (0 >= LIST_NUM_ENTS(glistJmJobs)) {
        stimeCheckJobs = session;
        return;
    }

    check_finish_jobs();

    struct jobEntry *job, *next;
    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = next) {

        next = (struct jobEntry *)job->forw;

        struct jobInfo *jobBase = &job->job_base;
        if (IS_JOB_ENDED(jobBase->status) || (jobBase->status & JOB_STATUS_PEND)) {
            continue;
        }

        check_job_limits(job);

        /* accumulate run time only while the job actually runs user code */
        if ((jobBase->status & JOB_STATUS_RUN) && !(jobBase->status & JOB_STATUS_PREJOB)) {

            job->running += (session - stimeCheckJobs);
        }

        /* case 3: job has run longer than its runtime limit */
        if (job->running > jobBase->resource_limits[RESOURCE_LIMIT_RUN].rlim_curl) {

            if ((jobBase->terminate_command == NULL) || (jobBase->terminate_command[0] == '\0')) {

                char strJobId[32];
                if (job->running > jobBase->resource_limits[RESOURCE_LIMIT_RUN].rlim_curl+WARN_TIME
                    && job->runtime_passed) {

                    if ((IS_JOB_SUSPEND(jobBase->status))
                        && (jobBase->reasons & SUSPEND_REASON_RESLIMIT)
                        && (SUBREASON_RUNTIME == jobBase->subreasons)) {
                        continue;
                    } else if (jobBase->status & JOB_STATUS_KILL) {
                        continue;
                    } else {

                        /* BUGFIX: jobid_2string() yields a string; the old
                         * "%d" conversion mismatched the char* argument
                         * (undefined behavior). Use "%s" as at the warning
                         * log below. */
                        log_message(INFO, NOCLASS, "%s: warning period expired, killing the job <%s>.",
                                    __func__, jobid_2string(jobBase->jobid, strJobId));

                        signal_job_begin(job, QUICKPOOL_SIGKILL_RUNLIMIT, 0, TRUE);
                        jmlog_newstatus(job);
                        jobBase->status |= JOB_STATUS_KILL;
                    }
                } else if (!job->runtime_passed) {

                    /* first crossing of the limit: send the warning signal once */
                    log_message(INFO, NOCLASS, "%s: sending warning signal to job <%s>.",
                                __func__, jobid_2string(jobBase->jobid, strJobId));
                    send_signal_2job(job, SIGUSR2, FALSE);
                    job->runtime_passed = TRUE;
                }

                continue;
            }

            if ((IS_JOB_SUSPEND(jobBase->status))
                && (jobBase->reasons & SUSPEND_REASON_RESLIMIT)
                && (SUBREASON_RUNTIME == jobBase->subreasons)) {
                continue;
            } else {
                signal_job_begin(job, QUICKPOOL_SIGKILL_RUNLIMIT, 0, TRUE);
                jmlog_newstatus(job);
            }

            continue;
        }

        /* case 4: terminate time has arrived (unless a force kill is in flight) */
        if (jobBase->submit->terminate_time && session > jobBase->submit->terminate_time
            && !(jobBase->job_attribute & JOB_FORCE_KILL)) {

            if ((jobBase->terminate_command == NULL) || (jobBase->terminate_command[0] == '\0')) {
                if (session > jobBase->submit->terminate_time + WARN_TIME && job->runtime_passed) {

                    if ((IS_JOB_SUSPEND(jobBase->status))
                        && (jobBase->reasons & SUSPEND_REASON_RESLIMIT)
                        && (SUBREASON_DEADLINE == jobBase->subreasons)) {
                        continue;
                    } else if (jobBase->status & JOB_STATUS_KILL) {
                        continue;
                    } else {

                        signal_job_begin(job, QUICKPOOL_SIGKILL_DEADLINE, 0, TRUE);
                        jmlog_newstatus(job);
                        jobBase->status |= JOB_STATUS_KILL;
                    }
                } else if (!job->runtime_passed) {
                    send_signal_2job(job, SIGUSR2, FALSE);
                    job->runtime_passed = TRUE;
                }
            } else {
                if (session > jobBase->submit->terminate_time) {

                    if ((IS_JOB_SUSPEND(jobBase->status))
                        && (jobBase->reasons & SUSPEND_REASON_RESLIMIT)
                        && (SUBREASON_DEADLINE == jobBase->subreasons)) {
                        continue;
                    } else {
                        signal_job_begin(job, QUICKPOOL_SIGKILL_DEADLINE, 0, TRUE);
                        jmlog_newstatus(job);
                    }
                }
            }

            continue;
        }

        /* case 5: suspend when the run window closes, resume when it opens */
        if (!window_ok(job) && !(jobBase->job_attribute & JOB_URGENT_NOSTOP)) {

            suspend_job(job, QUICKPOOL_SIGSTOP_WINDOW, SUSPEND_REASON_QUEUE_WINDOW, 0);
            continue;
        } else {

            resume_job(job, QUICKPOOL_SIGCONT_WINDOW, SUSPEND_REASON_QUEUE_WINDOW);
            continue;
        }
    } /* for (job = jobEntry; ..; ..) */

    chkpnt_migrate_job(session);

    /* the load-based checks below run at most once per gintJMSleepTime */
    if (session - stimeCheckJobs < gintJMSleepTime) {
        stimeCheckJobs = session;
        return;
    }

    /* rebuild the per-cycle node load cache */
    if (NULL != shashNodeLoad) {
        gf_hash_free(shashNodeLoad, free);
    }
    shashNodeLoad = gf_hash_make(23);

    /* seed the cache with this host's own load information */
    struct QuickPoolHostInfo *load;
    load = (struct QuickPoolHostInfo *)calloc(1, sizeof(struct QuickPoolHostInfo));
    if (NULL == load) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                    "calloc", sizeof(struct QuickPoolHostInfo));
        relaunch();
    }

    load->jm_status = gptrMyHost->status;
    load->load = gptrMyHost->load;
    load->host = gptrMyHost->name;

    load->host_type = gptrSharedConf->qpinfo->host_types[gptrMyHost->type_index];
    load->host_model = gptrSharedConf->qpinfo->host_models[gptrMyHost->model_index];
    load->ncpus = gptrMyHost->static_info.maxcpu;
    load->maxmem = gptrMyHost->static_info.maxmem;
    load->maxswap = gptrMyHost->static_info.maxswap;
    load->maxtmp = gptrMyHost->static_info.maxtmp;
    load->res_bitmap = gptrMyHost->res_bitmap;
    gf_hash_install(shashNodeLoad, load->host, (void *)load);

    /* case 7: try to resume system-suspended jobs whose load recovered;
     * at most one job is resumed per cycle (note the final break) */
    for (job = (struct jobEntry *)glistJmJobs->back; job != (struct jobEntry *)glistJmJobs;
         job = next) {

        next = (struct jobEntry *)job->back;

        if (!(job->job_base.status & JOB_STATUS_SSUSP) || job->job_base.act_pid) {
            continue;
        }

        /* window, user-stop and JS-lock suspensions are not load-driven */
        if ((job->job_base.reasons & SUSPEND_REASON_QUEUE_WINDOW)
            || (job->job_base.reasons & SUSPEND_REASON_USER_STOP)
            || (job->job_base.reasons & SUSPEND_REASON_JSLOCK)) {

            continue;
        }

        unsigned int numHosts = job->job_base.number_node;

        struct Tcl_Node *tclNodes = NULL;
        if (NULL != job->resume_cond) {

            tclNodes = (struct Tcl_Node *)calloc(numHosts, sizeof(struct Tcl_Node));
            if (NULL == tclNodes) {
                log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                            "calloc", numHosts*sizeof(struct Tcl_Node));
                relaunch();
            }
        }

        struct QuickPoolHostInfo **loadV;
        loadV = (struct QuickPoolHostInfo **)calloc(numHosts, sizeof(struct QuickPoolHostInfo *));
        if (NULL == loadV) {
            log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                        "calloc", numHosts*sizeof(struct QuickPoolHostInfo *));
            relaunch();
        }

        unsigned int i;
        struct QuickPoolHostInfo *load = NULL;
        for (i = 0; i < numHosts; i++) {

            struct nodeExecution *execInfo = &(job->job_base.node_execinfo[i]);
            load = (struct QuickPoolHostInfo *)gf_hash_find(shashNodeLoad, execInfo->host_name);
            if (NULL == load) {
                continue;
            }

            if (NULL != tclNodes) {
                if (set_tclnode_load(load, &tclNodes[i]) < 0) {
                    break;
                }
            }

            loadV[i] = load;
        }

        char strJobId[32];
        int lastReasons = job->job_base.reasons;
        int lastSubreasons = job->job_base.subreasons;
        bool_t resume = TRUE;
        if (0 < numHosts) {

            resume = check_resume_byload(job->job_base.jobid, numHosts,
                                         job->job_base.thresholds, loadV,
                                         &job->job_base.reasons, &job->job_base.subreasons,
                                         job->resume_cond, tclNodes, gptrSharedConf->qpinfo);

            if (NULL != tclNodes) {
                for (i = 0; i < numHosts; i++) {
                     FREEUP(tclNodes[i].load);
                }

                FREEUP(tclNodes);
            }
        } else {
            log_message(ERR, NOCLASS, "%s: no valid load information is found for job <%s>",
                        __func__, jobid_2string(job->job_base.jobid, strJobId));
        }

        FREEUP(loadV);

        if (!resume) {
            log_message(DEBUG, DEBUGSIGNAL, "%s: Can't resume job <%s> due to %d and %d.",
                        __func__, jobid_2string(job->job_base.jobid, strJobId),
                        job->job_base.reasons, job->job_base.subreasons);

            if ((job->job_base.reasons != lastReasons
                 || (job->job_base.reasons == lastReasons
                     && job->job_base.subreasons != lastSubreasons))
                && (session - job->report_time > gintUpdateUsage * gintJMSleepTime)) {

                job->comm_failcnt ++;
            }

            continue;
        }

        if (resume_job(job, QUICKPOOL_SIGCONT_LOAD, LOAD_REASONS) < 0) {
            continue;
        }

        break;
    }

    /* case 8: suspend at most one running job whose load exceeds load_stop */
    int reasons, subreasons;
    for (job=(struct jobEntry *)glistJmJobs->forw; job!=(struct jobEntry *)glistJmJobs;
         job=next) {

        next = (struct jobEntry *)job->forw;

        /* BUGFIX: reset per job. Previously these were set once before the
         * loop, so reasons found for a job that later hit the "load == NULL"
         * continue could leak into the next job's suspend decision. */
        reasons = 0;
        subreasons = 0;

        if ((job->job_base.status & JOB_STATUS_RUN)
            && session >= job->job_base.start_time + gintJMSleepTime) {

            if (job->post_started) {
                continue;
            }

            if (job->job_base.job_attribute & JOB_URGENT_NOSTOP) {
                continue;
            }

            if (!JOB_STARTED(job)) {
                continue;
            }

            unsigned int numHosts = job->job_base.number_node;

            struct Tcl_Node tclNode;
            memset(&tclNode, 0, sizeof(struct Tcl_Node));

            unsigned int i, j, numLoad=0;
            struct QuickPoolHostInfo *load=NULL;
            for (i = 0; i < numHosts; i++) {

                struct nodeExecution *execInfo = &(job->job_base.node_execinfo[i]);
                load = (struct QuickPoolHostInfo *)gf_hash_find(shashNodeLoad, execInfo->host_name);
                if (NULL == load) {
                    continue;
                }

                for (j=R1M; j<MIN(gptrSharedConf->qpinfo->num_index, job->job_base.thresholds.num_index);
                     j++) {

                    /* skip indices with no meaningful value or threshold */
                    if (load->load[j] >= INFINIT_LOAD || load->load[j] <= -INFINIT_LOAD
                        || job->job_base.thresholds.load_stop[numLoad][j] >= INFINIT_LOAD
                        || job->job_base.thresholds.load_stop[numLoad][j] <= -INFINIT_LOAD) {

                        continue;
                    }

                    if (gptrSharedConf->qpinfo->resource_table[j].order_type == INCR) {

                        if (load->load[j] >= job->job_base.thresholds.load_stop[numLoad][j]) {
                            reasons |= SUSPEND_REASON_LOADSTOP;
                            subreasons = j;
                            break;
                        }
                    } else {

                        if (load->load[j] <= job->job_base.thresholds.load_stop[numLoad][j]) {
                            reasons |= SUSPEND_REASON_LOADSTOP;
                            subreasons = j;
                            break;
                        }
                    }
                }
                numLoad ++;

                if (0 == reasons && NULL != job->stop_cond) {
                    int returnCode;

                    returnCode = set_tclnode_load(load, &tclNode);
                    /* BUGFIX: set_tclnode_load() returns 0 on success (the
                     * caller above tests "< 0"), so the old "0 < returnCode"
                     * check could never be true and the stop condition was
                     * never evaluated. */
                    if (0 <= returnCode
                        && evaluate(job->stop_cond->select, &tclNode, DFT_FROMTYPE) == 1) {

                        reasons |= SUSPEND_REASON_STOPCOND;
                        FREEUP(tclNode.load);

                        break;
                    }

                    FREEUP(tclNode.load);
                }
            }

            if (0 == reasons) {
                continue;
            }

            if (NULL == load) {
                continue;
            }

            suspend_job(job, QUICKPOOL_SIGSTOP_LOAD, reasons, subreasons);
            break;
        }
    }

    stimeCheckJobs = session;

    return;
} // end function check_jmjobs

/**
 * @brief       Go through JM job list to check if job is finished or not.
 *
 * @note        If job is not finished, kill 0 on job pgid or job pid.
 *              If job control script is started, kill 0 on act_pid.
 */
void
check_finish_jobs(void)
{
    struct jobEntry *job, *next;

    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = next) {

        next = (struct jobEntry *)job->forw;

        struct jobInfo *jobBase = &job->job_base;

        int ret;
        /* Job has not ended and post-processing has not finished: probe
         * whether its processes are still alive. */
        if (!(IS_JOB_ENDED(jobBase->status)) && !(IS_JOB_POST_DONE(jobBase->status)
                                                  || IS_JOB_POST_ERR(jobBase->status))) {

            /* signal 0 checks existence without delivering anything */
            ret = send_signal_2job(job, 0, FALSE);
            if (0 > ret
                || ((jobBase->job_attribute & JOB_FORCE_KILL)
                    && jobBase->submit->terminate_time < time(0)-MAX(6,gintJobTerminateInterval*3))) {

                /* process gone, or a force-kill overstayed its grace period */
                jobpid_lost(job);
	    }

            /* multi-node jobs: pull accounting data from the other nodes */
            if (1 < job->job_base.number_node) {
                read_job_acct(job);
            }
	}

	if (0 < jobBase->act_pid) {

            /* An action (control) script is recorded as running: probe its
             * process group first, then the pid itself.
             * NOTE(review): the probe uses SIGCONT rather than signal 0, so
             * it also resumes a stopped script — presumably intentional;
             * confirm before changing. */
            ret = killpg(jobBase->act_pid, SIGCONT);
            if (0 == ret) {
		continue;
            }

            ret = kill(jobBase->act_pid, SIGCONT);
	    if (0 == ret) {
		continue;
            }

            /* the action script has exited: finish the pending signal action */
            finish_signal_job(job);
	    continue;
        }

        /* fully ended (or pending again) jobs are cleaned up here */
        if (IS_JOB_ENDED(jobBase->status) || IS_JOB_POST_DONE(jobBase->status)
            || IS_JOB_POST_ERR(jobBase->status) || (jobBase->status & JOB_STATUS_PEND)) {

            job_finish(job);
        }
    }

    return;
} // end function check_finish_jobs

/* ------------------------------------------------
 *
 *  below are static functions used in this file
 *
 * ------------------------------------------------ */

/**
 * @brief       Go through JM job list and do check point and migration.
 *
 * @param[in]   session      #1: session time
 *
 * @note        migrate one job one time.
 *              do checkpoint every chpnt_duration seconds.
 */
static void
chkpnt_migrate_job(time_t session)
{
    char migrating = FALSE;

    /* first pass: is any job already migrating? Only one migration may be
     * in flight at a time. */
    struct jobEntry*job, *next;
    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = (struct jobEntry *)job->forw) {

        if (job->job_base.status & JOB_STATUS_MIG) {
            migrating = TRUE;
            break;
        }
    }

    for (job = (struct jobEntry *)glistJmJobs->forw; job != (struct jobEntry *)glistJmJobs;
         job = next) {

        next = (struct jobEntry *)job->forw;

        if (job->lost) {
            continue;
        }

        /* Candidate for migration: system-suspended, rerunnable with a
         * checkpoint directory, no action script running, suspended longer
         * than its migration threshold, past the (exponential, see
         * mig_times) retry backoff, and not merely window-suspended. */
        if ((job->job_base.status & JOB_STATUS_SSUSP) && !migrating
            && !(job->job_base.status & JOB_STATUS_MIG) && 0 == job->job_base.act_pid
            && (job->job_base.submit->options & SUBOPT_RERUNNABLE
                && NULL != job->job_base.submit->chkpnt_dir
                && '\0' != job->job_base.submit->chkpnt_dir[0])
            && (session - job->job_base.suspend_time > job->job_base.mig)
            && (session - job->chkpnt_time > job->mig_times * gintJMSleepTime)
            && !(job->job_base.reasons & SUSPEND_REASON_QUEUE_WINDOW)) {

            /* checkpoint-and-kill starts the migration */
            if (signal_job_begin(job, QUICKPOOL_SIGCHKPNT, QP_CHKPNT_KILL, TRUE) == 0) {
                job->job_base.status |= JOB_STATUS_MIG;
                migrating = TRUE;
                jmlog_newstatus(job);
                continue;
            }
        }

        /* periodic checkpoint for running jobs with a checkpoint interval */
        if (!(job->job_base.status & JOB_STATUS_MIG) && (job->job_base.status & JOB_STATUS_RUN)
            && 0 == job->job_base.act_pid && 0 < job->job_base.submit->chkpnt_duration
            && session - job->chkpnt_time > job->job_base.submit->chkpnt_duration) {

            if (signal_job_begin(job, QUICKPOOL_SIGCHKPNT, 0, TRUE) == 0) {
                jmlog_newstatus(job);
                continue;
            }
        }
    }

    return;
} // end function chkpnt_migrate_job

/**
 * @brief       Execute job post script for migration job.
 *
 * @param[in]   job          #1: job
 *
 * @retval      0            #1: no need to run job post script
 * @retval      >0           #2: succeed, child pid
 * @retval      <0           #3: failed, fork failure
 *
 * @note        Fork a child to execute job post script.
 */
static int
execute_postcmd_4migratejob(struct jobEntry *job)
{
    /* nothing to do when no post-queue command is configured */
    if (!job->job_base.postqueue_command || job->job_base.postqueue_command[0] == '\0') {
        return 0;
    }

    pid_t pid;
    char strJobId[32];
    if ((pid = fork()) < 0) {
        log_message(ERR, NOCLASS, STR_JOB_FAIL_S_M, __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "fork");
        return (pid);
    }

    /* parent: return the child's pid; the child is reaped elsewhere */
    if (pid > 0) {
        return (pid);
    }

    /* child: mark the execution phase, set up, then run the post command */
    gf_setenv("QP_EXECUTE", (char *)"END");

    if (prepare_4postcmd(job) == -1) {
        log_message(ERR, NOCLASS, STR_JOB_FAIL_S, __func__,
                    jobid_2string(job->job_base.jobid, strJobId), "prepare_4postcmd");
        /* exit(-1) yields status 255 to the parent's waitpid */
        exit(-1);
    }

    execute_queue_postcmd(job);

    exit(0);
} // end function execute_postcmd_4migratejob

/**
 * @brief       A checkpoint job finish checkpointing.
 *
 * @param[in]   job          #1: checkpoint job
 * @param[in]   w_status     #2: checkpoint end status (0 == success)
 * @param[out]  freed        #3: if job is deleted
 *
 * @note        We need tell JS that job checkpoint is done in this function.
 */
static void
finish_chkpnt_job(struct jobEntry *job, int w_status, bool_t *freed)
{
    int savePid, saveStatus;

    /* the checkpoint may have resumed a suspended job; re-stop it unless it
     * is migrating away */
    if (IS_JOB_SUSPEND(job->job_base.status) && !(job->job_base.status & JOB_STATUS_MIG)) {
        send_signal_2job(job, SIGSTOP, TRUE);
    }

    saveStatus = job->job_base.status;
    if (job->job_base.status & JOB_STATUS_MIG) {

        if (0 == w_status) {

            /* successful checkpoint of a migrating job: first mark the job
             * lost and let check_finish_jobs() confirm, so we return here
             * once more before cleaning up */
            if (!job->lost) {
                job->lost = TRUE;
                gboolCheckFinishJobs = TRUE;
                return;
            } else if (0 == job->comm_failcnt) {
                return;
            }

            if (0 == remove_jobfile(job)) {
                return;
            }

            char strJobId[32];
            log_message(ERR, NOCLASS, "%s: unable to cleanup migrating job <%s>", 
                        __func__, jobid_2string(job->job_base.jobid, strJobId));

            /* cleanup failed: push the job back to pending */
            JM_SET_STATE(job, JOB_STATUS_PEND);
        } else {
            /* checkpoint failed: the migration attempt is abandoned */
            job->job_base.status &= ~JOB_STATUS_MIG;
        }
    }

    savePid = job->job_base.act_pid;

    /* report the new status to JS; on failure restore the saved act_pid and
     * status so the action is retried next cycle */
    if (status_job(BATCH_STATUS_JOB, job, job->job_base.status,
                   w_status == 0 ? 0 : ERROR_SYS_CALL) < 0) {

        job->job_base.act_pid = savePid;
        job->job_base.status = saveStatus;
        return;
    }

    /* action accounting: checkpoint done, no action script in flight */
    job->chkpnt_time = time(NULL);
    job->job_base.act_pid = 0;
    job->actcmd_state = ACT_NO;
    job->job_base.act_signal = QUICKPOOL_SIGNAL_NULL;

    if (0 == w_status) {
        /* reset the migration-retry backoff multiplier */
        job->mig_times = 1;
    }

    if (!(saveStatus & JOB_STATUS_MIG)) {
        return;
    }

    if (0 == w_status) {

        /* migration complete on this side: run the post command and drop
         * the local job entry; tell the caller the job object is gone */
        execute_postcmd_4migratejob(job);
        delete_jmjob(job);
        *freed = TRUE;
    } else {
        /* failed migration: double the retry backoff */
        job->mig_times *= 2;
    }

    return;
} // end function finish_chkpnt_job

static int
resume_job(struct jobEntry *job, int signal, int suspreason)
{
    if (job->job_base.reasons & SUSPEND_REASON_JSLOCK) {
        return -1;
    }

    if (job->job_base.act_pid) {
        return 0;
    }

    if (!(job->job_base.reasons & suspreason)) {
        return -1;
    }

    char strJobId[32];
    log_message(DEBUG, DEBUGSIGNAL, "%s: resume job %s with the current reason %d and the triggered reason %d;",
                __func__, jobid_2string(job->job_base.jobid, strJobId),
                job->job_base.reasons, suspreason);

    if (signal_job_begin(job, signal, 0, TRUE) < 0) {
        if (send_signal_2job(job, 0, FALSE) < 0) {

            JM_SET_STATE(job, JOB_STATUS_EXIT);
            return -1;
        }
    }

    jmlog_newstatus(job);

    return 0;
} // end function resume_job

/**
 * @brief       Suspend a running job for the given reason.
 *
 * @param[in]   job          #1: job to suspend
 * @param[in]   signal       #2: QUICKPOOL stop signal to deliver
 * @param[in]   reason       #3: suspend reason bits
 * @param[in]   subreason    #4: suspend subreason
 */
static void
suspend_job(struct jobEntry *job, int signal, int reason, int subreason)
{
    char strJobId[32];
    log_message(DEBUG, DEBUGSIGNAL, "%s: job <%s> reasons %d subresons %d signal %d status %x.",
                __func__, jobid_2string(job->job_base.jobid, strJobId),
                reason, subreason, signal, job->job_base.status);

    /* remember why we tried to suspend; finish_suspend_job() folds these
     * into job_base.reasons once the action completes */
    job->susp_reason = reason;
    job->susp_subreason = subreason;

    if (!JOB_RUNNING(job)) {
        return;
    }

    /* never suspend while post-processing is running */
    if (job->post_started) {
        return;
    }

    if (IS_JOB_SUSPEND(job->job_base.status)) {

        /* already suspended for this reason: nothing to add */
        if (job->job_base.reasons & reason) {
            return;
        } else if (job->job_base.quickpool_signal[-signal] == 0) {
            /* NOTE: quickpool_signal[] is indexed with -signal; QUICKPOOL
             * signal codes are negative (cf. the "0 > act_signal" test in
             * finish_signal_job). A zero offset means no follow-up signal
             * is mapped, so there is nothing more to send. */
            return;
        }
    }

    /* the same (possibly offset-adjusted) action is already in flight */
    if ((job->job_base.act_pid) && ((job->job_base.act_signal == signal)
        || (job->job_base.act_signal == (signal + job->job_base.quickpool_signal[-signal])))) {
        return;
    }

    signal_job_begin(job, signal + job->job_base.quickpool_signal[-(signal)], 0, TRUE);

    jmlog_newstatus(job);

    return;
} // end function suspend_job

/**
 * @brief       Job suspending script is ended.
 *
 * @param[in]   job          #1: job
 * @param[in]   w_status     #2: suspending script end status (0 == success)
 *
 * @note        We need tell JS that job suspending script is ended in this function.
 */
static void
finish_suspend_job(struct jobEntry *job, int w_status)
{
    char strJobId[32];
    log_message(DEBUG, DEBUGSIGNAL, "%s: Suspend job %s; reasons=%x, subresons=%d",
                __func__, jobid_2string(job->job_base.jobid, strJobId),
    	        job->susp_reason, job->susp_subreason);

    /* a JM-start stop is transient and is stripped from the stored reasons */
    int jmStartStop;
    jmStartStop = (job->susp_reason & SUSPEND_REASON_JMSTART);

    job->job_base.suspend_time = time(NULL);
    job->job_base.reasons |= job->susp_reason & (~SUSPEND_REASON_JMSTART);
    job->job_base.subreasons = job->susp_subreason;

    /* user-initiated signals produce USUSP, everything else SSUSP */
    if ((QUICKPOOL_SIGSTOP_USER == job->job_base.act_signal)
        || (QUICKPOOL_SIGKILL_USER == job->job_base.act_signal)) {
	SET_STATE(job->job_base.status, JOB_STATUS_USUSP);
    } else {
	SET_STATE(job->job_base.status, JOB_STATUS_SSUSP);
    }

    if (w_status == 0) {
	job->actcmd_state = ACT_DONE;
    } else {
        job->actcmd_state = ACT_FAIL;
    }

    if (jmStartStop) {
        job->actcmd_state = ACT_NO;
    }

    /* report to JS; on failure keep the action pending and count the miss */
    if (status_job(BATCH_STATUS_JOB, job, job->job_base.status,
                   w_status == 0 ? 0 : ERROR_SYS_CALL) < 0) {
        job->comm_failcnt ++;
        return;
    }

    if (0 < job->comm_failcnt) {
        job->comm_failcnt = 0;
    }

    /* action fully settled: clear the bookkeeping */
    job->actcmd_state = ACT_NO;
    job->job_base.act_signal = QUICKPOOL_SIGNAL_NULL;
    job->job_base.act_pid = 0;

    return;
} // end function finish_suspend_job

/**
 * @brief       Job resuming script is ended.
 *
 * @param[in]   job          #1: job
 * @param[in]   w_status     #2: resuming script end status
 *
 * @note        We need tell JS that job resuming script is ended in this function.
 */
static void
finish_resume_job(struct jobEntry *job, int w_status)
{
    job->job_base.reasons = 0;
    job->job_base.subreasons = 0;
    SET_STATE(job->job_base.status, JOB_STATUS_RUN);

    if (w_status == 0) {
        job->actcmd_state = ACT_DONE;
    } else {
        job->actcmd_state = ACT_FAIL;
    }

    if (status_job(BATCH_STATUS_JOB, job, job->job_base.status,
                   w_status == 0 ? 0 : ERROR_SYS_CALL) < 0) {
        job->comm_failcnt ++;
        return;
    }

    if (0 < job->comm_failcnt) {
        job->comm_failcnt = 0;
    }

    job->actcmd_state = ACT_NO;
    job->job_base.act_signal = QUICKPOOL_SIGNAL_NULL;
    job->job_base.act_pid = 0;

    return;
} // end function finish_resume_job

/**
 * @brief       Job signal (stop, resume, chkpnt) script is ended.
 *
 * @param[in]   job          #1: job
 */
static void
finish_signal_job(struct jobEntry *job)
{
    char strJobId[32];

    int w_status=0;
    /* QUICKPOOL signal codes are negative; for those, the action script's
     * success is signalled by an exit file in the job's tmp directory */
    if (0 > job->job_base.act_signal) {

        char tmpDirName[PATH_MAX+1];
        get_tmpdir_4job(tmpDirName, job->job_base.jobid, job->job_base.execute_uid);

        char exitFile[PATH_MAX+1];
        snprintf(exitFile, PATH_MAX, "%s/.%s.%s.%s", tmpDirName, job->job_base.submit->script,
                 jobid_2longstring(job->job_base.jobid, strJobId),
                 get_exitfile_suffix(job->job_base.act_signal));

        /* exit file present => action succeeded */
        struct stat st;
        w_status = stat(exitFile, &st);
        if (w_status == 0) {
            job->actcmd_state = ACT_DONE;
        } else {
            job->actcmd_state = ACT_FAIL;
        }
    }

    job->job_base.status &= ~JOB_STATUS_SIGNAL;

    /* freed is set TRUE only when finish_chkpnt_job() deletes the job */
    bool_t freed = FALSE;
    switch (job->job_base.act_signal) {
    case QUICKPOOL_SIGCHKPNT:
        finish_chkpnt_job(job, w_status, &freed);
        break;        
    case QUICKPOOL_SIGSTOP_USER:
    case QUICKPOOL_SIGSTOP_LOAD:
    case QUICKPOOL_SIGSTOP_PREEMPT:
    case QUICKPOOL_SIGSTOP_WINDOW:
    case QUICKPOOL_SIGSTOP_OTHER:
        finish_suspend_job(job, w_status);
        break;

    case QUICKPOOL_SIGCONT_USER:
    case QUICKPOOL_SIGCONT_LOAD:
    case QUICKPOOL_SIGCONT_WINDOW:
    case QUICKPOOL_SIGCONT_OTHER:
        finish_resume_job(job, w_status);
        break;
    case QUICKPOOL_SIGKILL_USER:
    case QUICKPOOL_SIGKILL_REQUEUE:
    case QUICKPOOL_SIGKILL_OTHER:
    case QUICKPOOL_SIGKILL_FORCE:

        /* kill actions: just report the status and clear the action state */
        if (status_job(BATCH_STATUS_JOB, job, job->job_base.status,
                       w_status==0?0:ERROR_SYS_CALL) < 0) {
            job->comm_failcnt ++;
            break;
        }

        if (0 < job->comm_failcnt) {
            job->comm_failcnt = 0;
        }

        job->actcmd_state = ACT_NO;
        job->job_base.act_pid = 0;
        job->job_base.act_signal = QUICKPOOL_SIGNAL_NULL;

        break;
    case QUICKPOOL_SIGKILL_RUNLIMIT:
    case QUICKPOOL_SIGKILL_DEADLINE:
    case QUICKPOOL_SIGKILL_PROCESSLIMIT:
    case QUICKPOOL_SIGKILL_CPULIMIT:
    case QUICKPOOL_SIGKILL_MEMLIMIT:
    case QUICKPOOL_SIGKILL_SWAPLIMIT:
        /* limit kills go through the suspend bookkeeping path */
        finish_suspend_job(job, w_status);
        break;
    default:
        log_message(ERR, NOCLASS, "%s: unknown signal <%d> for job <%s> at the job status <%x> with act_pid <%d>",
                    __func__, job->job_base.act_signal, jobid_2string(job->job_base.jobid, strJobId),
                    job->job_base.status, job->job_base.act_pid);

        job->job_base.act_pid = 0;
        return;
    }

    if (!freed) {
        // Job still exists; record its new status. When freed is TRUE the
        // job object was deleted by finish_chkpnt_job(), so logging it
        // would touch freed memory.
	jmlog_newstatus(job);
    }

    return;
} // end function finish_signal_job

/**
 * @brief       Populate a Tcl_Node from a host's load record.
 *
 * @param[in]   load         #1: source host information
 * @param[out]  tclnode      #2: node to fill for Tcl expression evaluation
 *
 * @retval      0            always (relaunches the daemon on allocation failure)
 *
 * @note        tclnode->load is heap-allocated; the caller frees it (FREEUP).
 */
static int
set_tclnode_load(struct QuickPoolHostInfo *load, struct Tcl_Node *tclnode)
{
    /* static host attributes */
    tclnode->host_name = load->host;
    tclnode->host_type = load->host_type;
    tclnode->host_model = load->host_model;
    tclnode->max_cpu = load->ncpus;
    tclnode->max_mem = load->maxmem;
    tclnode->max_swp = load->maxswap;
    tclnode->max_tmp = load->maxtmp;

    /* evaluation context */
    tclnode->from_type = load->host_type;
    tclnode->from_model = load->host_model;
    tclnode->ignDedicatedResource = FALSE;
    tclnode->res_bitmap = load->res_bitmap;
    tclnode->exclres_bitmap = NULL;
    tclnode->check_flag = TCL_CHECK_EXPRESSION;
    tclnode->status = load->jm_status;

    /* copy the dynamic load vector into a fresh buffer owned by the caller */
    unsigned int numIndex = gptrSharedConf->qpinfo->num_index;
    tclnode->load = (double *)calloc(numIndex, sizeof(double));
    if (NULL == tclnode->load) {
        log_message(ERR, NOCLASS, STR_FUNC_D_FAIL_M, __func__,
                    "calloc", numIndex*sizeof(double));
        relaunch();
    }

    unsigned int idx;
    for (idx = 0; idx < numIndex; idx++) {
        tclnode->load[idx] = load->load[idx];
    }

    return 0;
} // end function set_tclnode_load

/**
 * @brief       Check if job use more resources than its setting limits.
 *
 * @param[in]   job          #1: job
 *
 * @note        Kill job if job resource usage exceed its limits.
 */
static void
check_job_limits(struct jobEntry *job)
{
    struct rlimit rlimit;

    set_resource_limit(&job->job_base.resource_limits[RESOURCE_LIMIT_CPU],
		       &rlimit, RESOURCE_LIMIT_CPU);
    if (rlimit.rlim_cur != RLIM_INFINITY && 0 < job->job_base.number_node) {

        long cpuTime;
        cpuTime = (long)job->inst_usage[0].utime + (long)job->inst_usage[0].stime;
        if ((long)rlimit.rlim_cur < cpuTime) {

            if (job->job_base.status & JOB_STATUS_KILL) {
            } else {
                signal_job_begin(job, QUICKPOOL_SIGKILL_CPULIMIT, 0, TRUE);
                jmlog_newstatus(job);

                job->job_base.status |= JOB_STATUS_KILL;
            }
        }
    }

    set_resource_limit(&job->job_base.resource_limits[RESOURCE_LIMIT_SWAP],
                       &rlimit, RESOURCE_LIMIT_SWAP);
    if (rlimit.rlim_cur != RLIM_INFINITY && 0 < job->job_base.number_node) {

        long swapVal;
        swapVal = (long)job->inst_usage[0].swap;
        if ((long)(rlimit.rlim_cur / 1024) < swapVal) {

            if (job->job_base.status & JOB_STATUS_KILL) {

	    } else {
                signal_job_begin(job, QUICKPOOL_SIGKILL_SWAPLIMIT, 0, TRUE);
		jmlog_newstatus(job);

		job->job_base.status |= JOB_STATUS_KILL;
            }
        }
    }

    set_resource_limit(&job->job_base.resource_limits[RESOURCE_LIMIT_PROCESS],
                       &rlimit, RESOURCE_LIMIT_PROCESS);

    if (rlimit.rlim_cur != RLIM_INFINITY && 0 < job->job_base.number_node) {
        if (rlimit.rlim_cur + 2 < job->inst_usage[0].npids) {

            if ((IS_JOB_SUSPEND(job->job_base.status))
                && (job->job_base.reasons & SUSPEND_REASON_RESLIMIT)
                && (SUBREASON_PROCESS == job->job_base.subreasons)) {
                return;
            } else {
                signal_job_begin(job, QUICKPOOL_SIGKILL_PROCESSLIMIT, 0, TRUE);
                jmlog_newstatus(job);

		job->job_base.status |= JOB_STATUS_KILL;
            }
        }
    }

    set_resource_limit(&job->job_base.resource_limits[RESOURCE_LIMIT_RSS],
                       &rlimit, RESOURCE_LIMIT_RSS);

    if (rlimit.rlim_cur != RLIM_INFINITY && 0 < job->job_base.number_node) {

        long memVal;
        memVal = (long)job->inst_usage[0].mem;
        if ((long)(rlimit.rlim_cur / 1024) < memVal) {

            if (job->job_base.status & JOB_STATUS_KILL) {

            } else {

                signal_job_begin(job, QUICKPOOL_SIGKILL_MEMLIMIT, 0, TRUE);
                jmlog_newstatus(job);

                job->job_base.status |= JOB_STATUS_KILL;
            }
        }
    }

    return;
} // end function check_job_limits

/**
 * @brief       Check if job's run time window is open or closed.
 *
 * @param[in]   job          #1: job
 *
 * @note        Tell caller if job should be active or not.
 */
static bool_t
window_ok(struct jobEntry *job)
{
    time_t now = time(0);

    /* when a signal is pending on an active job, probe WARN_TIME ahead so
     * the window state is judged at the moment the signal takes effect */
    time_t probeTime = (job->active && (job->job_base.signal > 0))
                       ? now + WARN_TIME
                       : now;

    /* the cached window state is still valid */
    if (0 == job->winclose_time || job->winclose_time > probeTime) {
        return (job->active);
    }

    struct dayhour dayhour;
    set_dayhour_4time(&dayhour, probeTime);

    /* no run windows configured: the job is always active until midnight */
    if (LIST_NUM_ENTS(job->job_base.run_winlist) == 0) {
        job->active = TRUE;
        job->winclose_time = now + (24.0 - dayhour.hour) * ONEHOUR;
        return (job->active);
    }

    job->active = iswindow_active(&dayhour, now, &job->winclose_time, job->job_base.run_winlist);

    return (job->active);
} // end function window_ok
