#!/usr/bin/bash
# Copyright (C) 2019 Checkmk GmbH - License: GNU General Public License v2
# This file is part of Checkmk (https://checkmk.com). It is subject to the terms and
# conditions defined in the file COPYING, which is part of this source code package.

#
# BEGIN COMMON AGENT CODE
#

###
# Note on agent package deployment modes:
# (Only relevant when deploying a Checkmk agent package manually)
# Agent paths (MK_LIBDIR, MK_CONFDIR, MK_VARDIR, MK_LOGDIR, MK_BIN)
# can be configured implicitly by setting MK_INSTALLDIR in function "set_up_single_directory()"
# or by setting the path variables explicitly under "set_default_paths()".
# Please refer to the official documentation for more details.
###

usage() {
    cat <<HERE
Usage: ${0} [OPTION...]

The Checkmk agent to monitor *nix style systems.

Options:
  -h, --help                 show this message and exit
  -d, --debug                emit debugging messages
  -p, --profile              create files containing the execution times
  --force-inventory          get the output of the agent plugin 'mk_inventory'
                             independent of the last run state.
HERE
}

inpath() {
    # Succeed if the given command is available.
    # 'command -v' is more robust than 'which'/'type' based tests,
    # hence we use it to replace the "if type [somecmd]" idiom.
    command -v "${1:?No command to test}" >/dev/null 2>&1
}

init_sudo() {
    # Set ROOT_OR_SUDO, the prefix for privileged calls: "sudo
    # --non-interactive" when we are not root and sudo is available
    # (non-interactive so sudo fails instead of prompting), else empty.
    ROOT_OR_SUDO=""
    if [ "$(whoami)" != "root" ] && inpath sudo; then
        ROOT_OR_SUDO="sudo --non-interactive"
    fi
    export ROOT_OR_SUDO
}

get_file_atime() {
    # Print the access time (epoch seconds) of the given file.
    # Try GNU stat, then BSD/Solaris stat, then fall back to perl.
    stat -c %X "${1}" 2>/dev/null && return
    stat -f %a "${1}" 2>/dev/null && return
    perl -e 'if (! -f $ARGV[0]){die "0000000"};$atime=(stat($ARGV[0]))[8];print $atime."\n";' "${1}"
}

get_file_mtime() {
    # Print the modification time (epoch seconds) of the given file.
    # Try GNU stat, then BSD/Solaris stat, then fall back to perl.
    stat -c %Y "${1}" 2>/dev/null && return
    stat -f %m "${1}" 2>/dev/null && return
    perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print $mtime."\n";' "${1}"
}

is_valid_plugin() {
    # A valid plugin is an executable regular file whose name does not
    # carry a dpkg leftover extension (remnants from distro upgrades).
    case "${1:?No plugin defined}" in
        *.dpkg-new | *.dpkg-old | *.dpkg-temp | *.dpkg-tmp)
            return 1
            ;;
        *)
            [ -f "${1}" ] && [ -x "${1}" ]
            ;;
    esac
}

set_up_process_commandline_arguments() {
    # Parse the agent's command line (see usage()); unknown arguments
    # are silently ignored. Stops at the first empty argument.
    while [ -n "${1}" ]; do
        case "${1}" in
            -d | --debug)
                # trace execution and keep stderr visible
                set -xv
                DISABLE_STDERR=false
                ;;
            -p | --profile)
                LOG_SECTION_TIME=true
                # disable caching to get the whole execution time
                DISABLE_CACHING=true
                ;;
            --force-inventory)
                export MK_FORCE_INVENTORY=true
                ;;
            -h | --help)
                usage
                exit 1
                ;;
            *) ;;
        esac
        shift
    done
}

set_up_get_epoch() {
    # Define get_epoch(): print seconds since the epoch.
    # Some date implementations do not know +%s and print a literal "%s";
    # in that case fall back to perl's interpreter start time ($^T).
    if date +%s | grep -q "^[0-9]"; then
        get_epoch() { date +%s; }
    else
        # do not check whether perl is even present.
        # in weird cases we may be fine without get_epoch.
        get_epoch() { perl -e 'print($^T."\n");'; }
    fi
}

set_up_current_shell() {
    # Determine the shell actually interpreting this script: it may differ
    # from the shebang, e.g. when reconfigured in the xinetd/systemd/
    # whateverd config file. Take the first word of our own ps args.
    CURRENT_SHELL="$(ps -o args= -p $$)"
    CURRENT_SHELL="${CURRENT_SHELL%% *}"
}

set_up_single_directory() {
    # Single-directory deployment: set MK_INSTALLDIR here and all other
    # agent paths are derived from it (see provide_agent_paths).
    # Default is empty, i.e. use the separately configured paths.
    [ -n "${MK_INSTALLDIR:-}" ] || MK_INSTALLDIR=""
}

#
# END COMMON AGENT CODE
#

set_default_paths() {
    # Multi-directory deployment: set/edit these paths explicitly.
    # Values already present in the environment win. The whole set is
    # overridden later if MK_INSTALLDIR is non-empty (provide_agent_paths).
    [ -n "${MK_LIBDIR:-}" ] || MK_LIBDIR="/usr/lib/check_mk_agent"
    [ -n "${MK_CONFDIR:-}" ] || MK_CONFDIR="/etc/check_mk"
    [ -n "${MK_VARDIR:-}" ] || MK_VARDIR="/var/lib/check_mk_agent"
    [ -n "${MK_LOGDIR:-}" ] || MK_LOGDIR="/var/log/check_mk_agent"
    [ -n "${MK_BIN:-}" ] || MK_BIN="/usr/bin"
}

set_up_grep() {
    # Prefer the XPG4 grep where it exists (Solaris):
    # https://docs.oracle.com/cd/E19683-01/816-0210/6m6nb7m89/index.html:
    # The /usr/xpg4/bin/egrep utility is identical to /usr/xpg4/bin/grep -E
    # (see grep(1)). Portable applications should use /usr/xpg4/bin/grep -E.
    [ -x /usr/xpg4/bin/grep ] || return
    grep() { "/usr/xpg4/bin/grep" "$@"; }
}

preamble_1() {
    # Export REMOTE, the address of the remote peer. That helps when data
    # is being sent only once to each remote host.
    if [ -n "${REMOTE_HOST}" ]; then
        export REMOTE="${REMOTE_HOST}"
    elif [ -n "${SSH_CLIENT}" ]; then
        # SSH_CLIENT is "<ip> <client port> <server port>" - keep the ip
        export REMOTE="${SSH_CLIENT%% *}"
    fi
}

#
# BEGIN COMMON AGENT CODE
#

determine_sync_async() {
    # Normalize the sync/async 'booleans': anything but the literal
    # string "false" is treated as true.
    if [ "${MK_RUN_SYNC_PARTS}" != "false" ]; then
        MK_RUN_SYNC_PARTS=true
    fi
    if [ "${MK_RUN_ASYNC_PARTS}" != "false" ]; then
        MK_RUN_ASYNC_PARTS=true
    fi
}

provide_agent_paths() {
    # Export the agent directory layout. A non-empty MK_INSTALLDIR always
    # wins over separately set agent paths.
    if [ -n "${MK_INSTALLDIR}" ]; then
        MK_LIBDIR="${MK_INSTALLDIR}/package"
        MK_CONFDIR="${MK_INSTALLDIR}/package/config"
        MK_VARDIR="${MK_INSTALLDIR}/runtime"
        MK_LOGDIR="${MK_INSTALLDIR}/runtime/log"
        MK_BIN="${MK_INSTALLDIR}/package/bin"
    fi

    export MK_LIBDIR MK_CONFDIR MK_VARDIR MK_LOGDIR MK_BIN

    # Optionally set a tempdir for all subsequent calls
    #export TMPDIR=

    # All executables in PLUGINSDIR are simply executed and their output
    # appended to the output of the agent. Plugins define their own
    # sections and must output headers with '<<<' and '>>>'
    PLUGINSDIR="${MK_LIBDIR}/plugins"

    # All executables in LOCALDIR are executed and their output inserted
    # into the section <<<local>>>. Please refer to the online
    # documentation for details about local checks.
    LOCALDIR="${MK_LIBDIR}/local"

    # All files in SPOOLDIR are simply appended to the agent
    # output if they are not outdated (see below)
    SPOOLDIR="${MK_VARDIR}/spool"

    # JOBDIR contains subfolders with snippets of agent output
    # coming from the mk-job executable.
    # These snippets will be used to create the <<<job>>> section.
    JOBDIR="${MK_VARDIR}/job"

    # Cache directory for agent output from asynchronous parts of the
    # agent and plugins, handled by our caching mechanism.
    CACHEDIR="${MK_VARDIR}/cache"
}

# Default (non-profiling) implementations: _log_section_time simply runs the
# given command, finalize_profiling is a no-op. set_up_profiling() replaces
# both when profiling is enabled. The function is kept as an eval-able string
# so asynchronous subshells can recreate it (see _run_cached_internal).
# SC2089: Quotes/backslashes will be treated literally. Use an array.
# shellcheck disable=SC2089
MK_DEFINE_LOG_SECTION_TIME='_log_section_time() { "$@"; }'
finalize_profiling() { :; }

set_up_profiling() {
    # Set up per-section profiling. When enabled (via -p/--profile or
    # profiling.cfg), _log_section_time is redefined to time every wrapped
    # call and write the results to a per-run directory below
    # ${MK_LOGDIR}/profiling, and finalize_profiling records the total
    # agent runtime.

    PROFILING_CONFIG="${MK_CONFDIR}/profiling.cfg"
    if [ -e "${PROFILING_CONFIG}" ]; then
        # Config vars:
        #   LOG_SECTION_TIME=true/false
        #   DISABLE_CACHING=true/false

        # If LOG_SECTION_TIME=true via profiling.cfg do NOT disable caching in order
        # to get the real execution time during operation.
        # shellcheck disable=SC1090
        . "${PROFILING_CONFIG}"
    fi

    PROFILING_LOGFILE_DIR="${MK_LOGDIR}/profiling/$(date +%Y%m%d_%H%M%S)"

    if ${LOG_SECTION_TIME:-false}; then
        mkdir -p "${PROFILING_LOGFILE_DIR}"
        agent_start="$(perl -MTime::HiRes=time -le 'print time()')"

        # The single-quoted body below is eval'ed here and exported, so that
        # asynchronous subshells can eval it as well. Only
        # ${PROFILING_LOGFILE_DIR} is spliced in as a literal; everything
        # else must expand at call time.
        # SC2016: Expressions don't expand in single quotes, use double quotes for that.
        # SC2089: Quotes/backslashes will be treated literally. Use an array.
        # shellcheck disable=SC2016,SC2089
        MK_DEFINE_LOG_SECTION_TIME='_log_section_time() {
            section_func="$@"

            base_name=$(echo "${section_func}" | sed "s/[^A-Za-z0-9.-]/_/g")
            profiling_logfile="'"${PROFILING_LOGFILE_DIR}"'/${base_name}.log"

            start="$(perl -MTime::HiRes=time -le "print time()")"
            { time ${section_func}; } 2>> "${profiling_logfile}"
            echo "runtime $(perl -MTime::HiRes=time -le "print time() - ${start}")" >> "${profiling_logfile}"
        }'

        finalize_profiling() {
            # Append the overall agent runtime to the profiling directory.
            pro_log_file="${PROFILING_LOGFILE_DIR}/profiling_check_mk_agent.log"
            agent_end="$(perl -MTime::HiRes=time -le 'print time()')"
            echo "runtime $(echo "${agent_end} - ${agent_start}" | bc)" >>"${pro_log_file}"
        }
    fi

    eval "${MK_DEFINE_LOG_SECTION_TIME}"
    # SC2090: Quotes/backslashes in this variable will not be respected.
    # shellcheck disable=SC2090
    export MK_DEFINE_LOG_SECTION_TIME
}

unset_locale() {
    # Eliminate localized outputs where possible, but keep a UTF-8 capable
    # C locale if one is installed: the locale logic here is used to make
    # the Python encoding detection work (see CMK-2778).
    unset -v LANG LC_ALL
    if inpath locale && inpath paste; then
        # match C.UTF-8 at the beginning, but not e.g. es_EC.UTF-8!
        case "$(locale -a | paste -sd ' ' -)" in
            *' C.UTF-8'* | 'C.UTF-8'*) LC_ALL="C.UTF-8" ;;
            *' C.utf8'* | 'C.utf8'*) LC_ALL="C.utf8" ;;
        esac
    fi
    export LC_ALL="${LC_ALL:-C}"
}

read_python_version() {
    # Print the interpreter command ${1} and succeed if it exists and its
    # version is ${2}.x with minor version x >= ${3}; fail otherwise.
    inpath "${1}" || return 1
    version=$(${1} -c 'import sys; print("%s.%s"%(sys.version_info[0], sys.version_info[1]))')

    major=${version%%.*}
    minor=${version##*.}

    if [ "${major}" -eq "${2}" ] && [ "${minor}" -ge "${3}" ]; then
        echo "${1}"
        return 0
    fi
    return 1
}

detect_python() {
    # Detect usable python interpreters and export PYTHON2/PYTHON3.
    # Sets NO_PYTHON when none was found, and WRONG_PYTHON_COMMAND when a
    # (possibly configured) interpreter cannot be executed at all
    # (shell exit code 127 = command not found).
    PYTHON3=$(read_python_version python3 3 4 || read_python_version python 3 4)
    PYTHON2=$(read_python_version python2 2 6 || read_python_version python 2 6)
    # An optional config file may override the detected interpreters.
    if [ -f "${MK_CONFDIR}/python_path.cfg" ]; then
        # shellcheck source=/dev/null
        . "${MK_CONFDIR}/python_path.cfg"
    fi
    export PYTHON2 PYTHON3

    _python_probe_status() {
        # shellcheck disable=SC2086 # the configured command may carry arguments
        $1 -c 'pass' >/dev/null 2>&1
        echo $?
    }

    if [ -z "${PYTHON3}" ] && [ -z "${PYTHON2}" ]; then
        NO_PYTHON=true
    elif [ -n "${PYTHON3}" ]; then
        if [ "$(_python_probe_status "${PYTHON3}")" -eq 127 ]; then
            WRONG_PYTHON_COMMAND=true
        fi
    elif [ "$(_python_probe_status "${PYTHON2}")" -eq 127 ]; then
        WRONG_PYTHON_COMMAND=true
    fi
}

#
# END COMMON AGENT CODE
#

encryption_panic() {
    # Tell the server that encryption set-up failed, then bail out so we
    # never send unencrypted data by accident.
    printf '%s\n' "<<<check_mk>>>" "EncryptionPanic: true"
    exit 1
}

set_up_encryption() {
    # Source the optional encryption config (expected to set ENCRYPTED)
    # and define optionally_encrypt() accordingly; default is "no".
    # shellcheck source=agents/cfg_examples/encryption.cfg
    if [ -f "${MK_CONFDIR}/encryption.cfg" ]; then
        . "${MK_CONFDIR}/encryption.cfg" || encryption_panic
    fi
    define_optionally_encrypt "${ENCRYPTED:-"no"}"
}

hex_decode() {
    # Decode a hex string to raw bytes on stdout.
    # We might not have xxd available, so we do it manually.
    # Be aware that this implementation is very slow and should not be
    # used for large data.
    local encoded="$1"
    local pos
    for ((pos = 0; pos < ${#encoded}; pos += 2)); do
        printf '%b' "\x${encoded:pos:2}"
    done
}

parse_kdf_output() {
    # Extract salt, key and IV (hex encoded) from 'openssl enc ... -P'
    # output and print them space-separated.
    local kdf_output="$1"
    local salt_hex key_hex iv_hex
    salt_hex=$(echo "$kdf_output" | grep -oP "(?<=salt=)[0-9A-F]+")
    key_hex=$(echo "$kdf_output" | grep -oP "(?<=key=)[0-9A-F]+")
    iv_hex=$(echo "$kdf_output" | grep -oP "(?<=iv =)[0-9A-F]+")
    # Make sure this rather brittle grepping worked. For example, some
    # openssl update might decide to remove that odd-looking space
    # behind 'iv'. Note that the expected LENGTHS ARE DOUBLED because
    # the values are hex encoded.
    if [ ${#salt_hex} -ne 16 ] || [ ${#key_hex} -ne 64 ] || [ ${#iv_hex} -ne 32 ]; then
        encryption_panic
    fi
    echo "$salt_hex" "$key_hex" "$iv_hex"
}

encrypt_then_mac() {
    # Encrypt the input data (stdin), calculate a MAC over IV and ciphertext,
    # then print mac and ciphertext (both raw binary) to stdout.
    # Arguments: $1 salt (hex, currently unused here), $2 key (hex), $3 IV (hex).
    local salt_hex="$1"
    local key_hex="$2"
    local iv_hex="$3"
    local ciphertext_b64

    # We need the ciphertext twice: for the mac and for the output. But we can only store it in
    # encoded form because it can contain null bytes.
    ciphertext_b64=$(openssl enc -aes-256-cbc -K "$key_hex" -iv "$iv_hex" | openssl enc -base64)

    # HMAC-SHA256 over (IV || ciphertext), emitted as raw bytes
    (
        hex_decode "$iv_hex"
        echo "$ciphertext_b64" | openssl enc -base64 -d
    ) | openssl dgst -sha256 -mac HMAC -macopt hexkey:"$key_hex" -binary

    # finally the ciphertext itself
    echo "$ciphertext_b64" | openssl enc -base64 -d
}

define_optionally_encrypt() {
    # Define optionally_encrypt() according to the ENCRYPTED setting (${1}).
    # The defined function reads the payload from stdin and is called with
    # $1 = password and $2 = optional rtc timestamp (see scheme below).
    # if things fail, make sure we don't accidentally send unencrypted data
    unset optionally_encrypt

    if [ "${1}" != "no" ]; then
        # normalize "major.minor.patch" to one comparable integer,
        # e.g. 1.1.1 -> 10101
        OPENSSL_VERSION=$(openssl version | awk '{print $2}' | awk -F . '{print (($1 * 100) + $2) * 100+ $3}')
        #
        # Encryption scheme for version 04 and 05:
        #
        #   salt    <- random_salt()
        #   key, IV <- version_specific_kdf( salt, password )
        #
        #   ciphertext <- aes_256_cbc_encrypt( key, IV, message )
        #   mac        <- hmac_sha256( key, iv:ciphertext )
        #
        #   // The output blob is formed as:
        #   // - 2 bytes version
        #   // - optional: rtc timestamp
        #   // - 8 bytes salt
        #   // - 32 bytes MAC
        #   // - the ciphertext
        #   result <- [version:salt:mac:ciphertext]

        if [ "${OPENSSL_VERSION}" -ge 10101 ]; then
            optionally_encrypt() {
                # version: 05
                # kdf: pbkdf2, 600.000 iterations

                local salt_hex key_hex iv_hex
                # let openssl generate salt/key/iv (-P prints them without encrypting)
                read -r salt_hex key_hex iv_hex <<<"$(
                    parse_kdf_output "$(openssl enc -aes-256-cbc -md sha256 -pbkdf2 -iter 600000 -k "${1}" -P)"
                )"

                printf "05"
                printf "%s" "${2}"
                hex_decode "$salt_hex"
                encrypt_then_mac "$salt_hex" "$key_hex" "$iv_hex"
            }
        else
            optionally_encrypt() {
                # version: 04
                # kdf: openssl custom kdf based on sha256

                local salt_hex key_hex iv_hex
                read -r salt_hex key_hex iv_hex <<<"$(
                    parse_kdf_output "$(openssl enc -aes-256-cbc -md sha256 -k "${1}" -P)"
                )"

                printf "04"
                printf "%s" "${2}"
                hex_decode "$salt_hex"
                encrypt_then_mac "$salt_hex" "$key_hex" "$iv_hex"
            }
        fi
    else
        optionally_encrypt() {
            # plain text marker "99" (plus optional timestamp), then pass through
            [ -n "${2}" ] && printf "99%s" "${2}"
            cat
        }
    fi

}

preamble_2() {
    # Source the optional configuration of sections to exclude.
    local cfg="${MK_CONFDIR}/exclude_sections.cfg"
    if [ -f "${cfg}" ]; then
        # shellcheck source=/dev/null
        . "${cfg}"
    fi
}

preamble_3() {
    # Find out what zone we are running in; all pre-Solaris 10 systems
    # (no zonename command) are treated as "global".
    # pszone holds the matching ps(1) zone selection arguments.
    if inpath zonename; then
        zonename="$(zonename)"
        pszone="-z ${zonename}"
    else
        zonename="global"
        pszone="-A"
    fi
}

export_utility_functions() {
    # At the time of writing of this function, the solaris agent exports
    # some helper functions, so I consolidate those exports here.
    # I am not sure whether this is a good idea, though.
    # Their API is unstable.
    # NOTE(review): 'export -f' fails if run_mrpe is not defined yet, so
    # this must be called after the helper functions are in place.
    export -f run_mrpe
}

section_checkmk() {
    # The <<<check_mk>>> section: agent version, OS information and the
    # effective agent directory layout.
    cat <<HERE
<<<check_mk>>>
Version: 2.5.0b1
AgentOS: solaris
Hostname: $(hostname)
HERE

    if [ -n "${MK_INSTALLDIR}" ]; then
        # single-directory deployment
        echo "InstallationDirectory: ${MK_INSTALLDIR}"
        echo "PackageDirectory: ${MK_INSTALLDIR}/package"
        echo "RuntimeDirectory: ${MK_VARDIR}"
    else
        echo "ConfigurationDirectory: ${MK_CONFDIR}"
        echo "DataDirectory: ${MK_VARDIR}"
        echo "SpoolDirectory: ${SPOOLDIR}"
        echo "PluginsDirectory: ${PLUGINSDIR}"
        echo "LocalDirectory: ${LOCALDIR}"
    fi

    echo "OSType: unix"

    # Report OS name and version from os-release, with quotes stripped.
    while read -r osline; do
        unquoted="${osline//\"/}"
        case $osline in
            NAME=*) echo "OSName: ${unquoted##*=}" ;;
            VERSION_ID=*) echo "OSVersion: ${unquoted##*=}" ;;
        esac
    done <<<"$(cat /etc/os-release 2>/dev/null)"

    #
    # BEGIN COMMON AGENT CODE
    #

    if [ -n "${NO_PYTHON}" ]; then
        python_fail_msg="No suitable python installation found."
    elif [ -n "${WRONG_PYTHON_COMMAND}" ]; then
        python_fail_msg="Configured python command not found."
    fi

    cat <<HERE
FailedPythonReason: ${python_fail_msg}
SSHClient: ${SSH_CLIENT}
HERE
}

section_cmk_agent_ctl_status() {
    # Ask the agent controller for its status; silently skip the whole
    # section if no controller is installed.
    cmk-agent-ctl --version 2>/dev/null >&2 || return

    printf "<<<cmk_agent_ctl_status:sep(0)>>>\n"
    cmk-agent-ctl status --json --no-query-remote
}

section_checkmk_agent_plugins() {
    # List deployed plugins and local checks together with the version
    # string found in them (CMK_VERSION/__version__ line).
    printf "<<<checkmk_agent_plugins_lnx:sep(0)>>>\n"
    printf "pluginsdir %s\n" "${PLUGINSDIR}"
    printf "localdir %s\n" "${LOCALDIR}"
    # also look into the [1-9]*/ interval subdirectories
    for plugin in \
        "${PLUGINSDIR}"/* \
        "${PLUGINSDIR}"/[1-9]*/* \
        "${LOCALDIR}"/* \
        "${LOCALDIR}"/[1-9]*/*; do
        is_valid_plugin "${plugin}" || continue
        version_line=$(grep -e '^__version__' -e '^CMK_VERSION' "${plugin}" || echo 'CMK_VERSION="unversioned"')
        printf "%s:%s\n" "${plugin}" "${version_line}"
    done
}

section_checkmk_failed_plugin() {
    # Report a python plugin (${1}) that could not be run; only emitted
    # by the synchronous part of the agent.
    ${MK_RUN_SYNC_PARTS} || return
    printf '%s\n' "<<<check_mk>>>" "FailedPythonPlugins: ${1}"
}

#
# END COMMON AGENT CODE
#

section_df() {
    # Filesystem usage for the classic (non-ZFS) filesystem types.
    # Output: device fstype kbytes used avail capacity mountpoint
    echo '<<<df>>>'
    for fstype in ufs vxfs samfs lofs tmpfs; do
        # SC2162: read without -r will mangle backslashes.
        # The following suppression was added when we enabled the corresponding shellcheck.
        # It may well be that "read -r" would be more appropriate.
        # shellcheck disable=SC2162
        df -l -k -F ${fstype} 2>/dev/null | sed 1d | grep -v "^[^ ]*/lib/[^ ]*\.so\.1 " |
            while read fs_device fs_kbytes fs_used fs_avail fs_capacity fs_mountpoint; do
                # recompute the size so that used + avail always add up
                fs_kbytes=$((fs_used + fs_avail))
                echo "${fs_device} ${fstype} ${fs_kbytes} ${fs_used} ${fs_avail} ${fs_capacity} ${fs_mountpoint}"
            done
    done
}

section_zfs() {
    # Filesystem usage for ZFS. Newer zfs versions know the 'quota' and
    # 'used' properties; for older ones fall back to 'referenced' and
    # rewrite it to 'used' for the parser.
    inpath zfs || return 0
    echo '<<<zfsget>>>'
    zfs get -t filesystem,volume -Hp name,quota,used,avail,mountpoint,type 2>/dev/null ||
        zfs get -Hp name,referenced,avail,mountpoint,type | sed 's/referenced/used/g'
    echo '[df]'
    df -l -k -F zfs 2>/dev/null | sed 1d
}

section_zfs_arc_cache() {
    # ZFS arc cache statistics. kstat is preferred; newer Solaris
    # (>=11.3) does not provide hits and misses via 'mdb -k' anymore.
    echo '<<<zfs_arc_cache>>>'
    if inpath kstat; then
        # strip the "zfs:0:arcstats:" prefix and print "name = value"
        kstat -p zfs:0:arcstats | sed -e 's/.*arcstats://g' | awk '{printf "%s = %s\n", $1, $2;}'
    elif inpath mdb; then
        echo '::arc' | mdb -k
    fi
}

section_ps() {
    # Processes: the <<<ps>>> section - a timestamp plus one line per
    # process in the form "(user,vsz,rss,pcpu,etime/pid) command line".
    echo '<<<ps>>>'
    echo "[time]"
    get_epoch
    echo "[processes]"
    # The default solaris ps command strips the command lines of the processes. But for good process
    # matching on the server we really need to whole command line. On linux there are arguments to
    # make ps output the whole command line, but on solaris this seems to be missing. We use the ucb
    # ps command to get the full command line instead. What a hack.
    if [ -x /usr/ucb/ps ]; then
        # full-width process list, used below to recover complete command lines
        UCB_PS=$(/usr/ucb/ps -agwwwx)
        # reformat each line to "(user,vsz,rss,pcpu,etime/pid) args"
        # shellcheck disable=SC2086 # yes, we want splitting on pszone
        PS=$(ps -o "user=USER............" -o vsz,rss,pcpu,etime,pid,args ${pszone} |
            sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4\/\5,\6) /')
        while read -r LINE; do
            STATS=${LINE%) *}
            PID=${STATS##*,}

            # Directly use ps output when line is too slow to be stripped
            # NOTE(review): "too slow" presumably means "too short to have
            # been truncated by ps" - lines under 100 chars pass through.
            if [ ${#LINE} -lt 100 ]; then
                echo "${LINE}"
                continue
            fi

            # full command line from the ucb ps output (columns 5..NF)
            CMD=$(echo "${UCB_PS}" | grep "^[ ]*${PID} " | head -n1 |
                awk '{ s = ""; for (i = 5; i <= NF; i++) s = s $i " "; print s }')
            # Only use the ucb ps line when it's not empty (process might already been gone)
            if [ -z "${CMD}" ]; then
                echo "${LINE}"
            else
                echo "${STATS}) ${CMD}"
            fi
        done <<<"${PS}"
    else
        # no ucb ps available: emit the (possibly truncated) ps output directly
        # shellcheck disable=SC2086 # yes, we want splitting on pszone
        ps -o "user=USER............" -o vsz,rss,pcpu,etime,pid,args ${pszone} |
            sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4\/\5,\6) /'
    fi
    echo '<<<mem_total_solaris:sep(0)>>>'
    prtconf | grep "Memory"
}

section_statgrab() {
    # Statgrab
    # source: http://www.i-scream.org/libstatgrab/
    # binary: http://www.opencsw.org/
    # Runs statgrab once into a temp file and slices the result into the
    # <<<statgrab_*>>> sections; falls back to top-based <<<solaris_mem>>>
    # if statgrab is not installed.
    if inpath statgrab; then
        # "statgrab mem" is known to be slow on some systems, so we move it to the back.
        # this way only statgrab_mem section will be affected if statgrab is killed by timeout.
        statgrab_vars="general. const. user. cpu. page. disk. swap. mem."
        statgrab_sections="cpu disk page"

        # Collect net stats in the global zone and in local zones if dlstat is present.
        if [ "${zonename}" == "global" ] || inpath dlstat; then
            statgrab_vars="${statgrab_vars} net."
            statgrab_sections="${statgrab_sections} net"
        fi

        # NOTE(review): predictable temp file name in /tmp; the PID suffix
        # limits but does not eliminate collision/symlink concerns.
        statgrab_temporary_file="/tmp/statgrab.$$"
        statgrab_cleanup() {
            # shellcheck disable=SC2317 # shellcheck doesn't understand trap
            if [ -f "${statgrab_temporary_file}" ]; then
                rm -f "${statgrab_temporary_file}"
            fi
        }
        trap statgrab_cleanup EXIT

        # guard against statgrab hanging, if a timeout command is available
        if inpath timeout; then
            statgrab_wrapped() { timeout 20 statgrab "$@"; }
        else
            statgrab_wrapped() { statgrab "$@"; }
        fi

        # shellcheck disable=SC2086
        if statgrab_wrapped ${statgrab_vars} 1>$statgrab_temporary_file; then
            # only output sections if statgrab did exit successfully and no timeout did occur
            for s in ${statgrab_sections}; do
                echo "<<<statgrab_${s}>>>"
                grep "^${s}\." $statgrab_temporary_file | grep -v md | cut -d. -f2-99 | sed 's/ *= */ /'
            done

            # <<<statgrab_mem>>> info is preferred over <<<solaris_mem>>>
            # since solaris_mem is under suspicion to be buggy.
            echo '<<<statgrab_mem>>>'
            # shellcheck disable=SC2196
            grep -E "^(swap|mem)\." $statgrab_temporary_file | grep -v md | sed 's/ *= */ /'
        fi

    else
        # Memory
        # <<<solaris_mem>>> should be used if statgrab is missing and top is available.
        if [ -x /usr/bin/top ] || [ -x /usr/local/bin/top ]; then
            echo "<<<solaris_mem>>>"
            if [ -x /usr/bin/top ]; then /usr/bin/top | grep '^Memory:'; fi
            if [ -x /usr/local/bin/top ]; then /usr/local/bin/top | grep '^Memory:'; fi
        fi
    fi
}

section_cpu() {
    # Simulated output of the Linux /proc based <<<cpu>>> section.
    # Fields: three load averages, "1/<thread count>", our own PID,
    # and the psrinfo-reported processor count.
    load=$(uptime | sed -e 's;.*average: \([0-9]\{1,\}\.[0-9]\{1,\}\), \([0-9]\{1,\}\.[0-9]\{1,\}\), \([0-9]\{1,\}\.[0-9]\{1,\}\).*;\1 \2 \3;')
    nthreads=$(($(ps -AL | wc -l)))
    procs=$(($(psrinfo | wc -l)))
    echo '<<<cpu>>>'
    echo "${load} 1/${nthreads} $$ ${procs}"
}

section_zpool() {
    # zpool status (cached/asynchronous) plus zpool list (synchronous).
    [ -x /sbin/zpool ] || return 0
    _run_cached_internal "zpool_status" 120 120 360 240 "echo '<<<zpool_status>>>'; /sbin/zpool status -x"

    ${MK_RUN_SYNC_PARTS} || return

    echo '<<<zpool>>>'
    zpool list
}

section_uptime() {
    # Solaris doesn't always give a consistent uptime output, so include
    # raw side information between the solaris markers.
    # Tested in VM for solaris 10/11.
    echo '<<<uptime>>>'
    boot_time=$(kstat '-p' 'unix:::boot_time' 2>&1 | grep 'boot_time' | awk '{print $2}')
    current_time=$(nawk 'BEGIN{print srand()}')
    echo $((current_time - boot_time))
    echo '[uptime_solaris_start]'
    uname -a
    zonename
    uptime
    kstat -p unix:0:system_misc:snaptime
    echo '[uptime_solaris_end]'
}

section_ntp() {
    # Emit peer statistics, but only when an ntpd is running in this zone.
    # shellcheck disable=SC2086 # yes, we want splitting on pszone
    ps -o comm ${pszone} | grep -w ".*ntpd" &>/dev/null || return

    echo '<<<ntp>>>'
    # drop the two header lines and rework the first column for the parser
    ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/'
}

section_solaris_prtg() {
    # prtdiag health status, cached/asynchronous; only the exit code is
    # reported. prtdiag does not work in local zones, hence the guard.
    inpath prtdiag || return 0
    [ "${zonename}" = "global" ] || return 0
    _run_cached_internal "solaris_prtdiag_status" 300 300 900 600 \
        'echo "<<<solaris_prtdiag_status>>>"; /usr/sbin/prtdiag 1>/dev/null 2>&1; echo $?'
}

section_tcp() {
    # TCP connection stats: count the occurrences of each state.
    echo '<<<tcp_conn_stats>>>'
    # skip the four header lines, then tally column 7 (the state)
    netstat -n -a -f inet -P tcp | tail +5 |
        nawk '{ c[$7]++; } END { for (x in c) { print x, c[x]; } }'
}

section_multipathing() {
    # Multipathing information; mpathadm is only usable in the global zone.
    inpath mpathadm || return 0
    [ "${zonename}" = "global" ] || return 0
    echo '<<<solaris_multipath>>>'
    # condense each 3-line 'mpathadm list LU' record into a single line
    # built from the 1st, 2nd and 3rd line's fields
    mpathadm list LU | nawk '{if(NR%3==1){dev=$1}
                              if(NR%3==2){tc=$NF}
                              if(NR%3==0){printf "%s %s %s\n",dev,tc,$NF}}'
}

#
# BEGIN COMMON AGENT CODE
#

section_job() {
    # Output statistics about jobs monitored via mk-job.

    _cat_files() {
        # Read file names from stdin and print each like `head -n -0 -v`:
        # a "==> name <==" header followed by the file's content.
        while read -r fname; do
            printf "==> %s <==\n" "${fname##./}"
            cat "${fname}"
        done
    }

    (
        cd "${JOBDIR}" 2>/dev/null || return
        printf "<<<job>>>\n"
        for jobuser in *; do
            (
                cd "${jobuser}" 2>/dev/null || return # return from subshell only
                # The per-user folder is owned (and thus writable) by the user
                # that ran the jobs. The agent (root) must not read files that
                # are not owned by that user: this prevents symlink or
                # hardlink attacks.
                find -L . -type f -user "${jobuser}" | _cat_files
            )
        done
    )
}

section_fileinfo() {
    # fileinfo check: put patterns for files into /etc/check_mk/fileinfo.cfg
    # or into /etc/check_mk/fileinfo.d/*. The embedded perl script expands
    # $DATE:<fmt>$ placeholders via date(1), globs each pattern and prints
    # one "name|status|size|mtime" line per match ("missing" for patterns
    # matching nothing that is a file).
    perl -e '
    use File::Glob "bsd_glob";
    my @patterns = ();
    foreach (bsd_glob("$ARGV[0]/fileinfo.cfg"), bsd_glob("$ARGV[0]/fileinfo.d/*")) {
        open my $handle, "<", $_ or next;
        while (<$handle>) {
            chomp;
            next if /^\s*(#|$)/;
            my $pattern = $_;
            $pattern =~ s/\$DATE:(.*?)\$/substr(`date +"$1"`, 0, -1)/eg;
            push @patterns, $pattern;
        }
        warn "error while reading $_: $!\n" if $!;
        close $handle;
    }
    exit if ! @patterns;

    my $file_stats = "";
    foreach (@patterns) {
        foreach (bsd_glob("$_")) {
            if (! -f) {
                $file_stats .= "$_|missing\n" if ! -d;
            } elsif (my @infos = stat) {
                $file_stats .= "$_|ok|$infos[7]|$infos[9]\n";
            } else {
                $file_stats .= "$_|stat failed: $!\n";
            }
        }
    }

    print "<<<fileinfo:sep(124)>>>\n", time, "\n[[[header]]]\nname|status|size|time\n[[[content]]]\n$file_stats";
    ' -- "${MK_CONFDIR}"
}

#
# END COMMON AGENT CODE
#

section_libelle() {
    # Libelle Business Shadow: dump the trd status if the tool is present.
    inpath trd || return 0
    echo '<<<libelle_business_shadow:sep(58)>>>'
    trd -s
}

section_solaris_fmadm() {
    # Displaying Information About Faults or Defects.
    # If there are no faults the output of this command will be empty.
    inpath fmadm || return 0
    echo '<<<solaris_fmadm:sep(58)>>>'
    fmadm faulty
}

section_solaris_services() {
    # Information about services running on Solaris: 'svcs -a' lists all
    # service instances, including disabled or incomplete ones.
    inpath svcs || return 0
    echo '<<<solaris_services>>>'
    svcs -a
}

section_checkmk_failed_plugins() {
    # Report all python plugins that could not be executed, plus the
    # reason if we know it. Nothing is emitted when the list is empty.
    [ -n "${FAILED_PYTHON_PLUGINS[*]}" ] || return 0
    echo "<<<check_mk>>>"
    echo "FailedPythonPlugins: ${FAILED_PYTHON_PLUGINS[*]}"
    if [ -n "${NO_PYTHON}" ]; then
        echo "FailedPythonReason: No suitable python installation found."
    elif [ -n "${WRONG_PYTHON_COMMAND}" ]; then
        echo "FailedPythonReason: Configured python command not found."
    fi
}

#
# BEGIN COMMON AGENT CODE
#

run_cached() {
    # Compatibility wrapper for plugins that might use run_cached.
    # We should have never exposed this as quasi API.
    # Usage: run_cached NAME MAXAGE REFRESH_INTERVAL [COMMAND ...]
    NAME="${1}"
    MAXAGE="${2}"
    REFRESH_INTERVAL="${3}"
    shift 3

    # derive the newer, finer grained timeouts from MAXAGE
    OUTPUT_TIMEOUT=$((MAXAGE * 3))
    CREATION_TIMEOUT=$((MAXAGE * 2))

    _run_cached_internal "${NAME}" "${REFRESH_INTERVAL}" "${MAXAGE}" "${OUTPUT_TIMEOUT}" "${CREATION_TIMEOUT}" "$@"
}

_run_cached_internal() {
    # Run a command asynchronous by use of a cache file.
    # Usage: _run_cached_internal NAME REFRESH_INTERVAL MAXAGE OUTPUT_TIMEOUT CREATION_TIMEOUT [COMMAND ...]
    # Note that while multiple COMMAND arguments are considered, they are evaluated in a string.
    # This means that extra escaping is required.
    # For example:
    # To run a cat command every two minutes, considering the created data valid for three minutes,
    # send the created data for four minutes and allowing the command to run for 12 minutes, you'll have to call
    #
    #   _run_cached_interal "my_file_content" 120 180 240 720 "cat \"My File\""
    #
    # Mind the escaping...

    NAME="${1}"
    # name of the section (also used as cache file name)

    REFRESH_INTERVAL="${2}"
    # threshold in seconds when the cache file needs to be regenerated

    MAXAGE="${3}"
    # maximum cache livetime in seconds

    OUTPUT_TIMEOUT="${4}"
    # threshold in seconds for how long the cache file will be output (regardless of whether it is outdated)

    CREATION_TIMEOUT="${5}"
    # threshold in seconds for how long the process is allowed to be running before it is killed (see below for details)

    shift 5

    if ${DISABLE_CACHING:-false}; then
        # We need to be compatible with the caching case. This section mirrors the implementation
        # below.
        cat <<HERE | "${CURRENT_SHELL}"
            $*
HERE
        return
    fi

    [ -d "${CACHEDIR}" ] || mkdir -p "${CACHEDIR}"
    CACHEFILE="${CACHEDIR}/${NAME}.cache"
    FAIL_REPORT_FILE="${SPOOLDIR}/${NAME}.cachefail"

    NOW="$(get_epoch)"
    MTIME="$(get_file_mtime "${CACHEFILE}" 2>/dev/null)" || MTIME=0

    if ${MK_RUN_SYNC_PARTS}; then
        if [ -s "${CACHEFILE}" ] && [ $((NOW - MTIME)) -le "${OUTPUT_TIMEOUT}" ]; then
            # Output the file (if it is not too outdated)
            CACHE_INFO="cached(${MTIME},${MAXAGE})"
            # prefix or insert cache info, unless already present.
            # WATCH OUT: AIX does not allow us to pass this as a single '-e' option!
            if [ "${NAME%%_*}" = "local" ] || [ "${NAME%%_*}" = "mrpe" ]; then
                # local/mrpe: prefix each line with the cache info
                sed -e '/^<<<.*>>>/{p;d;}' -e '/^cached([0-9]*,[0-9]*) /{p;d;}' -e "s/^/${CACHE_INFO} /" "${CACHEFILE}"
            else
                # regular sections: splice the cache info into the header
                sed -e '/^<<<.*\(:cached(\).*>>>/{p;d;}' -e 's/^<<<\([^>]*\)>>>$/<<<\1:'"${CACHE_INFO}"'>>>/' "${CACHEFILE}"
            fi
        fi

    fi

    if ${MK_RUN_ASYNC_PARTS}; then
        # Kill the process if it is running too long (cache file not accessed for more than CREATION_TIMEOUT seconds).
        # If killing succeeds, remove CACHFILE.new.PID.
        # Write info about the timed out process and the kill attempt to the SPOOLDIR.
        # It will be reported to the server in the next (synchronous) agent execution.
        # The file will be deleted as soon as the plugin/local check is functional again.
        # Do not output the file here, it will interrupt the local and mrpe sections, as well as any other
        # partially cached section.
        for cfile in "${CACHEFILE}.new."*; do
            [ -e "${cfile}" ] || break # no match
            TRYING_SINCE="$(get_file_atime "${cfile}")"
            [ -n "${TRYING_SINCE}" ] || break # race condition: file vanished
            if [ $((NOW - TRYING_SINCE)) -ge "${CREATION_TIMEOUT}" ]; then
                {
                    printf "<<<checkmk_cached_plugins:sep(124)>>>\n"
                    pid="${cfile##*.new.}"
                    printf "timeout|%s|%s|%s\n" "${NAME}" "${CREATION_TIMEOUT}" "${pid}"
                    kill -9 "${pid}" >/dev/null 2>&1 && sleep 2 # TODO: what about child processes?
                    if [ -n "$(ps -o args= -p "${pid}")" ]; then
                        printf "killfailed|%s|%s|%s\n" "${NAME}" "${CREATION_TIMEOUT}" "${pid}"
                    else
                        rm -f "${cfile}"
                    fi
                } >"${FAIL_REPORT_FILE}" 2>&1
            fi
        done

        # This does the right thing, regardless whether the pattern matches!
        _cfile_in_use() {
            for cfile in "${CACHEFILE}.new."*; do
                printf "%s\n" "${cfile}"
                break
            done
        }

        # Time to refresh cache file and new job not yet running?
        if [ $((NOW - MTIME)) -gt "${REFRESH_INTERVAL}" ] && [ ! -e "$(_cfile_in_use)" ]; then
            # Start it. If the command fails the output is thrown away
            cat <<HERE | nohup "${CURRENT_SHELL}" >/dev/null 2>&1 &
                eval '${MK_DEFINE_LOG_SECTION_TIME}'
                exec > "${CACHEFILE}.new.\$\$" || exit 1
                $* \
                && mv -f "${CACHEFILE}.new.\$\$" "${CACHEFILE}" && rm -f "${FAIL_REPORT_FILE}" \
                || rm -f "${CACHEFILE}.new.\$\$"
HERE
        fi

    fi

    unset NAME MAXAGE CREATION_TIMEOUT REFRESH_INTERVAL CACHEFILE NOW MTIME CACHE_INFO TRYING_SINCE OUTPUT_TIMEOUT
}

run_local_checks() {
    # Emit the <<<local>>> section from the executables in LOCALDIR.
    cd "${LOCALDIR}" || return

    if ${MK_RUN_SYNC_PARTS}; then
        echo '<<<local:sep(0)>>>'
        for localcheck in ./*; do
            is_valid_plugin "${localcheck}" && _log_section_time "${localcheck}"
        done
    fi

    # Local checks in numbered subdirectories run cached: the directory
    # name is the interval in seconds between executions.
    for localcheck in [1-9]*/*; do
        is_valid_plugin "${localcheck}" || continue
        cache_seconds="${localcheck%/*}"
        _run_cached_internal "local_${localcheck##*/}" "${cache_seconds}" "${cache_seconds}" $((cache_seconds * 3)) $((cache_seconds * 2)) "_log_section_time '${localcheck}'"
    done
}

run_spooler() {
    # Dump the contents of all files in the spool directory.
    # A file name starting with digits declares a maximum age in seconds;
    # files whose mtime is older than that are silently skipped.
    (
        cd "${SPOOLDIR}" 2>/dev/null || return

        now=$(get_epoch)

        for spoolfile in *; do
            # An unmatched glob stays literal: the directory is empty.
            [ "${spoolfile}" != "*" ] || return

            age_limit="${spoolfile%%[^0-9]*}"
            if [ -n "${age_limit}" ]; then
                modified=$(get_file_mtime "${spoolfile}")
                [ $((now - modified)) -le "${age_limit}" ] || continue
            fi

            cat "${spoolfile}"
        done
    )
}

get_plugin_interpreter() {
    # Return the interpreter (or "") for the plugin file (or fail).
    # We return the interpreter instead of wrapping the call, so we don't
    # have to export the function (which is not portable).

    # normalize input
    agent_plugin="${1#./}"

    extension="${agent_plugin##*.}"
    filename="${agent_plugin%.*}"

    # Execute all non python plugins with ./foo
    if [ "${extension}" != "py" ]; then
        return 0
    fi

    if [ "${filename#"${filename%??}"}" != "_2" ]; then
        # No "_2" suffix: this plugin wants Python 3.
        if [ -n "${NO_PYTHON}" ] || [ -n "${WRONG_PYTHON_COMMAND}" ]; then
            section_checkmk_failed_plugin "${agent_plugin}"
            return 1
        fi

        if [ -n "${PYTHON3}" ]; then
            echo "${PYTHON3}"
            return 0
        fi

        # BUGFIX: was `"$(unknown)_2.py"`, which ran a nonexistent command
        # and tested for the literal file "_2.py". We must look for the
        # Python 2 fallback plugin next to this one: "<name>_2.py".
        if [ ! -e "${filename}_2.py" ]; then
            section_checkmk_failed_plugin "${agent_plugin} (Missing Python 3 installation)"
            return 1
        fi

        # no python3 found, but python2 plugin file present
        return 1
    fi

    # "_2" suffix: skip it if the Python 3 variant exists and Python 3 is available.
    if [ -x "${filename%??}.py" ] && [ -n "${PYTHON3}" ]; then
        return 1
    fi

    if [ -n "${PYTHON2}" ]; then
        echo "${PYTHON2}"
        return 0
    fi

    section_checkmk_failed_plugin "${agent_plugin} (missing Python 2 installation)"
    return 1
}

run_plugins() {
    # Execute the agent plugins found in PLUGINSDIR.
    cd "${PLUGINSDIR}" || return

    if ${MK_RUN_SYNC_PARTS}; then
        for plugin in ./*; do
            is_valid_plugin "${plugin}" || continue
            if plugin_interpreter=$(get_plugin_interpreter "${plugin}"); then
                # SC2086: The interpreter must stay unquoted: when it is
                # empty it has to vanish entirely, not become "''".
                # shellcheck disable=SC2086
                _log_section_time ${plugin_interpreter} "${plugin}"
            fi
        done
    fi

    # Plugins in numbered subdirectories run cached: the directory name
    # is the interval in seconds between executions.
    for plugin in [1-9]*/*; do
        is_valid_plugin "${plugin}" || continue
        if plugin_interpreter=$(get_plugin_interpreter "${plugin}"); then
            interval="${plugin%/*}"
            # shellcheck disable=SC2086
            _run_cached_internal "plugins_${plugin##*/}" "${interval}" "${interval}" $((interval * 3)) $((interval * 2)) _log_section_time ${plugin_interpreter} "${plugin}"
        fi
    done
}

_non_comment_lines() {
    # Print the lines of file ${1} that are neither blank nor comments.
    grep -v -E '^[[:space:]]*(#|$)' "${1}"
}

_mrpe_get_interval() {
    # Print the caching interval in seconds from a leading "(...)" option
    # group (e.g. "(interval=60)"); print nothing if none is configured.
    printf '%s\n' "${1}" |
        grep -E '^\([^)]*\)' |
        sed -n 's/^.*interval=\([^:)]*\).*$/\1/p'
}

_mrpe_normalize_spaces() {
    # Turn runs of tabs into a single space ('\011' is the octal tab).
    # watch out:
    # * [:blank:] does not include \t on AIX
    # * [:space:] does include \n on Linux
    tr -s '\011' ' '
}

run_remote_plugins() {
    # Run the MRPE checks listed in config file ${1}.
    # Each non-comment line is "<description> [(options)] <command line>".
    # ${2} is an optional command prefix put in front of every command line.
    configfile="${1}"
    prefix="${2}"
    [ -f "${configfile}" ] || return

    _non_comment_lines "${configfile}" | _mrpe_normalize_spaces | while read -r descr rest; do
        # "(interval=N)" in the option group makes the check cached/asynchronous.
        interval="$(_mrpe_get_interval "${rest}")"
        # Strip the leading "(...)" option group from the command line.
        cmdline="${rest#\(*\) }"

        if [ -n "${prefix}" ]; then
            cmdline="${prefix} '${cmdline}'"
        fi

        if [ -z "${interval}" ]; then
            ${MK_RUN_SYNC_PARTS} && run_mrpe "${descr}" "${cmdline}"
        else
            # Sourcing the agent here is not very performant, but we need 'run_mrpe', and not all shells support exporting of functions.
            _run_cached_internal "mrpe_${descr}" "${interval}" "${interval}" $((interval * 3)) $((interval * 2)) "MK_SOURCE_AGENT=yes . '${0}'; run_mrpe \"${descr}\" \"${cmdline}\""
        fi

    done
}

run_mrpe() {
    # Execute one MRPE check and print its result as a single <<<mrpe>>>
    # section line: "(<plugin basename>) <description> <status> <output>".
    descr="${1}"
    shift

    # First word of the command line is the plugin path.
    PLUGIN="${1%% *}"
    # Evaluate the timing-wrapper definition, then run the command through it.
    # The assignment keeps the command's exit status available in "$?".
    OUTPUT="$(eval "${MK_DEFINE_LOG_SECTION_TIME}; _log_section_time $*")"
    STATUS="$?"

    printf "<<<mrpe>>>\n"
    # Newlines in the plugin output are encoded as \1 bytes so the whole
    # result stays on one line.
    printf "(%s) %s %s %s" "${PLUGIN##*/}" "${descr}" "${STATUS}" "${OUTPUT}" | tr \\n \\1
    printf "\n"

    unset descr PLUGIN OUTPUT STATUS
}

#
# END COMMON AGENT CODE
#

run_purely_synchronous_sections() {
    # Emit all sections that always run inline in the synchronous part.
    # Each section (except the mandatory first two) can be suppressed by
    # setting its corresponding MK_SKIP_* environment variable.
    _log_section_time section_checkmk

    _log_section_time section_cmk_agent_ctl_status

    [ -z "${MK_SKIP_CHECKMK_AGENT_PLUGINS}" ] && _log_section_time section_checkmk_agent_plugins

    [ -z "${MK_SKIP_JOB}" ] && _log_section_time section_job

    [ -z "${MK_SKIP_DF}" ] && _log_section_time section_df

    [ -z "${MK_SKIP_ZFS}" ] && _log_section_time section_zfs

    [ -z "${MK_SKIP_ZFS_ARC_CACHE}" ] && _log_section_time section_zfs_arc_cache

    [ -z "${MK_SKIP_PS}" ] && _log_section_time section_ps

    [ -z "${MK_SKIP_STATGRAB}" ] && _log_section_time section_statgrab

    [ -z "${MK_SKIP_CPU}" ] && _log_section_time section_cpu

    [ -z "${MK_SKIP_UPTIME}" ] && _log_section_time section_uptime

    [ -z "${MK_SKIP_NTP}" ] && _log_section_time section_ntp

    [ -z "${MK_SKIP_TCP}" ] && _log_section_time section_tcp

    [ -z "${MK_SKIP_MULTIPATHING}" ] && _log_section_time section_multipathing

    [ -z "${MK_SKIP_FILEINFO}" ] && _log_section_time section_fileinfo

    [ -z "${MK_SKIP_LIBELLE}" ] && _log_section_time section_libelle

    [ -z "${MK_SKIP_SOLARIS_FMADM}" ] && _log_section_time section_solaris_fmadm

    [ -z "${MK_SKIP_SOLARIS_SERVICES}" ] && _log_section_time section_solaris_services
}

run_partially_asynchronous_sections() {
    # Sections emitted in the mixed (sync + async) phase; each can be
    # suppressed via its MK_SKIP_* environment variable.
    [ -z "${MK_SKIP_SOLARIS_PRTG}" ] && _log_section_time section_solaris_prtg

    [ -z "${MK_SKIP_ZPOOL}" ] && _log_section_time section_zpool
}

main_setup() {
    # One-time initialization: argument parsing, environment, paths,
    # python detection and optional output encryption. Order matters.

    # Detach from stdin: the agent must never block waiting for input.
    exec </dev/null

    set_up_process_commandline_arguments "$@"

    # Suppress stderr unless it was explicitly enabled (e.g. via --debug).
    if "${DISABLE_STDERR:-true}"; then
        exec 2>/dev/null
    fi

    init_sudo

    set_up_get_epoch

    set_up_current_shell

    set_up_grep

    # Decide which parts (sync/async) this invocation has to run.
    determine_sync_async

    # Agent deployment paths — see the note on deployment modes at the top.
    set_up_single_directory
    set_default_paths
    provide_agent_paths

    unset_locale

    preamble_1

    set_up_profiling

    detect_python

    set_up_encryption

    preamble_2

    preamble_3

    export_utility_functions
}

main_sync_parts() {
    # Output produced inline on every synchronous run.

    run_purely_synchronous_sections

    run_spooler

}

main_mixed_parts() {
    # Parts that contain both synchronous and cached/asynchronous pieces.

    run_partially_asynchronous_sections

    # MRPE checks from the default configuration file (no command prefix).
    run_remote_plugins "${MK_CONFDIR}/mrpe.cfg"

    run_local_checks

    run_plugins

}

main_async_parts() {
    # Parts that run only in the asynchronous phase; nothing to do here.
    # no run_real_time_checks available
    :
}

main_finalize_sync() {
    # Final synchronous output: report plugins that failed to run, then
    # wrap up profiling (if it was enabled).
    _log_section_time section_checkmk_failed_plugins

    finalize_profiling
}

#
# BEGIN COMMON AGENT CODE
#

main() {

    # Run once, or repeatedly if MK_LOOP_INTERVAL is a positive number of
    # seconds (daemon-like operation).
    while true; do

        main_setup "$@"

        # All output is produced in one subshell so it can be piped as a
        # whole through the optional encryption below.
        (

            ${MK_RUN_SYNC_PARTS} && main_sync_parts

            (${MK_RUN_ASYNC_PARTS} || ${MK_RUN_SYNC_PARTS}) && main_mixed_parts

            ${MK_RUN_ASYNC_PARTS} && main_async_parts

            ${MK_RUN_SYNC_PARTS} && main_finalize_sync

        ) | { if ${MK_RUN_SYNC_PARTS}; then optionally_encrypt "${PASSPHRASE}" ""; else cat; fi; }

        # Stop unless MK_LOOP_INTERVAL is a positive integer (the stderr
        # redirect also swallows the error for unset/non-numeric values).
        [ "${MK_LOOP_INTERVAL}" -gt 0 ] 2>/dev/null || return 0

        sleep "${MK_LOOP_INTERVAL}"

    done

}

# When MK_SOURCE_AGENT is set, the file is only sourced for its function
# definitions (e.g. by cached MRPE jobs); otherwise run the agent.
[ -n "${MK_SOURCE_AGENT}" ] || main "$@"
