#!/bin/bash

#Lots of code copied from AWS nitro enclave bootstrap, and modified to
#be useful in qingtian_enclaves:
#https://github.com/aws/aws-nitro-enclaves-cli/blob/main/bootstrap/nitro-enclaves-allocator
#aws-nitro-enclaves-cli
#Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.


# This script is executed every time the qingtian enclave resources configuration service is restarted.
# Its purpose is to reserve the requested memory and CPUs.

# Path to the allocator config file (key: value lines, parsed by parse_config).
CONFIG_FILE_PATH="/etc/qingtian/enclave/qt-enclave-env.conf"

# The file which holds the CPU pool.
# Writing a CPU list to this qtbox module parameter off-lines those CPUs for
# enclave use; writing an empty string returns them all to the parent VM.
CPU_POOL_FILE="/sys/module/virtio_qtbox/parameters/sandbox_cpus"

# Path of the sandbox driver module, loaded by pre_install when not present.
QTBOX_DRIVER_PATH="/opt/qingtian/enclave/virtio-qtbox.ko"

# Config variables to be populated when parsing the config file.
#   hugepage_size - hugepage size in MiB (2 or 1024)
#   memory_mib    - total enclave memory to reserve, in MiB
#   cpu_count     - number of CPUs to reserve (mutually exclusive with cpu_list)
#   cpu_list      - explicit CPU list to reserve (mutually exclusive with cpu_count)
declare -A CONFIG
CONFIG[hugepage_size]=
CONFIG[memory_mib]=
CONFIG[cpu_count]=
CONFIG[cpu_list]=

# CPU pool selected by select_cpu_pool.
# NOTE(review): declared associative here, but select_cpu_pool runs
# `unset CPU_POOL_ARRAY` (dropping the -A attribute) before it is rebuilt as
# an indexed array by put_cpus_into_pool — the -A attribute never survives.
declare -A CPU_POOL_ARRAY

# Target NUMA node name ("nodeN"); set as a side effect of
# get_numa_node_from_cpu_list / configure_hugepages, read by set_hugepages.
NUMA_NODE=""

# Identify all CPU IDs that have been off-lined for enclave use.
# Reads the pool file (comma-separated ranges such as "1-3,5-5") and emits
# one CPU ID per line on stdout; emits nothing when the file is absent.
function get_cpu_list {
    [ -f "$CPU_POOL_FILE" ] || return

    local range_groups group range_start range_end id

    # Break the pool description into its comma-separated range groups.
    IFS=',' read -r -a range_groups <<< "$(< "$CPU_POOL_FILE")"

    for group in "${range_groups[@]}"
    do
        # A group is either "N" or "N-M"; for a lone "N" both expansions
        # yield N, so exactly one ID is printed.
        range_start=${group%%-*}
        range_end=${group##*-}
        for id in $(seq "$range_start" "$range_end")
        do
            echo "$id"
        done
    done
}

# Determine the NUMA node which contains the enclave-available CPUs and store
# its name (e.g. "node0") in the global NUMA_NODE. All off-lined CPUs must
# belong to the same node.
#   $* - whitespace-separated CPU IDs, typically "$(get_cpu_list)"
# NOTE(review): the ERR_EMPTY_CPU_POOL / ERR_MISSING_NODE / ERR_INVALID_NODE
# variables are not defined anywhere in this file; as written they expand to
# nothing, so "return $ERR_..." becomes a bare "return" that propagates the
# status of the preceding test (non-zero on these paths, but not the distinct
# codes the names suggest). Confirm whether they are meant to be sourced.
function get_numa_node_from_cpu_list {
    local offline_cpus="$*"
    local numa_node=""

    # An empty pool means no CPUs have been off-lined yet.
    [ -n "$offline_cpus" ] || return $ERR_EMPTY_CPU_POOL

    # Next, check the NUMA node for each CPU.
    # (Unquoted expansion is intentional: word-split the ID list.)
    for cpu_id in $offline_cpus
    do
        # cpu<N>/node<M> names the CPU's NUMA node; "file" lists the entry
        # and the path before the ':' is reduced to "node<M>" by basename.
        node=$(basename "$(file /sys/devices/system/cpu/cpu"$cpu_id"/node* | cut -d':' -f1)")
        [ -n "$node" ] || return $ERR_MISSING_NODE

        # Ensure the NUMA node is the same for all off-line CPUs.
        if [ -z "$numa_node" ]
        then
            numa_node="$node"
        else
            [ "$numa_node" == "$node" ] || return $ERR_INVALID_NODE
        fi
    done

    # Set and validate the target NUMA node.
    NUMA_NODE="$numa_node"
    [ -n "$NUMA_NODE" ] || return $ERR_MISSING_NODE
}

# Reserve hugepages on the NUMA node named by the global NUMA_NODE.
#   $1 - configured hugepage size in MiB (2 or 1024, validated by parse_config)
#   $2 - number of pages to reserve
# Returns 1 if a sysfs write fails; exits the script when the size is
# unsupported, or (after rolling back CPU/memory reservations) when the
# kernel grants fewer pages than requested.
function set_hugepages {
    conf_page_size=$1
    need_pages=$2
    local page_key=""

    # Make sure the NUMA node is set.
    [ -n "$NUMA_NODE" ] || return

    # hugepage_size is configured in MiB; convert it to bytes for comparison.
    real_page_size=$((conf_page_size * (1 << 20)))

    # List this node's hugepages-<size><unit> sysfs directories.
    hugepage_sizes=$(file /sys/devices/system/node/"$NUMA_NODE"/hugepages/hugepages-* | cut -d':' -f1)

    for hugepage in $hugepage_sizes
    do
        # Retain only the dimension of the hugepage (e.g. "2048kB").
        hugepage=$(basename "$hugepage" | cut -d'-' -f2)

        # clear previous hugepage reservations
        echo 0 > /sys/devices/system/node/$NUMA_NODE/hugepages/hugepages-$hugepage/nr_hugepages || return 1

        # Get the numeric part of the huge page size.
        page_size=$(echo "$hugepage" | tr -dc '0-9')

        # Get the multiplier (kB, mB etc.) in upper-case.
        # NOTE(review): ${var^} upper-cases only the first character, so the
        # kernel's "kB" becomes "KB" and matches below; an all-lower-case
        # unit would not.
        page_multiplier=$(echo "$hugepage" | tr -d '0-9')
        page_multiplier="${page_multiplier^}"

        # Convert the page size to bytes.
        case "$page_multiplier" in
            "KB")
            page_size=$((page_size * (1 << 10)))
            ;;
            "MB")
            page_size=$((page_size * (1 << 20)))
            ;;
            "GB")
            page_size=$((page_size * (1 << 30)))
            ;;
        esac
        # Remember the sysfs name whose size matches the configured one.
        if [ "$real_page_size" -eq $page_size ]; then
            page_key=$hugepage
        fi
    done

    # No matching sysfs entry: the configured size is unsupported on this node.
    [ -z "$page_key" ] && echo "Not support $conf_page_size MB hugepage_size" && exit 1

    echo "reserved hugepages:echo $need_pages > /sys/devices/system/node/$NUMA_NODE/hugepages/hugepages-$page_key/nr_hugepages"
    echo $need_pages > /sys/devices/system/node/$NUMA_NODE/hugepages/hugepages-$page_key/nr_hugepages || return 1

    # Read back how many pages the kernel actually granted.
    actual_num_pages=$(cat /sys/devices/system/node/"$NUMA_NODE"/hugepages/hugepages-"$page_key"/nr_hugepages)

    # Log the readback only when at least one page was granted; a shortfall
    # is reported by the mismatch check below.
    [ "$actual_num_pages" -eq 0 ] || echo "reserved hugepages result:cat /sys/devices/system/node/"$NUMA_NODE"/hugepages/hugepages-"$page_key"/nr_hugepages is $actual_num_pages"

    # Partial grant: release the CPUs and hugepages again, then abort.
    [ "$need_pages" -ne "$actual_num_pages" ] && \
    echo "allocating hugepages error: actual_num_pages ("$actual_num_pages") is not equal to need_pages ("$need_pages")" && \
    online_all_cpus && recover_all_mems && exit 1

    return 0
}

# Reserve hugepages to back a memory request.
#   $1 - memory to reserve, in MiB
#   $2 - target NUMA node name ("nodeN"); when empty, the node is derived
#        from the CPUs currently off-lined in the pool file
# Returns the status of set_hugepages (0 on success).
function configure_hugepages {
    needed_mem="$1"
    NUMA_NODE="$2"
    config_hugepage_size="${CONFIG[hugepage_size]}"

    # No explicit node given: locate the node owning the reserved CPUs
    # (sets the global NUMA_NODE as a side effect).
    [ -n "$NUMA_NODE" ] || get_numa_node_from_cpu_list "$(get_cpu_list)"

    # memory_mib and hugepage_size are both in MiB, so the quotient is the
    # number of pages needed.
    need_pages=$((needed_mem / config_hugepage_size))

    set_hugepages "$config_hugepage_size" "$need_pages"
    rc=$?
    return "$rc"
}

# Configure the CPU pool.
#   $1 - CPU list string, e.g. "1,2,5-7"
# Exits (after releasing CPUs and hugepages) when the pool file is absent,
# and exits if the kernel rejects the write.
function configure_cpu_pool {
    if [ ! -f "$CPU_POOL_FILE" ]; then
        echo "The CPU pool file is missing. Please make sure the virtio qtbox driver is inserted."
        online_all_cpus && recover_all_mems && exit 1
    fi

    echo "reserved CPU:echo $1 > $CPU_POOL_FILE"
    # Quote the expansions so the list reaches the file verbatim (SC2086).
    echo "$1" > "$CPU_POOL_FILE" || exit 1
    echo "reserved CPU result:cat $CPU_POOL_FILE is $(cat "$CPU_POOL_FILE")"
}

# Query CPU topology via lscpu and assign each result to the caller-named
# variable passed as an argument. Recognized names:
#   g_cpu_count             - total CPU count
#   g_numa_node_count       - number of NUMA nodes
#   g_thread_count_per_core - threads (siblings) per core
#   cpu_0_numa_node         - NUMA node that hosts CPU 0 (which must remain
#                             available to the primary / parent VM)
function obtain_cpu_info {
    local param_counts=$#
    local i value

    # Positional parameters are 1-based; the original loop started at 0 and
    # needlessly matched $0 (the script name) against the patterns.
    for (( i = 1; i <= param_counts; i++ )); do
        case "${!i}" in
            "g_cpu_count")
                value="$(lscpu | grep "^CPU(s):" | cut -d ":" -f 2 | tr -d " \t")"
                ;;
            "g_numa_node_count")
                value="$(lscpu | grep "^NUMA node(s):" | cut -d ":" -f 2 | tr -d " \t")"
                ;;
            "g_thread_count_per_core")
                value="$(lscpu | grep "^Thread(s) per core:" | cut -d ":" -f 2 | tr -d " \t")"
                ;;
            "cpu_0_numa_node")
                value="$(lscpu -p=cpu,node | grep -v "#" | grep "^0," | cut -d "," -f 2)"
                ;;
            *)
                # Unknown name: leave it untouched, as before.
                continue
                ;;
        esac
        # Assign to the variable named by the argument without eval.
        printf -v "${!i}" '%s' "$value"
    done
}

# Exit with an error if any of the given topology values is empty.
# (Fixed: the loop started at index 0, pointlessly inspecting $0; the
# expansion is now quoted so the test is well-formed for empty values; an
# explicit return 0 is added — the original fell off its last failed test
# and returned non-zero even when every value was present.)
function check_cpu_infos {
    local param_counts=$#
    local i

    for (( i = 1; i <= param_counts; i++ )); do
        [ -z "${!i}" ] && echo "Failed to get CPU topology!" && exit 1
    done

    return 0
}

# Validate the requested CPU count against the machine topology.
#   $1 - requested CPU count
#   $2 - total CPUs on the machine
#   $3 - threads (siblings) per core
# Exits on any violation; returns 0 when the request is acceptable.
# (Fixed: the original fell off its last failed test and returned a
# non-zero status even for a valid count.)
function check_cpu_count {
    local cpu_pool_count=$1
    local g_cpu_count=$2
    local g_thread_count_per_core=$3

    # Must be a plain non-negative integer.
    if [[ ! "$cpu_pool_count" =~ ^[0-9]+$ ]]; then
        echo "The CPU count value ($cpu_pool_count) is invalid."
        exit 1
    fi

    # CPU 0 and its siblings stay with the parent VM, so requesting all
    # available CPUs (or more) is rejected as well.
    if [ "$g_cpu_count" -le "$cpu_pool_count" ]; then
        echo "Provided CPU count is higher than available CPUs - $g_cpu_count."
        exit 1
    fi

    # Whole cores only: the count must be a multiple of the thread count.
    if [ $((cpu_pool_count % g_thread_count_per_core)) -ne 0 ]; then
        echo "The CPU count is not multiple of $g_thread_count_per_core (threads per core)."
        exit 1
    fi

    return 0
}

# Echo the CPUs of one NUMA node that may join the enclave pool, or an
# empty string when the node cannot satisfy the request.
#   $1 - NUMA node id
#   $2 - NUMA node that hosts CPU 0
#   $3 - threads (siblings) per core
#   $4 - number of CPUs requested
function get_avail_cpus_of_numa_node {
    local node_id=$1
    local cpu0_node=$2
    local threads_per_core=$3
    local wanted_cpus=$4
    local node_cpu_count=""
    local node_cpu_list=""

    # Count how many CPUs this node exposes.
    node_cpu_count="$(lscpu -p=node | grep -v "#" | grep -c "^$node_id$")"

    if [ -n "$node_cpu_count" ] ; then

        # CPU 0 and its sibling threads stay with the parent VM, so that
        # core does not count towards this node's capacity.
        if [ "$node_id" -eq "$cpu0_node" ] ; then
            node_cpu_count=$((node_cpu_count - threads_per_core))
        fi

        # Only fetch the CPU list when the node can hold the full request.
        if [ "$wanted_cpus" -le "$node_cpu_count" ] ; then
            node_cpu_list="$(lscpu -p=cpu,node | grep -v "#" | grep ",$node_id$" | cut -d "," -f 1)"
            if [ -z "$node_cpu_list" ] ; then
                echo "Failed to get the available CPUs of NUMA node $node_id."
                exit 1
            fi
        fi

    fi

    echo "${node_cpu_list}"
}

# Echo every sibling thread of the core that owns the given CPU (CPU 0's
# line is always excluded), or an empty string when the core does not
# expose the expected number of threads.
#   $1 - CPU id
#   $2 - expected threads (siblings) per core
function get_all_threads_of_cpu {
    local target_cpu=$1
    local expected_threads=$2
    local owning_core=""
    local sibling_list=""
    local sibling_count=""

    # Map the CPU to its physical core id.
    owning_core="$(lscpu -p=cpu,core | grep -v "#" | grep "^$target_cpu," | cut -d "," -f 2)"
    if [ -z "$owning_core" ] ; then
        echo "Failed to get the core id for CPU $target_cpu."
        exit 1
    fi

    # Collect the core's threads; CPU 0 is never eligible for the pool.
    sibling_list="$(lscpu -p=cpu,core | grep -v "#" | grep -v "^0," | grep ",$owning_core$" | cut -d "," -f 1)"
    if [ -z "$sibling_list" ] ; then
        echo "Failed to get the threads for CPU core $owning_core."
        exit 1
    fi

    sibling_count="$(lscpu -p=cpu,core | grep -v "#" | grep -v "^0," | grep -c ",$owning_core$")"

    # Discard cores with missing siblings (e.g. CPU 0's core after the
    # exclusion above) so only whole cores enter the pool.
    [ "$sibling_count" -eq "$expected_threads" ] || sibling_list=""

    echo "${sibling_list}"
}

# Merge the given newline-separated thread IDs into the global
# CPU_POOL_ARRAY, skipping IDs that are already present.
#   $1 - newline-separated list of CPU thread IDs
function put_cpus_into_pool {
    local incoming_threads=$1
    local thread known duplicate

    while read -r thread
    do
        # First entry: (re)initialize the array.
        if [ "${#CPU_POOL_ARRAY[@]}" -eq 0 ] ; then
            CPU_POOL_ARRAY=("$thread")
            continue
        fi

        # Linear membership scan — the pool stays small.
        duplicate=0
        for known in "${CPU_POOL_ARRAY[@]}"
        do
            if [ "$thread" -eq "$known" ] ; then
                duplicate=1
                break
            fi
        done
        [ "$duplicate" -eq 1 ] && continue

        CPU_POOL_ARRAY+=("$thread")
    done <<< "$incoming_threads"
}

# Walk every NUMA node looking for a pool of whole-core CPUs (CPU 0's core
# excluded) whose node can also back the requested hugepage memory.
#   $1 - total NUMA node count          $2 - NUMA node hosting CPU 0
#   $3 - requested CPU count            $4 - threads per core
#   $5 - memory request in MiB
# On success the pool is left in the global CPU_POOL_ARRAY and the
# hugepages are already reserved; on any failure the script exits.
function select_cpu_pool {
    local numa_node=""
    local g_numa_node_count=$1
    local cpu_0_numa_node=$2
    local cpu_pool_count=$3
    local cpu_list_per_numa_node=""
    local cpu_per_numa_node=""
    local g_thread_count_per_core=$4
    local memory_request=$5
    local thread_list_per_core=""

    for (( numa_node=0; numa_node<"$g_numa_node_count"; numa_node++ ))
    do
        # unset also drops the -A attribute from the file-scope declaration,
        # so put_cpus_into_pool rebuilds this as a plain indexed array.
        unset CPU_POOL_ARRAY

        cpu_list_per_numa_node=$(get_avail_cpus_of_numa_node "$numa_node" "$cpu_0_numa_node" "$g_thread_count_per_core" "$cpu_pool_count")
        if [ -z "$cpu_list_per_numa_node" ] ; then
            continue
        fi

        while read -r cpu_per_numa_node
        do
            # Skip CPU 0.
            if [ "$cpu_per_numa_node" -eq 0 ] ; then
                continue
            fi

            # Empty list means this core lacks full siblings; try the next CPU.
            thread_list_per_core=$(get_all_threads_of_cpu "$cpu_per_numa_node" "$g_thread_count_per_core")
            [[ ! $thread_list_per_core ]] && continue

            put_cpus_into_pool "$thread_list_per_core"

            # Found a CPU pool that matches all the necessary conditions.
            # Exit early only if the memory requirements are also satisfied. If not, continue
            # and try with the next NUMA node.
            # NOTE: in the 'then' branch $? is the if-test's status (0); in
            # the 'else' branch it is configure_hugepages' failure code.
            if [ "${#CPU_POOL_ARRAY[@]}" -eq "$cpu_pool_count" ]; then
                if configure_hugepages "$memory_request" "node$numa_node"; then
                    configure_hugepages_ret="$?"
                    break 2
                else
                    configure_hugepages_ret="$?"
                    # Memory failed here: leave only the inner loop so the
                    # next NUMA node (which resets the pool) can be tried.
                    break 1
                fi
                # NOTE(review): unreachable — both branches above break first.
                break 2
            fi
        done < <(echo "$cpu_list_per_numa_node")
    done

    # Not enough CPUs found to be added in the CPU list of qtbox.
    [ "${#CPU_POOL_ARRAY[@]}" -ne "$cpu_pool_count" ] && \
    echo "Failed to find suitable CPUs for the qingtian enclave CPU pool after checking all NUMA nodes." && exit 1

    # Hugepages configuration failed on the last node that had enough CPUs
    # (configure_hugepages_ret is only ever 0 on the successful early exit).
    [ "$configure_hugepages_ret" != "0" ] && echo "Failed to configure hugepages:$configure_hugepages_ret" && exit 1

    echo "Found the CPU list."
}

# Join the global CPU_POOL_ARRAY into a comma-separated list on stdout
# (empty output for an empty pool).
function check_cpu_list {
    local joined=""
    local cpu=""

    for cpu in "${CPU_POOL_ARRAY[@]}"
    do
        # Prepend a comma only once the list is non-empty.
        joined="${joined:+$joined,}$cpu"
    done

    echo "$joined"
}

# Build a CPU pool of the requested size (plus matching hugepages on the
# chosen NUMA node) and commit it to the qtbox driver.
#   $1 - number of CPUs to reserve
#   $2 - memory to reserve, in MiB
function configure_cpu_pool_by_cpu_count {
    local g_cpu_count=""
    local cpu_0_numa_node=""
    local g_thread_count_per_core=""
    local g_numa_node_count=""
    local cpu_pool_count="$1"
    local cpu_pool=""
    local memory_request="$2"

    # Ensure the CPU pool file is present.
    # (Fixed: the original tested -z on the always-non-empty variable, so a
    # missing pool file was never detected here; the message also named the
    # wrong driver — "crypto" instead of qtbox, cf. configure_cpu_pool.)
    [ ! -f "$CPU_POOL_FILE" ] && \
        echo "The CPU pool file is missing. Please make sure the virtio qtbox driver is inserted." && \
        exit 1

    # Get the CPU infos, including number of available CPUs,
    # CPU threads (siblings) per core and the NUMA nodes count.
    obtain_cpu_info g_cpu_count cpu_0_numa_node g_thread_count_per_core g_numa_node_count

    # Check the CPU infos.
    check_cpu_infos "$g_cpu_count" "$cpu_0_numa_node" "$g_thread_count_per_core" "$g_numa_node_count"

    # Check the required CPU count based on above CPU infos.
    check_cpu_count "$cpu_pool_count" "$g_cpu_count" "$g_thread_count_per_core"

    # Iterate through each NUMA node and try to get a CPU pool that matches all requirements.
    select_cpu_pool "$g_numa_node_count" "$cpu_0_numa_node" "$cpu_pool_count" "$g_thread_count_per_core" "$memory_request"

    # Check whether the obtained CPUs are available.
    cpu_pool=($(check_cpu_list))

    configure_cpu_pool $cpu_pool
}

# Read CONFIG_FILE_PATH ("key: value" lines, #-comments and blank lines
# allowed) into the global CONFIG map, then validate the entries. Exits on
# validation errors.
# NOTE(review): a malformed line only prints an error — parsing continues
# and the value is stored anyway (even for an unexpected key, which is then
# inserted into CONFIG); confirm whether these should be fatal.
function parse_config {
    local count=0

    # NOTE(review): 'read' without -r mangles backslashes; harmless for the
    # expected key/value lines, but worth confirming values never need them.
    while read line; do
        count=$((count+1))

        # Skip comment lines and blank lines
        echo "$line" | grep -Eq "^$|^#" && continue

        # Key: the leading run of [-_a-z0-9] (case-insensitive) before ':'.
        # NOTE(review): if no key can be extracted, $key is empty and the
        # ${CONFIG[$key]+x} lookup below can raise "bad array subscript" on
        # some bash versions — assumes well-formed input.
        local key
        key=$(echo "$line" | cut -d: -f1 | egrep -io "^([-_a-z0-9])+")
        [[ -z ${CONFIG[$key]+x} ]] && echo "Error in $CONFIG_FILE_PATH:$count - unexpected: $key"

        # Value: everything after the first ':', with leading whitespace
        # and optional surrounding double quotes stripped.
        local value
        value=$(echo "$line" | sed -E "s/^.+://" | sed -E "s/\\s+\"?//" | sed -E "s/\"?\$//")
        [[ -z $value ]] && echo "Error in $CONFIG_FILE_PATH:$count - missing value for $key"

        CONFIG[$key]="$value"
    done < "$CONFIG_FILE_PATH"

    # Some trivial config validation.
    # hugepage_size must be present and one of the two supported sizes
    # (2 MiB or 1 GiB expressed in MiB).
    [[ -z ${CONFIG[hugepage_size]} ]] && \
        echo "Config error: missing memory reservation (\`hugepage_size\`)." && exit 1
    [[ ${CONFIG[hugepage_size]} -ne 2 ]] && [[ ${CONFIG[hugepage_size]} -ne 1024 ]] && \
        echo "Config error: (\`hugepage_size\`) can only be 2(for 2M hugepage) or 1024(for 1G hugepage)" && exit 1

    # memory_mib must be present and an exact multiple of the page size.
    [[ -z ${CONFIG[memory_mib]} ]] && \
        echo "Config error: missing memory reservation (\`memory_mib\`)." && exit 1
    [ $((${CONFIG[memory_mib]} % ${CONFIG[hugepage_size]})) -ne 0 ] && \
        echo "Config error: The (\`memory_mib\`) is not multiple of (\`hugepage_size\`)${CONFIG[hugepage_size]}" && exit 1

    # Exactly one of cpu_count / cpu_list must be given.
    [[ -z ${CONFIG[cpu_count]} ]] && [[ -z ${CONFIG[cpu_list]} ]] &&\
        echo "Config error: missing CPU reservation (either \`cpu_count\` or \`cpu_list\`)." && exit 1
    [[ ! -z ${CONFIG[cpu_count]} ]] && [[ ! -z ${CONFIG[cpu_list]} ]] && \
        echo "Config error: \`cpu_count\` conflicts with \`cpu_list\`." && exit 1
}

# Set all CPUs online before trying to reconfigure the CPU pool, by clearing
# the driver's pool parameter. Exits if the driver refuses the write because
# enclaves are still running.
function online_all_cpus {
    # Capture the write's stderr in a command substitution. The 2>&1 must
    # precede the stdout redirect so that an "Operation not permitted"
    # raised while opening the sysfs file lands in err_info — the original
    # "> file 2> .tmp_file" ordering lost that message (the stdout redirect
    # failed before stderr was redirected) and littered the working
    # directory with a predictable temp file.
    err_info=$(echo "" 2>&1 > "$CPU_POOL_FILE")

    # Check the error text in order to determine if there are any enclaves running.
    if [[ $err_info == *"Operation not permitted"* ]]; then
        echo "Please stop all enclaves before re-starting the qt-enclave-env.service." && exit 1
    fi
}

# Return every previously reserved hugepage on every NUMA node by writing 0
# to each per-node nr_hugepages knob.
function recover_all_mems {
    local g_numa_node=""
    local g_numa_node_total=""
    local g_cpu_count_per_numa=""

    # Plain assignment — the original wrapped this in a needless (and
    # unsafe, since the value is executed) eval.
    g_numa_node_total="$(lscpu | grep "^NUMA node(s):" | cut -d ":" -f 2 | tr -d " \t")"

    for (( g_numa_node=0; g_numa_node<g_numa_node_total; g_numa_node++ ))
    do
        # Skip nodes without CPUs. NOTE(review): grep -c prints "0" rather
        # than an empty string, so this guard likely never fires — kept for
        # parity with the original flow.
        g_cpu_count_per_numa="$(lscpu -p=node | grep -v "#" | grep -c "^$g_numa_node$")"

        if [ -z "$g_cpu_count_per_numa" ] ; then
            continue
        fi

        # Enumerate each hugepages-<size> directory this node exposes ...
        hugepage_sizes=$(file /sys/devices/system/node/"node$g_numa_node"/hugepages/hugepages-* | cut -d':' -f1)
        for hugepage in $hugepage_sizes
        do
            # Retain only the dimension of the hugepage.
            hugepage=$(basename "$hugepage" | cut -d'-' -f2)

            # ... and clear any previous hugepage reservation for it.
            echo 0 > "/sys/devices/system/node/node$g_numa_node/hugepages/hugepages-$hugepage/nr_hugepages"
        done
    done

    echo "Recover all hugepages."
}

# Report whether the qtbox sandbox driver module is currently loaded.
# Returns 0 (and says so) when loaded, 1 otherwise.
function check_driver_exists {
    ret=$(lsmod | grep "qtbox")
    if [ -n "$ret" ]; then
        echo "sandbox driver is installed already"
        return 0
    fi

    echo "sandbox driver is uninstalled"
    return 1
}

# Load the sandbox kernel module; abort the whole script if insmod fails.
function insmod_qtbox {
    # $QTBOX_DRIVER_PATH is left unquoted to match the original invocation.
    if ! insmod $QTBOX_DRIVER_PATH; then
        exit 1
    fi
}

# Make sure the sandbox driver is loaded, inserting it from disk when
# necessary. Exits when the driver file is missing.
function pre_install {
    # Nothing to do when the module is already resident.
    if check_driver_exists; then
        return 0
    fi

    if [ ! -f $QTBOX_DRIVER_PATH ]; then
        echo "File $QTBOX_DRIVER_PATH not exists, please install sandbox driver first"
        exit 1
    fi

    insmod_qtbox
}

# Entry point: reset CPU/memory reservations, ensure the driver is loaded,
# then apply the reservation described by the config file.
function main() {
    # Start from a clean slate before applying the new configuration:
    # bring every CPU back online and release all reserved hugepages.
    online_all_cpus
    recover_all_mems

    pre_install
    parse_config

    # parse_config guarantees that exactly one of cpu_count / cpu_list is set.
    if [ -n "${CONFIG[cpu_count]}" ]; then
        # Pool and NUMA node are chosen automatically; hugepages are
        # reserved on the selected node inside this call.
        configure_cpu_pool_by_cpu_count ${CONFIG[cpu_count]} ${CONFIG[memory_mib]}
    elif [ -n "${CONFIG[cpu_list]}" ]; then
        # Explicit list: reserve the CPUs first, then back them with memory.
        configure_cpu_pool "${CONFIG[cpu_list]}"
        configure_hugepages "${CONFIG[memory_mib]}"
    fi
}

main "$@"
