#!/usr/bin/groovy
package org.centos.pipeline

import org.centos.*

import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
import org.csanchez.jenkins.plugins.kubernetes.pipeline.PodTemplateAction
import groovy.json.JsonOutput

/**
 * Library to setup and configure the host the way ci-pipeline requires
 * @param stage
 * @param sshKey
 * @return
 */
def setupStage(String stage, String sshKey) {
    echo "Currently in stage: ${stage} in setupStage"

    // TODO: Either remove sshKey arg, or determine how to invoke second credentialsID and variable name based on arg.
    // Currently having an sshKey isn't that useful as we're still hard-coding the public credentialsID entry
    // Binds the private key (credential id from sshKey) and the hard-coded public
    // key as temp files, installs them as the default ssh identity, then runs
    // ansible against the duffy-provided inventory: ${playbook} if set in the
    // environment, otherwise a plain ping of all hosts.
    // NOTE(review): the '#!/bin/bash' line is not the first byte of the script,
    // so it is treated as a shell comment; 'set -xeuo pipefail' supplies the
    // strict-mode behavior explicitly — confirm the default shell accepts it.
    withCredentials([file(credentialsId: sshKey, variable: 'FEDORA_ATOMIC_KEY'),
                     file(credentialsId: 'fedora-atomic-pub-key', variable: 'FEDORA_ATOMIC_PUB_KEY')]) {
        sh script: '''
            #!/bin/bash
            set -xeuo pipefail

            mkdir -p ~/.ssh
            cp ${FEDORA_ATOMIC_KEY} ~/.ssh/id_rsa
            cp ${FEDORA_ATOMIC_PUB_KEY} ~/.ssh/id_rsa.pub
            chmod 600 ~/.ssh/id_rsa
            chmod 644 ~/.ssh/id_rsa.pub

            # Keep compatibility with earlier cciskel-duffy
            if test -f ${ORIGIN_WORKSPACE}/inventory.${ORIGIN_BUILD_TAG}; then
                ln -fs ${ORIGIN_WORKSPACE}/inventory.${ORIGIN_BUILD_TAG} ${WORKSPACE}/inventory
            fi

            if test -n "${playbook:-}"; then
                ansible-playbook --private-key=${FEDORA_ATOMIC_KEY} -u root -i ${WORKSPACE}/inventory "${playbook}"
            else
                ansible --private-key=${FEDORA_ATOMIC_KEY} -u root -i ${WORKSPACE}/inventory all -m ping
            fi
            exit
        ''',
        label: "Providing ssh keys"
    }
}

/**
 * Library to execute a task and rsync the logs back to artifacts.ci.centos.org
 * @param stage
 * @param duffyKey
 * @return
 */
def runTaskAndReturnLogs(String stage, String duffyKey) {
    echo "Currently in stage: ${stage} in runTaskAndReturnLogs"

    // Binds the duffy ssh key and the fedora keytab as temp files, rsyncs the
    // task environment plus the ci-pipeline/sig-atomic-buildscripts repos to
    // the duffy host, runs ${task} there over ssh, and pulls logs back into
    // ${ORIGIN_WORKSPACE}/logs even when the task fails (non-zero exit is
    // deferred until after the log rsync).
    // Expects DUFFY_HOST, JENKINS_JOB_NAME and task to be provided by
    // ${ORIGIN_WORKSPACE}/task.env (written by rsyncData) / the environment.
    // The rsync password is derived from the first 13 chars of the duffy key.
    withCredentials([file(credentialsId: duffyKey, variable: 'DUFFY_KEY'),
                     file(credentialsId: 'fedora-keytab', variable: 'FEDORA_KEYTAB')]) {
        sh script: '''
            #!/bin/bash
            set -xeuo pipefail

            echo $HOME

            cp ${DUFFY_KEY} ~/duffy.key
            chmod 600 ~/duffy.key

            cp ${FEDORA_KEYTAB} fedora.keytab
            chmod 0600 fedora.keytab

            echo "Host *.ci.centos.org" > ~/.ssh/config
            echo "    StrictHostKeyChecking no" >> ~/.ssh/config
            echo "    UserKnownHostsFile /dev/null" >> ~/.ssh/config
            chmod 600 ~/.ssh/config

            source ${ORIGIN_WORKSPACE}/task.env
            (echo -n "export RSYNC_PASSWORD=" && cat ~/duffy.key | cut -c '-13') > rsync-password.sh

            rsync -Hrlptv --stats -e ssh ${ORIGIN_WORKSPACE}/task.env rsync-password.sh fedora.keytab builder@${DUFFY_HOST}:${JENKINS_JOB_NAME}
            for repo in ci-pipeline sig-atomic-buildscripts; do
                rsync -Hrlptv --stats --delete -e ssh ${repo}/ builder@${DUFFY_HOST}:${JENKINS_JOB_NAME}/${repo}
            done

            # Use the following in ${task} to authenticate.
            #kinit -k -t ${FEDORA_KEYTAB} ${FEDORA_PRINCIPAL}
            build_success=true
            if ! ssh -tt builder@${DUFFY_HOST} "pushd ${JENKINS_JOB_NAME} && . rsync-password.sh && . task.env && ${task}"; then
                build_success=false
            fi

            rsync -Hrlptv --stats -e ssh builder@${DUFFY_HOST}:${JENKINS_JOB_NAME}/logs/ ${ORIGIN_WORKSPACE}/logs || true
            # Exit with code from the build
            if test "${build_success}" = "false"; then
                echo 'Build failed, see logs above'; exit 1
            fi
            exit
        ''',
        label: "Synchronizing files"
    }
}

/**
 * Library to check last image
 * @param stage
 * @return
 */
def checkLastImage(String stage) {
    echo "Currently in stage: ${stage} in checkLastImage"

    // Decide whether a fresh atomic image must be built: probe the
    // Last-Modified header of the latest published image. If the image is
    // missing or older than 24 hours, drop a NeedNewImage.txt marker into
    // the workspace for downstream stages to test for.
    sh script: '''
        set +e

        header=$(curl -sI "${HTTP_BASE}/${branch}/images/latest-atomic.qcow2"|grep -i '^Last-Modified:')
        # Note: $? is the exit status of the last pipeline command (grep),
        # so this really means "Last-Modified header found", not curl's rc.
        curl_rc=$?
        if [ ${curl_rc} -eq 0 ]; then
            l_modified=$(echo ${header}|sed s'/Last-Modified: //')
            prev=$( date --date="$l_modified" +%s )
            cur=$( date +%s )
            # 86400 seconds == 24 hours
            if [ $((cur - prev)) -gt 86400 ]; then
                echo "New atomic image needed. Existing atomic image is more than 24 hours old"
                touch ${WORKSPACE}/NeedNewImage.txt

            else
                echo "No new atomic image needed. Existing atomic image is less than 24 hours old"
            fi
        else
            echo "New atomic image needed. Unable to find existing atomic image"
            touch ${WORKSPACE}/NeedNewImage.txt

        fi
    ''',
    label: "Checking for the last atomic image"
}

/**
 * Notes:
 *  stage - current stage running
 *  imageFilePath - path to the file to examine last modified time for.
 *                  Defaults to 'images/latest-atomic.qcow2'
 */
/**
 * Library to check last modified date for a given image file.
 * @param stage
 * @param imageFilePath
 * @return
 */
def checkImageLastModifiedTime(String stage, String imageFilePath='images/latest-atomic.qcow2'){

    def url = new URL("${HTTP_BASE}/${branch}/${imageFilePath}")
    def fileName = imageFilePath.split('/')[-1]
    def filePath = imageFilePath.split('/')[0..-2].join('/').replaceAll("^/", "")

    echo "Currently in stage: ${stage} in checkImageLastModifiedTime for ${fileName} in /${filePath}/"

    // A HEAD request is sufficient — only the Last-Modified header is needed.
    def connection = (HttpURLConnection)url.openConnection()
    connection.setRequestMethod("HEAD")

    try {
        connection.connect()
        def responseCode = connection.getResponseCode()  // fixed typo: was 'reponseCode'
        def needNewImage = false

        if (responseCode == 200) {
            // Last modified time of the remote file in milliseconds since epoch.
            def lastModifiedDate = connection.getLastModified()

            // Millisecond timestamp for "24 hours ago".
            def calendar = Calendar.getInstance()
            calendar.add(Calendar.HOUR_OF_DAY, -24) // 24 hours ago
            def comparisonDate = calendar.getTimeInMillis()

            // Image is stale if last modified at or before 24 hours ago.
            if ( lastModifiedDate <= comparisonDate ) {
                echo "Creating new image. Last modified time of existing image >= 24 hours ago."
                needNewImage = true
            } else {
                echo "Not creating new image. Last modified time of existing image is < 24 hours ago."
            }
        } else if (responseCode == 404) {
            echo "Creating new image. Unable to locate existing image."
            needNewImage = true
        } else {
            echo "Error: ${connection.responseCode}: ${connection.getResponseMessage()}"
            echo "Creating new image due to some error when getting last modified time of previous image"
            needNewImage = true
        }

        if (needNewImage) {
            // NOTE(review): java.io.File executes on the Jenkins master, not the
            // agent; if WORKSPACE lives on an agent this marker may land on the
            // wrong node — consider a 'sh touch'/'writeFile' step. Left as-is.
            new File("${WORKSPACE}/NeedNewImage.txt").createNewFile()
        }

    } catch (err) {
        echo "There was a fatal error getting the last modified time: ${err}, unable to determine if new image is needed"
    } finally {
        // Release the connection's underlying resources (added; was leaked before).
        connection.disconnect()
    }
}


/**
 * Library to check branch to rsync to as rawhide should map to a release number
 * @return
 */
def getRsyncBranch() {
    echo "Currently in getRsyncBranch for ${branch}"

    // Non-rawhide branches map to themselves.
    if (branch != 'rawhide') {
        return branch
    }

    // rawhide maps to 'f<dist_version>' taken from the fedora-release spec.
    def distVersion = sh (returnStdout: true, script: '''
            echo $(curl -s https://src.fedoraproject.org/rpms/fedora-release/raw/master/f/fedora-release.spec | awk '/%define dist_version/ {print $3}')
        ''', label: "Getting dist_version").trim()

    // Refuse to continue with a non-numeric mapping.
    if (!distVersion.isNumber()) {
        echo "There was a fatal error finding the proper mapping for ${branch}"
        echo "We will not continue without a proper RSYNC_BRANCH value. Throwing exception..."
        throw new Exception('Rsync branch identifier failed!')
    }

    return 'f' + distVersion
}

/**
 * Library to set message fields to be published
 * @param messageType: ${MAIN_TOPIC}.ci.pipeline.<defined-in-README>
 * @return
 */
def setMessageFields(String messageType) {
    // NOTE: 'topic' is deliberately left un-scoped (global binding), matching
    // prior behavior in case other pipeline code reads it after this returns.
    topic = "${MAIN_TOPIC}.ci.pipeline.${messageType}"

    // Create a HashMap of default message content keys and values
    // These properties should be applicable to ALL message types.
    // If something is applicable to only some subset of messages,
    // add it below per the existing examples.

    def messageContent = [
            branch           : env.branch,
            build_id         : env.BUILD_ID,
            build_url        : env.JENKINS_URL + 'blue/organizations/jenkins/' + env.JOB_NAME + '/detail/' + env.JOB_NAME + '/' + env.BUILD_NUMBER + '/pipeline/',
            compose_rev      : messageType == 'compose.running' ? '' : env.commit,
            namespace        : env.fed_namespace,
            nvr              : env.nvr,
            original_spec_nvr: env.original_spec_nvr,
            ref              : env.basearch,
            repo             : env.fed_repo,
            rev              : env.fed_rev,
            status           : currentBuild.currentResult,
            test_guidance    : "''",
            username         : env.RSYNC_USER,
    ]

    // Add compose_url to appropriate message types
    if (messageType in ['compose.running', 'compose.complete', 'compose.test.integration.queued',
                        'compose.test.integration.running', 'compose.test.integration.complete', 'image.running', 'image.complete',
                        'image.test.smoke.running', 'image.test.smoke.complete'
    ]) {
        messageContent.compose_url = "${env.HTTP_BASE}/${env.branch}/ostree"
    }

    // Add image type to appropriate message types
    if (messageType in ['image.running', 'image.complete', 'image.test.smoke.running', 'image.test.smoke.complete'
    ]) {
        messageContent.type = messageType == 'image.running' ? "''" : 'qcow2'
    }

    // Add image_url/image_name to appropriate message types.
    // (Fixed dead guards: the previous `messageType == 'image.running'`
    // ternaries could never be true here, because 'image.running' is not in
    // this list — the env values were always used.)
    if (messageType in ['image.complete', 'image.test.smoke.running', 'image.test.smoke.complete']) {
        messageContent.image_url = env.image2boot
        messageContent.image_name = env.image_name
    }

    // Serialize the map for the message bus.
    String messageContentString = JsonOutput.toJson(messageContent)

    // fedmsg takes no message properties; keep an empty string.
    def messagePropertiesString = ''

    return [ 'topic': topic, 'properties': messagePropertiesString, 'content': messageContentString ]
}

/**
 * Library to send message
 * @param msgTopic - Message topic, e.g. ${MAIN_TOPIC}.ci.pipeline.&lt;type&gt;
 * @param msgProps - Message Properties - empty string for fedmsg
 * @param msgContent - Message content as a JSON string
 * @return
 */
def sendMessage(String msgTopic, String msgProps, String msgContent) {

    // Deployments may suppress all UMB sends by setting SKIP_UMB_SEND.
    if (binding.hasVariable('env') && env?.SKIP_UMB_SEND?.trim()) {
        println "ci-pipeline sendMessage: env var is not empty: SKIP_UMB_SEND. Do not send message."
        return
    }

    retry(10) {
        try {
            // 1 minute should be more than enough time to send the topic msg
            timeout(1) {
                // Send message and return SendResult.
                // (Removed a pointless inner catch(e){throw e} wrapper.)
                sendResult = sendCIMessage messageContent: msgContent,
                        messageProperties: msgProps,
                        messageType: 'Custom',
                        overrides: [topic: msgTopic],
                        failOnError: true,
                        providerName: "${MSG_PROVIDER}"
                return sendResult
            }
        } catch(e) {
            // Log, back off 30s, then fail this attempt so retry() re-runs it.
            echo "FAIL: Could not send message to ${MSG_PROVIDER}"
            echo e.getMessage()
            sleep 30
            error e.getMessage()
        }
    }
}

/**
 * Library to send message and record it in an audit file
 * @param msgTopic - Message topic, e.g. ${MAIN_TOPIC}.ci.pipeline.&lt;type&gt;
 * @param msgProps - Message Properties - empty string for fedmsg
 * @param msgContent - Message content as a JSON string
 * @param msgAuditFile - File containing all past messages. It will get appended to.
 * @param fedmsgRetryCount number of times to keep trying.
 * @return
 */
def sendMessageWithAudit(String msgTopic, String msgProps, String msgContent, String msgAuditFile, fedmsgRetryCount) {
    // Get contents of auditFile
    // NOTE(review): replace("\n", "\\n") here operates on the file *path*,
    // not message content — looks copied from the CI_MESSAGE parsers; confirm
    // it is intentional.
    auditContent = readJSON file: msgAuditFile.replace("\n", "\\n")

    if (binding.hasVariable('env') && env?.SKIP_UMB_SEND?.trim()) {
        println "ci-pipeline sendMessageWithAudit: env var is not empty: SKIP_UMB_SEND. Do not send message."
        return
    }

    // Send message and get handle on SendResult
    sendResult = sendMessage(msgTopic, msgProps, msgContent)

    String id = sendResult.getMessageId()
    String msg = sendResult.getMessageContent()

    // Record the sent message under its broker-assigned id.
    auditContent[id] = msg

    // write to auditFile and archive
    writeJSON pretty: 4, file: msgAuditFile, json: auditContent

    archiveArtifacts allowEmptyArchive: false, artifacts: msgAuditFile

    // Verify the message actually landed on datagrepper (retries, then fails).
    trackMessage(id, fedmsgRetryCount)
}

/**
 * Initialize message audit file
 * @param auditFile audit file for messages
 * @return
 */
def initializeAuditFile(String auditFile) {
    // (Re)create auditFile containing an empty JSON object '{}' in a single
    // shell step. Replaces five separate sh invocations (rm/dirname/mkdir/
    // touch/append); the overwrite redirection makes rm+touch unnecessary
    // and yields the same final file content.
    sh script: """
        mkdir -p \$(dirname ${auditFile})
        echo '{}' > ${auditFile}
    """, label: "Initializing ${auditFile} with {}"
}
/**
 * Check data grepper for presence of a message
 * @param messageID message ID to track.
 * @param retryCount number of times to keep trying.
 * @return
 */
def trackMessage(String messageID, int retryCount) {
    retry(retryCount) {
        echo "Checking datagrapper for presence of message..."
        // curl emits only the HTTP status code of the datagrepper id lookup.
        def STATUSCODE = sh (returnStdout: true, script: """
            curl --insecure --silent --output /dev/null --write-out "%{http_code}" \'${env.dataGrepperUrl}/id?id=${messageID}&chrome=false&is_raw=false\'
        """, label: "Checking datagrapper for presence of message").trim()
        // We only want to wait if there are 404 errors
        echo "${STATUSCODE}"
        if (STATUSCODE.equals("404")) {
            // error() fails this attempt so the enclosing retry() re-runs it.
            error("message not found on datagrepper...")
        }
        // NOTE(review): a 5xx only logs a warning and lets the attempt succeed
        // (no retry) — confirm treating server errors as non-fatal is intended.
        if (STATUSCODE.startsWith("5")) {
            echo("WARNING: internal datagrepper server error...")
        } else {
            echo "found!"
        }
    }
}

/**
 * Library to parse CI_MESSAGE and inject its key/value pairs as env variables.
 *
 */
def injectFedmsgVars(String message) {

    // Deserialize the (newline-escaped) JSON payload into a Map.
    def ciData = readJSON text: message.replace("\n", "\\n")

    def commitData = ciData['commit']
    if (commitData) {
        // For every field under 'commit':
        // * prefix the env var name with fed_ and turn '-' into '_'
        // * keep only the first line of the value
        // * swap double quotes for single quotes
        commitData.each { k, v ->
            def envName = 'fed_' + k.toString().replaceAll('-', '_')
            env."${envName}" = v.toString().split('\n')[0].replaceAll('"', '\'')
        }
        // 'master' is published downstream as 'rawhide'.
        env.branch = (env.fed_branch == 'master') ? 'rawhide' : env.fed_branch
    }
}

/**
 * Library to parse Pagure PR CI_MESSAGE and inject
 * its key/value pairs as env variables.
 * @param prefix - String to prefix env variables with
 * @param message - The CI_MESSAGE
 */
def injectPRVars(String prefix, String message) {

    // Parse the message into a Map
    def ci_data = readJSON text: message.replace("\n", "\\n")

    // If we have a 'pullrequest' key in the CI_MESSAGE, for each key under 'pullrequest', we
    // * prepend the key name with prefix_
    // * replace any '-' with '_'
    // * truncate the value for the key at the first '\n' character
    // * replace any double-quote characters with single-quote characters in the value for the key.

    if (ci_data['pullrequest']) {
        ci_data.pullrequest.each { key, value ->
            env."${prefix}_${key.toString().replaceAll('-', '_')}" =
                    value.toString().split('\n')[0].replaceAll('"', '\'')
        }
        // 'master' is published downstream as 'rawhide'.
        if (env."${prefix}_branch" == 'master'){
            env.branch = 'rawhide'
        } else {
            env.branch = env."${prefix}_branch"
        }
        // To support existing workflows, create some env vars
        // that map to vars from commit CI_MESSAGEs
        // NOTE(review): the nested lookups below assume the 'project' and
        // 'user' sub-maps are always present when 'pullrequest' is — confirm
        // against the Pagure PR message schema.
        // Get the repo name
        if (ci_data['pullrequest']['project']['name']) {
            env."${prefix}_repo" = ci_data['pullrequest']['project']['name'].toString().split('\n')[0].replaceAll('"', '\'')
        }
        // Get the namespace value
        if (ci_data['pullrequest']['project']['namespace']) {
            env."${prefix}_namespace" = ci_data['pullrequest']['project']['namespace'].toString().split('\n')[0].replaceAll('"', '\'')
        }
        // Get the username value
        if (ci_data['pullrequest']['user']['name']) {
            env."${prefix}_username" = ci_data['pullrequest']['user']['name'].toString().split('\n')[0].replaceAll('"', '\'')
        }
        // Create a bogus rev value to use in build descriptions
        if (env."${prefix}_id") {
            env."${prefix}_rev" = "PR-" + env."${prefix}_id"
            env."${prefix}_pr_id" = env."${prefix}_id"
        }
        // Get the last comment id as it was requested
        if (ci_data['pullrequest']['comments']) {
            env."${prefix}_lastcid" = ci_data['pullrequest']['comments'].last()['id']
        }
        if (ci_data['pullrequest']['uid']) {
            env."${prefix}_pr_uid" = ci_data['pullrequest']['uid']
        }
        // commit_stop is the newest commit on the PR.
        if (ci_data['pullrequest']['commit_stop']) {
            env."${prefix}_last_commit_hash" = ci_data['pullrequest']['commit_stop']
        }
    }
}

/**
 * Library to parse Pagure PR CI_MESSAGE and check if
 * it is for a new commit added, the comment contains
 * some keyword, or if the PR was rebased
 * If notification = true, commit was added or it was rebased
 * @param message - The CI_MESSAGE
 * @param keyword - The keyword we care about
 * @return bool
 */
def checkUpdatedPR(String message, String keyword) {

    // Deserialize the (newline-escaped) JSON payload.
    def ciData = readJSON text: message.replace("\n", "\\n")
    def pullRequest = ciData['pullrequest']

    // Anything other than an open PR is never considered updated.
    def status = pullRequest['status']
    if (status && status != 'Open') {
        return false
    }

    // With comments present: notification==true means a commit was added or
    // the PR was rebased; otherwise look for the trigger keyword in the
    // latest comment.
    def comments = pullRequest['comments']
    if (comments) {
        def latest = comments.last()
        return latest['notification'] || latest['comment'].contains(keyword)
    }

    // Default to return true because this is called for pr.new messages as well
    return true
}

/**
 * Library to prepare credentials
 * @return
 */
def prepareCredentials() {
    // Install the fedora keytab into the cwd and relax ssh host-key checking
    // for *.ci.centos.org hosts (duffy nodes are ephemeral, so their host
    // keys change between builds).
    withCredentials([file(credentialsId: 'fedora-keytab', variable: 'FEDORA_KEYTAB')]) {
        sh script: '''
            #!/bin/bash
            set -xeuo pipefail

            cp ${FEDORA_KEYTAB} fedora.keytab
            chmod 0600 fedora.keytab

            mkdir -p ~/.ssh

            echo "Host *.ci.centos.org" > ~/.ssh/config
            echo "    StrictHostKeyChecking no" >> ~/.ssh/config
            echo "    UserKnownHostsFile /dev/null" >> ~/.ssh/config
            chmod 600 ~/.ssh/config
        ''',
        label: "Preparing credentials"
    }
    // Initialize RSYNC_PASSWORD from credentialsId
    env.RSYNC_PASSWORD = getPasswordFromDuffyKey('duffy-key')
}
/**
 * Library to set default environmental variables. Performed once at start of Jenkinsfile
 * @param envMap: Key/value pairs which will be set as environmental variables.
 * @return
 */
def setDefaultEnvVars(Map envMap=null){

    // Check if we're working with a staging or production instance by
    // evaluating if env.ghprbActual is null, and if it's not, whether
    // it is something other than 'master'
    // If we're working with a staging instance:
    //      We default to an MAIN_TOPIC of 'org.centos.stage'
    // If we're working with a production instance:
    //      We default to an MAIN_TOPIC of 'org.centos.prod'
    // Regardless of whether we're working with staging or production,
    // if we're provided a value for MAIN_TOPIC in the build parameters:

    // We also set dataGrepperUrl which is needed for message tracking
    // and the correct jms-messaging message provider

    if (env.ghprbActualCommit != null && env.ghprbActualCommit != "master") {
        env.MAIN_TOPIC = env.MAIN_TOPIC ?: 'org.centos.stage'
        env.dataGrepperUrl = 'https://apps.stg.fedoraproject.org/datagrepper'
        env.MSG_PROVIDER = "fedora-fedmsg-stage"
    } else {
        env.MAIN_TOPIC = env.MAIN_TOPIC ?: 'org.centos.prod'
        env.dataGrepperUrl = 'https://apps.fedoraproject.org/datagrepper'
        env.MSG_PROVIDER = "fedora-fedmsg"
    }

    // Set our base HTTP_SERVER value
    env.HTTP_SERVER = env.HTTP_SERVER ?: 'http://artifacts.ci.centos.org'

    // Set our base RSYNC_SERVER value
    env.RSYNC_SERVER = env.RSYNC_SERVER ?: 'artifacts.ci.centos.org'
    env.RSYNC_USER = env.RSYNC_USER ?: 'fedora-atomic'

    // Check if we're working with a staging or production instance by
    // evaluating if env.ghprbActual is null, and if it's not, whether
    // it is something other than 'master'
    // If we're working with a staging instance:
    //      We default to an RSYNC_DIR of fedora-atomic/staging
    //      We default to an HTTP_DIR of fedora-atomic/staging
    // If we're working with a production instance:
    //      We default to an RSYNC_DIR of fedora-atomic
    //      We default to an HTTP_DIR of fedora-atomic
    // Regardless of whether we're working with staging or production,
    // if we're provided a value for RSYNC_DIR or HTTP_DIR in the build parameters:
    //      We set the RSYNC_DIR or HTTP_DIR to the value(s) provided (this overwrites staging or production paths)

    if (env.ghprbActualCommit != null && env.ghprbActualCommit != "master") {
        env.RSYNC_DIR = env.RSYNC_DIR ?: 'fedora-atomic/staging'
        env.HTTP_DIR = env.HTTP_DIR ?: 'fedora-atomic/staging'
    } else {
        env.RSYNC_DIR = env.RSYNC_DIR ?: 'fedora-atomic'
        env.HTTP_DIR = env.HTTP_DIR ?: 'fedora-atomic'
    }

    // Set env.HTTP_BASE to our env.HTTP_SERVER/HTTP_DIR,
    //  ex: http://artifacts.ci.centos.org/fedora-atomic/ (production)
    //  ex: http://artifacts.ci.centos.org/fedora-atomic/staging (staging)
    env.HTTP_BASE = "${env.HTTP_SERVER}/${env.HTTP_DIR}"

    env.basearch = env.basearch ?: 'x86_64'
    env.OSTREE_BRANCH = env.OSTREE_BRANCH ?: ''
    env.commit = env.commit ?: ''
    env.image2boot = env.image2boot ?: ''
    env.image_name = env.image_name ?: ''
    env.FEDORA_PRINCIPAL = env.FEDORA_PRINCIPAL ?: 'bpeck/jenkins-continuous-infra.apps.ci.centos.org@FEDORAPROJECT.ORG'
    env.package_url = env.package_url ?: ''
    env.nvr = env.nvr ?: ''
    env.original_spec_nvr = env.original_spec_nvr ?: ''
    env.ANSIBLE_HOST_KEY_CHECKING = env.ANSIBLE_HOST_KEY_CHECKING ?: 'False'

    // If we've been provided an envMap, we set env.key = value
    // Note: This may overwrite above specified values.
    // Fixed: 'key.toSTring()' typo, which raised MissingMethodException at
    // runtime whenever a non-null envMap was passed; null-safe iteration
    // covers the default envMap=null case explicitly.
    envMap?.each { key, value ->
        env."${key.toString().trim()}" = value.toString().trim()
    }
}

/**
 * Library to set stage specific environmental variables.
 * @param stage - Current stage
 * @return
 */
def setStageEnvVars(String stage){
    // Per-stage environment presets: each entry maps a stage name to the
    // key/value pairs (task script, setup playbook, ostree ref, etc.) that
    // will be exported into env for that stage.
    def stages =
            ["ci-pipeline-rpmbuild"                : [
                    task                     : "./ci-pipeline/tasks/rpmbuild-test",
                    playbook                 : "ci-pipeline/playbooks/setup-rpmbuild-system.yml",
                    ref                      : "fedora/${env.branch}/${env.basearch}/atomic-host",
                    repo                     : "${env.fed_repo}",
                    rev                      : "${env.fed_rev}",
            ],
             "ci-pipeline-ostree-compose"          : [
                     task                     : "./ci-pipeline/tasks/ostree-compose",
                     playbook                 : "ci-pipeline/playbooks/rdgo-setup.yml",
                     ref                      : "fedora/${env.branch}/${env.basearch}/atomic-host",
                     repo                     : "${env.fed_repo}",
                     rev                      : "${env.fed_rev}",
                     basearch                 : "x86_64",
             ],
             "ci-pipeline-ostree-image-compose"    : [
                     task                     : "./ci-pipeline/tasks/ostree-image-compose",
                     playbook                 : "ci-pipeline/playbooks/rdgo-setup.yml",

             ],
             "ci-pipeline-ostree-image-boot-sanity": [
                     task                     : "./ci-pipeline/tasks/ostree-boot-image",
                     playbook                 : "ci-pipeline/playbooks/system-setup.yml",
             ],
             "ci-pipeline-ostree-boot-sanity"      : [
                     task    : "./ci-pipeline/tasks/ostree-boot-image",
                     playbook: "ci-pipeline/playbooks/system-setup.yml",
                     DUFFY_OP: "--allocate"
             ],
             "ci-pipeline-functional-tests"            : [
                     package                  : "${env.fed_repo}"
             ],
             "ci-pipeline-atomic-host-tests"       : [
                     task    : "./ci-pipeline/tasks/atomic-host-tests",
                     playbook: "ci-pipeline/playbooks/system-setup.yml",
                     package : "${env.fed_repo}"
             ]
            ]

    // Get the map of env var keys and values and write them to the env global variable
    // Unknown stage names are silently ignored (no preset is applied).
    if(stages.containsKey(stage)) {
        stages.get(stage).each { key, value ->
            env."${key}" = value
        }
    }
}

/**
 * Library to create a text string which is written to the file 'task.env' in the {env.ORIGIN_WORKSPACE} and call
 * runTaskAndReturnLogs()
 * @param stage - Current stage
 * @return
 */
def rsyncData(String stage){
    // Common exports written for every stage; sourced on the duffy host by
    // runTaskAndReturnLogs via 'source ${ORIGIN_WORKSPACE}/task.env'.
    def text = "export JENKINS_JOB_NAME=\"${env.JOB_NAME}-${stage}\"\n" +
            "export RSYNC_USER=\"${env.RSYNC_USER}\"\n" +
            "export RSYNC_SERVER=\"${env.RSYNC_SERVER}\"\n" +
            "export RSYNC_DIR=\"${env.RSYNC_DIR}\"\n" +
            "export FEDORA_PRINCIPAL=\"${env.FEDORA_PRINCIPAL}\"\n" +
            "export JENKINS_BUILD_TAG=\"${env.BUILD_TAG}-${stage}\"\n" +
            "export OSTREE_BRANCH=\"${env.OSTREE_BRANCH}\"\n"

    // Extra exports for the ostree-related stages.
    if (stage in ['ci-pipeline-ostree-compose', 'ci-pipeline-ostree-image-compose',
                         'ci-pipeline-ostree-image-boot-sanity', 'ci-pipeline-ostree-boot-sanity']) {
        text = text +
                "export HTTP_BASE=\"${env.HTTP_BASE}\"\n" +
                "export PUSH_IMAGE=\"${env.PUSH_IMAGE}\"\n" +
                "export branch=\"${env.branch}\"\n"
    }
    // Stage-specific exports (note: the boot-sanity stages below can also
    // match the ostree list above, so they get both sets).
    if (stage == 'ci-pipeline-rpmbuild') {
        text = text +
                "export fed_repo=\"${env.fed_repo}\"\n" +
                "export fed_branch=\"${env.fed_branch}\"\n" +
                "export fed_rev=\"${env.fed_rev}\"\n"

    } else if (stage == 'ci-pipeline-ostree-image-boot-sanity') {
        text = text +
                "export image2boot=\"${env.image2boot}\"\n" +
                "export commit=\"${env.commit}\"\n" +
                "export ANSIBLE_HOST_KEY_CHECKING=\"False\"\n"
    } else if (stage == 'ci-pipeline-ostree-boot-sanity') {
        text = text +
                "export fed_repo=\"${env.fed_repo}\"\n" +
                "export image2boot=\"${env.image2boot}\"\n" +
                "export commit=\"${env.commit}\"\n" +
                "export ANSIBLE_HOST_KEY_CHECKING=\"False\"\n"
    } else if (stage == 'ci-pipeline-functional-tests') {
        text = text +
                "export package=\"${env.fed_repo}\"\n"
    }

    // Persist the env file, then ship it to the duffy host and run the task.
    writeFile file: "${env.ORIGIN_WORKSPACE}/task.env",
            text: text
    runTaskAndReturnLogs(stage, 'duffy-key')

}

/**
 * Library to provision resources used in the stage
 * @param stage - Current stage
 * @return
 */
def provisionResources(String stage){
    def utils = new Utils()

    // Allocate a duffy node for this stage via the cciskel helper.
    // The ORIGIN_* env vars echoed below are presumably populated by the
    // allocation — confirm in org.centos.Utils.
    utils.allocateDuffyCciskel(stage)

    echo "Duffy Allocate ran for stage ${stage} with option --allocate\r\n" +
            "ORIGIN_WORKSPACE=${env.ORIGIN_WORKSPACE}\r\n" +
            "ORIGIN_BUILD_TAG=${env.ORIGIN_BUILD_TAG}\r\n" +
            "ORIGIN_CLASS=${env.ORIGIN_CLASS}"

    // Convert the flat job.props file into a groovy script and load() it so
    // its values become available to the running pipeline.
    // NOTE(review): job_props/job_props_groovy have no 'def', so they land in
    // the global binding — confirm nothing depends on that before scoping.
    job_props = "${env.ORIGIN_WORKSPACE}/job.props"
    job_props_groovy = "${env.ORIGIN_WORKSPACE}/job.groovy"
    utils.convertProps(job_props, job_props_groovy)
    load(job_props_groovy)

}

/**
 * Function to execute script in container
 * Container must have been defined in a podTemplate
 *
 * @param stageName Name of the stage
 * @param containerName Name of the container for script execution
 * @param script Complete path to the script to execute
 * @param vars Optional list of key=values to add to env
 * @return
 */
def executeInContainer(String stageName,
                       String containerName,
                       String script,
                       ArrayList<String> vars=null) {
    //
    // Kubernetes plugin does not let containers inherit
    // env vars from host. We force them in.
    //
    def containerEnv = env.getEnvironment().collect { key, value -> return "${key}=${value}" }
    if (vars){
        vars.each {x->
            containerEnv.add(x)
        }
    }

    sh script: "mkdir -p ${stageName}", label: "Creating directory ${stageName}"
    // Run the script inside the named container with the forced-in env.
    // (Removed a catch(err){throw err} that only re-threw; failures still
    // propagate, and the finally block still collects logs either way.)
    try {
        withEnv(containerEnv) {
            container(containerName) {
                sh script
            }
        }
    } finally {
        // Preserve any logs/ directory the script produced under stageName/.
        sh script: """
        if [ -d "logs" ]; then
            mv -vf logs ${stageName}/logs || true
        else
            echo "No logs for executeInContainer(). Ignoring this." >&2
        fi
        """,
        label: "Checking for the logs directory"
    }
}

/**
 *
 * @param nodeName podName we are going to verify.
 * @return
 */
def ocVerifyPod(String nodeName) {
    // Archive the pod description for later debugging.
    def describeStr = openshift.selector("pods", nodeName).describe()
    out = describeStr.out.trim()

    sh script: 'mkdir -p podInfo', label: "Create directory"

    writeFile file: 'podInfo/node-pod-description-' + nodeName + '.txt',
                text: out
    archiveArtifacts 'podInfo/node-pod-description-' + nodeName + '.txt'

    // Poll until every container in the pod reports ready, bounded by
    // timeout(60) (minutes by default).
    // NOTE(review): the queries below use env.NODE_NAME while the description
    // above uses the nodeName parameter — confirm both always name the same pod.
    timeout(60) {
        echo "Ensuring all containers are running in pod: ${env.NODE_NAME}"
        echo "Container names in pod ${env.NODE_NAME}: "
        names       = openshift.raw("get", "pod",  "${env.NODE_NAME}", '-o=jsonpath="{.status.containerStatuses[*].name}"')
        containerNames = names.out.trim()
        echo containerNames

        waitUntil {
            // Re-query readiness each iteration; "false" anywhere in the
            // jsonpath output means at least one container is not ready.
            def readyStates = openshift.raw("get", "pod",  "${env.NODE_NAME}", '-o=jsonpath="{.status.containerStatuses[*].ready}"')

            echo "Container statuses: "
            echo containerNames
            echo readyStates.out.trim().toUpperCase()
            def anyNotReady = readyStates.out.trim().contains("false")
            if (anyNotReady) {
                echo "One or more containers not ready...see above message ^^"
                return false
            } else {
                echo "All containers ready!"
                return true
            }
        }
    }
}

/**
 * Convenience wrapper: run ocVerifyPod() inside the given cluster project.
 *
 * @param openshiftProject name of openshift namespace/project.
 * @param nodeName podName we are going to verify.
 * @return
 */
def verifyPod(String openshiftProject, String nodeName) {
    openshift.withCluster {
        openshift.withProject(openshiftProject) {
            return ocVerifyPod(nodeName)
        }
    }
}

/**
 * Fetch the log of every container in the pod and archive each one as
 * podInfo/containerLog-&lt;container&gt;-&lt;pod&gt;.txt.
 * Must be called inside openshift.withCluster()/withProject().
 *
 * NOTE(review): dropped @NonCPS — this method invokes pipeline steps
 * (sh, containerLog, writeFile, archiveArtifacts), and pipeline steps
 * may not be used inside @NonCPS-annotated methods.
 *
 * @param nodeName podName we are going to get container logs from.
 * @return
 */
def ocGetContainerLogsFromPod(String nodeName) {
    sh script: 'mkdir -p podInfo', label: "Create directory"
    // NOTE(review): the original listed the containers of env.NODE_NAME but
    // named the log files after nodeName; query the argument consistently.
    def names = openshift.raw("get", "pod", "${nodeName}", '-o=jsonpath="{.status.containerStatuses[*].name}"')
    String containerNames = names.out.trim()

    // One log file per container in the pod.
    containerNames.split().each {
        String log = containerLog name: it, returnLog: true
        writeFile file: "podInfo/containerLog-${it}-${nodeName}.txt",
                    text: log
    }
    archiveArtifacts "podInfo/containerLog-*.txt"
}

/**
 * Convenience wrapper: run ocGetContainerLogsFromPod() inside the given
 * cluster project.
 *
 * NOTE(review): dropped @NonCPS — openshift.withCluster/withProject are
 * pipeline steps and may not be used inside @NonCPS-annotated methods.
 *
 * @param openshiftProject name of openshift namespace/project.
 * @param nodeName podName we are going to get container logs from.
 * @return
 */
def getContainerLogsFromPod(String openshiftProject, String nodeName) {
    openshift.withCluster() {
        openshift.withProject(openshiftProject) {
            ocGetContainerLogsFromPod(nodeName)
        }
    }
}

/**
 * Send a one-off IRC notice by speaking the raw IRC protocol over TLS.
 *
 * @param nick nickname to connect to IRC with
 * @param channel channel to connect to
 * @param message message to send
 * @param ircServer optional IRC server defaults to irc.freenode.net:6697
 * @return
 */
def sendIRCNotification(String nick, String channel, String message, String ircServer="irc.freenode.net:6697") {
    sh script: """
        (
        echo "NICK ${nick}"
        # Quoted so the shell does not glob-expand the literal '*' in the
        # USER command (unquoted it expanded to the workspace file list).
        echo "USER ${nick} 8 * : ${nick}"
        sleep 5
        echo "JOIN ${channel}"
        sleep 10
        echo "NOTICE ${channel} :${message}"
        echo QUIT
        ) | openssl s_client -connect ${ircServer}
    """,
    label: "Joining the IRC channel"
}

/**
 * Derive the Duffy API password: the first 13 characters of the key file.
 *
 * @param credentialsId Credential ID for Duffy Key
 * @return password
 */
def getPasswordFromDuffyKey(String credentialsId) {
    withCredentials([file(credentialsId: credentialsId, variable: 'DUFFY_KEY')]) {
        // Single-quoted script: $DUFFY_KEY is resolved by the shell from the
        // environment, so the secret path is never interpolated into the
        // Groovy script text (Jenkins warns against Groovy interpolation
        // of credential variables).
        return sh(script: 'cut -c -13 "$DUFFY_KEY"',
                label: "Getting password from Duffy Key", returnStdout: true).trim()
    }
}

/**
 * Tear down the Duffy resources that were allocated for the given stage.
 *
 * variables
 *   currentStage - current stage running
 */
def teardownResources(String stage){
    // Delegate the actual deallocation to the shared Utils helper.
    new Utils().teardownDuffyCciskel(stage)

    echo "Duffy Deallocate ran for stage ${stage} with option --teardown\r\n" +
            "DUFFY_HOST=${env.DUFFY_HOST}"
}

/**
 * Based on the list of performed image promotions, add a comment to the
 * GitHub PR (env.ghprbPullId) describing them, using the ghi tool.
 *
 * @param imageOperationsList list of promotion descriptions
 * @return
 */
def sendPRCommentforTags(imageOperationsList) {
    // Nothing promoted -> nothing to report.
    if (imageOperationsList.isEmpty()) {
        return
    }

    def msg = "\nThe following image promotions have taken place:\n\n"
    for (operation in imageOperationsList) {
        msg += "+ ${operation}\n"
    }

    echo "Prepare GHI tool"
    withCredentials([string(credentialsId: 'paas-bot', variable: 'TOKEN')]) {
        sh script: "git config --global ghi.token ${TOKEN}", label: "Configuring git"
        sh script: 'curl -sL https://raw.githubusercontent.com/stephencelis/ghi/master/ghi > ghi && chmod 755 ghi', label: "Cloning repository"
        sh script: './ghi comment ' + env.ghprbPullId + ' -m "' + msg + '"', label: "Commenting"
    }
}

/**
 * Log the tag that will be used for each entry of the map.
 * @param map
 */
def printLabelMap(map) {
    map.each { key, value ->
        echo "tag to be used for ${key} -> ${value}"
    }
}

/**
 * Setup container templates in openshift by running the s2i
 * create-containers.sh script inside the given project.
 * @param openshiftProject Openshift Project
 * @return
 */
def setupContainerTemplates(String openshiftProject) {
    def label = "Executing create-containers.sh script"
    openshift.withCluster {
        openshift.withProject(openshiftProject) {
            // The script lives alongside the s2i configs in the checkout.
            dir('config/s2i') {
                sh script: './create-containers.sh', label: label
            }
        }
    }
}

/**
 * Build image in openshift
 * @param openshiftProject Openshift Project
 * @param buildConfig
 * @return the imagestream tag created for this PR ("PR-&lt;id&gt;")
 */
def buildImage(String openshiftProject, String buildConfig) {
    // - build in Openshift
    // - startBuild with a commit
    // - Get result Build and get imagestream manifest
    // - Use that to create a unique tag
    // - This tag will then be passed as an image input
    //   to the podTemplate/containerTemplate to create
    //   our slave pod.
    openshift.withCluster() {
        openshift.withProject(openshiftProject) {
            def result = openshift.startBuild(buildConfig,
                    "--commit",
                    "refs/pull/" + env.ghprbPullId + "/head",
                    "--wait")
            def out = result.out.trim()
            echo "Resulting Build: " + out

            def describeStr = openshift.selector(out).describe()
            // Declared with def: the originals leaked into the script binding.
            def outTrim = describeStr.out.trim()

            // --wait is being lost due to socket timeouts, so poll the build
            // description until its Status leaves New/Pending/Running.
            def buildRunning = true
            while (buildRunning) {
                describeStr = openshift.selector(out).describe()
                outTrim = describeStr.out.trim()
                buildRunning = sh(script: "echo \"${outTrim}\" | grep '^Status:' | grep -E 'New|Pending|Running'", label: "Checking build status", returnStatus: true) == 0
                sleep 60
            }

            // Pull the sha256 digest out of the build description.
            def imageHash = sh(
                    script: "echo \"${outTrim}\" | grep 'Image Digest:' | cut -f2- -d:",
                    label: "Getting Image Hash",
                    returnStdout: true
            ).trim()
            echo "imageHash: ${imageHash}"

            echo "Creating CI tag for ${openshiftProject}/${buildConfig}: ${buildConfig}:PR-${env.ghprbPullId}"

            openshift.tag("${openshiftProject}/${buildConfig}@${imageHash}",
                    "${openshiftProject}/${buildConfig}:PR-${env.ghprbPullId}")

            return "PR-" + env.ghprbPullId
        }
    }
}

/**
 * Build stable image in openshift
 * @param openshiftProject Openshift Project
 * @param buildConfig
 * @param buildTag tag to apply to the resulting image (default 'stable')
 * @return
 */
def buildStableImage(String openshiftProject, String buildConfig, String buildTag = 'stable') {
    // - build in Openshift
    // - startBuild using ref in openshift
    // - Get result Build and get imagestream manifest
    // - Use that to create a stable tag
    openshift.withCluster() {
        openshift.withProject(openshiftProject) {
            def result = openshift.startBuild(buildConfig,
                    "--wait")
            def out = result.out.trim()
            echo "Resulting Build: " + out

            def describeStr = openshift.selector(out).describe()
            // Declared with def: the originals leaked into the script binding.
            def outTrim = describeStr.out.trim()

            // --wait is being lost due to socket timeouts, so poll the build
            // description until its Status leaves New/Pending/Running.
            def buildRunning = true
            while (buildRunning) {
                describeStr = openshift.selector(out).describe()
                outTrim = describeStr.out.trim()
                buildRunning = sh(script: "echo \"${outTrim}\" | grep '^Status:' | grep -E 'New|Pending|Running'",
                label: "Checking build status",
                returnStatus: true) == 0
                sleep 60
            }

            // Pull the sha256 digest out of the build description.
            def imageHash = sh(
                    script: "echo \"${outTrim}\" | grep 'Image Digest:' | cut -f2- -d:",
                    label: "Getting Image Hash",
                    returnStdout: true
            ).trim()
            echo "imageHash: ${imageHash}"

            echo "Creating stable tag for ${openshiftProject}/${buildConfig}: ${buildConfig}:${buildTag}"

            openshift.tag("${openshiftProject}/${buildConfig}@${imageHash}",
                        "${openshiftProject}/${buildConfig}:${buildTag}")

        }
    }
}

/**
 * Using the currentBuild, get a string representation
 * of the changelog.
 * @return String of changelog
 */
@NonCPS
def getChangeLogFromCurrentBuild() {
    // Truncate each commit message so the summary stays readable.
    // Declared with def: the original leaked into the script binding.
    def MAX_MSG_LEN = 100
    def changeString = ""

    echo "Gathering SCM changes"
    // Indexed loops (not .each) because this is a @NonCPS method walking
    // the non-serializable changeSets model objects.
    def changeLogSets = currentBuild.changeSets
    for (int i = 0; i < changeLogSets.size(); i++) {
        def entries = changeLogSets[i].items
        for (int j = 0; j < entries.length; j++) {
            def entry = entries[j]
            def truncated_msg = entry.msg.take(MAX_MSG_LEN)
            changeString += " - ${truncated_msg} [${entry.author}]\n"
            def files = new ArrayList(entry.affectedFiles)
            for (int k = 0; k < files.size(); k++) {
                def file = files[k]
                changeString += "    | (${file.editType.name})  ${file.path}\n"
            }
        }
    }

    if (!changeString) {
        changeString = " - No new changes\n"
    }
    return changeString
}

/**
 * Sets the Build displayName and Description based on whether it
 * is a PR or a prod run.
 */
def setBuildDisplayAndDescription() {
    currentBuild.displayName = "Build#: ${env.BUILD_NUMBER} - Branch: ${env.branch} - Package: ${env.fed_repo}"
    // ghprb sets ghprbActualCommit on PR-triggered builds.
    def isPullRequest = env.ghprbActualCommit != null && env.ghprbActualCommit != "master"
    if (isPullRequest) {
        currentBuild.description = "<a href=\"https://github.com/${env.ghprbGhRepository}/pull/${env.ghprbPullId}\">PR #${env.ghprbPullId} (${env.ghprbPullAuthorLogin})</a>"
    } else {
        currentBuild.description = "${currentBuild.currentResult}"
    }
}

/**
 * Update the Build displayName and Description based on whether it
 * is a PR or a prod run.
 * Used at start of pipeline to decorate the build with info
 */
def updateBuildDisplayAndDescription() {
    currentBuild.displayName = "Build#: ${env.BUILD_NUMBER} - Branch: ${env.branch} - Package: ${env.fed_repo}"
    // Only PR-triggered builds (ghprb) get a description link.
    if (env.ghprbActualCommit == null || env.ghprbActualCommit == "master") {
        return
    }
    currentBuild.description = "<a href=\"https://github.com/${env.ghprbGhRepository}/pull/${env.ghprbPullId}\">PR #${env.ghprbPullId} (${env.ghprbPullAuthorLogin})</a>"
}

/**
 * Sets the Build displayName and Description based on params
 * @param buildName
 * @param buildDesc
 */
def setCustomBuildNameAndDescription(String buildName, String buildDesc) {
    // Only override when a non-blank value was supplied.
    if (buildName != null && !buildName.trim().isEmpty()) {
        currentBuild.displayName = buildName
    }
    if (buildDesc != null && !buildDesc.trim().isEmpty()) {
        currentBuild.description = buildDesc
    }
}

/**
 * Clears previous pod template's name to avoid implied nesting
 */
def clearTemplateNames() {
  // The kubernetes plugin records each podTemplate in a PodTemplateAction
  // stack on the build; a later podTemplate would otherwise be treated as
  // nested inside the previous one. Safe-navigation: the action and its
  // stack may be absent when no pod template has run yet.
  // NOTE(review): uses currentBuild.rawBuild internals — in a sandboxed
  // pipeline this needs script approval; confirm for your instance.
  currentBuild.rawBuild.getAction( PodTemplateAction.class )?.stack?.clear()
}

/**
 * get Variables From Message
 *
 * NOTE(review): dropped @NonCPS — readJSON and error are pipeline steps
 * and may not be invoked from @NonCPS-annotated methods.
 *
 * @param message trigger message
 * @return map of message vars
 */
def getVariablesFromMessage(String message) {

    // Declared with def: the original leaked into the script binding.
    def messageVars = [:]

    // Parse the message into a Map (escape raw newlines so readJSON accepts it)
    def ci_data = readJSON text: message.replace("\n", "\\n")
    if (ci_data['commit']) {
        ci_data.commit.each { key, value ->
            // Normalize keys to env-style names; keep only the first line of
            // each value and replace double quotes to keep them shell-safe.
            String varKey = key.toString().replaceAll('-', '_')
            String varValue = value.toString().split('\n')[0].replaceAll('"', '\'')
            messageVars[varKey] = varValue
        }
        // dist-git 'master' corresponds to the rawhide compose.
        if (messageVars['branch'] == 'master') {
            messageVars['branch'] = 'rawhide'
        }
    } else {
        error "Incorrect dist-git message format: ${message}"
    }
    return messageVars
}
/**
 * Watch for messages and verify their contents
 * @param msg_provider jms-messaging message provider
 * @param message trigger message
 */
def watchForMessages(String msg_provider, String message) {

    def messageVars = getVariablesFromMessage(message)

    // Common attributes that all messages should have
    def commonAttributes = ["branch", "build_id", "build_url", "namespace",
            "ref", "repo", "rev", "status", "topic",
            "username"]

    // "nvr", "original_spec_nvr" are not added as common since they only get
    // getting resolved AFTER package.complete.
    //
    // Map of topic -> extra attributes (beyond commonAttributes) that a
    // message on that topic must carry.
    messageContentValidationMap = [:]
    messageContentValidationMap['org.centos.stage.ci.pipeline.package.running'] =
            []
    messageContentValidationMap['org.centos.stage.ci.pipeline.package.complete'] =
            ["nvr", "original_spec_nvr"]
    messageContentValidationMap['org.centos.stage.ci.pipeline.compose.running'] =
            ["nvr", "original_spec_nvr", "compose_url"]
    messageContentValidationMap['org.centos.stage.ci.pipeline.compose.complete'] =
            ["nvr", "original_spec_nvr", "compose_url"]
    messageContentValidationMap['org.centos.stage.ci.pipeline.compose.test.integration.queued'] =
            ["nvr", "original_spec_nvr", "compose_url"]
    messageContentValidationMap['org.centos.stage.ci.pipeline.compose.test.integration.running'] =
            ["nvr", "original_spec_nvr", "compose_url"]
    messageContentValidationMap['org.centos.stage.ci.pipeline.compose.test.integration.complete'] =
            ["nvr", "original_spec_nvr", "compose_url"]
    messageContentValidationMap['org.centos.stage.ci.pipeline.complete'] =
            []

    // Wait for each expected topic in order; the checks ensure the received
    // message belongs to the same branch/rev/repo as the trigger message.
    messageContentValidationMap.each { k, v ->
        echo "Waiting for topic : ${k}"
        msg = waitForCIMessage providerName: "${msg_provider}",
                selector: "topic = \'${k}\'",
                checks: [[expectedValue: "${messageVars['branch']}", field: '$.branch'],
                         [expectedValue: "${messageVars['rev']}", field: '$.rev'],
                         [expectedValue: "${messageVars['repo']}", field: '$.repo']
                ],
                overrides: [topic: 'org.centos.stage']
        echo msg
        // Escape raw newlines so readJSON accepts the payload.
        def msg_data = readJSON text: msg.replace("\n", "\\n")
        allFound = true

        // Verify every required attribute is present AND non-empty;
        // collect all failures before erroring out.
        def errorMsg = ""
        v.addAll(commonAttributes)
        v.each {
            if (!msg_data.containsKey(it)) {
                String err = "Error: Did not find message property: ${it}"
                errorMsg = "${errorMsg}\n${err}"
                echo "${err}"
                allFound = false
            } else {
                if (!msg_data[it]) {
                    allFound = false
                    String err = "Error: Found message property: ${it} - but it was empty!"
                    echo "${err}"
                    errorMsg = "${errorMsg}\n${err}"
                } else {
                    echo "Found message property: ${it} = ${msg_data[it]}"
                }
            }
        }
        if (!allFound) {
            errorMsg = "Message did not contain all expected message properties:\n\n${errorMsg}"
            error errorMsg
        }
    }
}

/**
 * Test if $tag tests exist for $mypackage on $mybranch in fedora dist-git
 * For mybranch, use fXX or master and pr_id is PR number (digits only)
 * @param mypackage
 * @param mybranch - Fedora branch
 * @param tag
 * @param pr_id    - PR number
 * @param namespace - rpms (default) or container
 * @return
 */
def checkTests(String mypackage, String mybranch, String tag, String pr_id=null, String namespace='rpms') {
    echo "Currently checking if package tests exist"
    // Start from a clean checkout of the package's dist-git branch.
    sh script: "rm -rf ${mypackage}", label: "Deleting ${mypackage}"
    def repo_url = "https://src.fedoraproject.org/${namespace}/${mypackage}/"
    sh script: "git clone -b ${mybranch} --single-branch --depth 1 ${repo_url}", label: "Cloning ${repo_url}"
    if (pr_id != null) {
        dir("${mypackage}") {
            // Fetch the PR head into a local 'pr' branch and merge it in,
            // so the check runs against the PR's test changes.
            sh script: "git fetch -fu origin refs/pull/${pr_id}/head:pr", label: "Fetching changes"
            // If fail to apply patch do not exit with error, but instead ignore the patch
            // this should avoid the pipeline to exit here without sending any topic to fedmsg
            try {
                // Setting git config and merge message in case we try to merge a closed PR, like it is done on stage instance
                sh script: "git -c 'user.name=Fedora CI' -c 'user.email=ci@lists.fedoraproject.org'  merge pr -m 'Fedora CI pipeline'", label: "Merging PR"
            } catch (err) {
                echo "FAIL to apply patch from PR, ignoring it..."
            }
        }
    }

    // In the 'tests' namespace the playbooks live at the repo root,
    // otherwise under the package's tests/ subdirectory.
    def tests_path = "${mypackage}/tests"
    if (namespace == "tests") {
        tests_path = "${mypackage}"
    }

    // No tests*.yml playbooks at all -> no tests.
    if (sh(returnStatus: true, script: "ls -allh ${tests_path}/tests*.yml", label: "Checking if ${tests_path} exist") != 0) {
        return false
    }

    // if STR is installed use it to check for tags as it is more reliable
    if (sh(returnStatus: true, script: """rpm -q standard-test-roles""", label: "Checking if STR is installed") == 0) {
        // It should leave with exception if playbook is invalid
        sh(script: "ansible-playbook --list-tags ${tests_path}/tests*.yml > playbook-tags.txt", label: "Getting playbook-tags for the: ${tests_path}")
        return sh (returnStatus: true, script: "grep -e \"TASK TAGS: \\[.*\\<${tag}\\>.*\\]\" playbook-tags.txt", label: "Checking if playbook is valid") == 0
    } else {
        // Fallback: grep the playbook sources for a '- <tag>' entry.
        return sh (returnStatus: true, script: """grep -r '\\- '${tag}'\$' ${tests_path}""", label: "Checking if playbook is valid") == 0
    }
}

/**
 * Test to check if CI_MESSAGE is for a user's fork
 * @param message - The CI_MESSAGE
 * @return boolean
 */
def checkIfFork(String message) {
    // Escape raw newlines so readJSON accepts the payload.
    def parsed = readJSON text: message.replace("\n", "\\n")
    // Fork repositories carry 'repositories/forks' in their commit path.
    return parsed['commit']['path'].contains('repositories/forks')
}

/**
 * Mark stage stageName as skipped
 * @param stageName
 * @return
 */
def skip(String stageName) {
    // Declarative-pipeline internal helper: renders the named stage as
    // "skipped" in the stage view without failing the build.
    Utils.markStageSkippedForConditional(stageName)
}

/**
 * Lock a directory on localhost
 * @param fileLocation - The location to store the lock file
 * @param duration - The number of seconds that if lock is this age, overwrite it
 * @param myuuid - The pod uuid that is taking the lock
 * @return myuuid - Generate uuid
 */
def obtainLock(String fileLocation, int duration, String myuuid) {
    echo "Currently in obtainLock function"
    // Make sure the directory that will hold the lock file exists.
    sh script: """ mkdir -p \$(dirname "${fileLocation}") """, label: "Creating directory ${fileLocation}"

    // Busy-wait protocol: under an flock on <fileLocation>.lck, wait until
    // the lock file is free (absent, older than 'duration' seconds, or its
    // owning pod is no longer Running), then write our uuid into it and
    // re-read to confirm we won the lock.
    sh script: """
        (
        flock 9
        currentTime=\$(date +%s)
        while true ; do
            # Check if lock file exists
            while [ -f "${fileLocation}" ] ; do
                storeduuid=\$(cat "${fileLocation}" || echo "")
                if [ "\${storeduuid}" == "" ]; then
                    # Could not read the file, try again...
                    continue
                fi
                lockAge=\$(stat -c %Y "${fileLocation}")
                ageDiff=\$((\${currentTime} - \${lockAge}))
                # Break if lock file is too old
                if [ \${ageDiff} -ge "${duration}" ]; then
                    break
                fi
                # Break if stored uuid pod is no longer running
                if [ "\$(oc get pods | grep \${storeduuid} | grep Running | sed 's/ //g')" == "" ]; then
                    break
                fi
                sleep 30
                currentTime=\$(date +%s)
            done
            # Now, either lock file is older than duration
            # or the lock is gone, so proceed
            echo "${myuuid}" > "${fileLocation}"
            testuuid=\$(cat "${fileLocation}")
            # If uuid matches, we got the lock
            if [ \${testuuid} == "${myuuid}" ]; then
                break
            fi
            sleep 30
        done
        # fileLocation.lck isn't important, but redirect somewhere
        ) 9>"${fileLocation}".lck
    """,
    label: "Locking a directory on localhost"
    // Return the uuid that now owns the lock; releaseLock() checks it.
    return myuuid
}

/**
 * Remove lock file on localhost
 * @param fileLocation - The location to store the lock file
 * @param myuuid - The uuid to check that the file contains
 * @return
 * @throws Exception when the lock is missing or owned by another build
 */
def releaseLock(String fileLocation, String myuuid) {
    if (!fileExists(fileLocation)) {
        println "Lock file didn't exist: ${fileLocation}"
        println "WARN: This build ran without a lock!"
        throw new Exception("Ran without lock")
    }

    def storeduuid = readFile(fileLocation).trim()
    if (storeduuid != myuuid) {
        // We were told to release a lock we didn't have
        throw new Exception("Lock didn't belong to this build")
    }

    // Remove the lock file and the flock side-file created by obtainLock().
    // (The original label listed fileLocation twice instead of the .lck file.)
    sh script: "rm -f ${fileLocation} ${fileLocation}.lck",
       label: "Deleting files: ${fileLocation}, ${fileLocation}.lck"
}

/**
 * Reads package test.log and return a map of test_name -> test_result.
 * Each line is expected to look like "<result> <test_name>".
 * @param fileLocation
 * @return map of test_name -> test_result
 */
def parseTestLog(String fileLocation) {
    def contents = readFile(fileLocation)

    def testMap = [:]
    contents.split('\n').each { line ->
        def tokens = line.split()
        // Skip blank or malformed lines (e.g. a trailing newline) that
        // previously caused an ArrayIndexOutOfBoundsException.
        if (tokens.length >= 2) {
            testMap[tokens[1]] = tokens[0]
        }
    }

    return testMap
}

/**
 * General function to check existence of a file
 * @param fileLocation
 * @return boolean
 */
def fileExists(String fileLocation) {
    try {
        // readFile throws when the path does not exist on the agent.
        readFile(fileLocation)
        return true
    } catch(e) {
        println "file not found: ${fileLocation}"
        return false
    }
}

/**
 * Check the package test results
 * @param testResults map of test_name -> test_result (see parseTestLog)
 * @return 'UNSTABLE' if any test did not pass, otherwise null
 */
def checkTestResults(Map testResults) {
    // Short-circuit on the first non-passing test; the original kept
    // iterating but produced the same result. (Javadoc also fixed: the
    // parameter is the parsed result map, not a log file path.)
    for (entry in testResults) {
        if (entry.value != 'PASSED') {
            return 'UNSTABLE'
        }
    }
    return null
}

/**
 * Wrapper to parse json before injecting env variables
 * @param prefix
 * @param message
 * @return
 */
def flattenJSON(String prefix, String message) {
    // Escape raw newlines so readJSON accepts the payload, then walk it.
    def escaped = message.replace("\n", "\\n")
    injectCIMessage(prefix, readJSON(text: escaped))
}

/**
 * Set branch and $prefix_branch based on the candidate branch
 * This is meant to be run with a CI_MESSAGE from a build task
 * You should call flattenJSON on the CI_MESSAGE before using
 * this function
 * @param tag - The tag from the request field e.g. f27-candidate
 * @param prefix - The prefix to add to the keys e.g. fed
 * @return
 */
def setBuildBranch(String tag, String prefix) {
    try {
        if (tag.toLowerCase() == 'rawhide') {
            env.branch = tag
            env."${prefix}_branch" = 'master'
        } else {
            // assume that tag is branch-candidate: drop the final '-'
            // segment (f27-candidate -> f27, f27-foo-candidate -> f27-foo).
            // Declared with def: the original leaked into the script binding.
            def tokentag = tag.tokenize('-')
            env."${prefix}_branch" = tokentag[0..tokentag.size()-2].join('-')
            env.branch = env."${prefix}_branch"
        }
    } catch(e) {
        throw new Exception('Something went wrong parsing branch', e)
    }
}

/**
 * Traverse a CI_MESSAGE with nested keys.
 * @param prefix
 * @param ciMessage parsed JSON map
 * @return env map with all keys at top level
 */
def injectCIMessage(String prefix, def ciMessage) {

    ciMessage.each { key, value ->
        def envKey = "${prefix}_${key.replaceAll('-', '_')}"
        // readJSON uses JSON* and slurper uses LazyMap and ArrayList
        def isNestedMap = value instanceof groovy.json.internal.LazyMap ||
                value instanceof net.sf.json.JSONObject
        def isNestedList = value instanceof java.util.ArrayList ||
                value instanceof net.sf.json.JSONArray
        if (isNestedMap) {
            // Nested object: recurse with the extended prefix.
            injectCIMessage(envKey, value)
        } else if (isNestedList) {
            // value was an array itself
            injectArray(envKey, value)
        } else {
            // Scalar: first line only, double quotes made shell-safe.
            env."${envKey}" =
                value.toString().split('\n')[0].replaceAll('"', '\'')
        }
    }
}

/**
 * Inject array values
 * @param prefix
 * @param message
 * @return
 */
def injectArray(String prefix, def message) {
    int index = 0
    for (value in message) {
        // Scalar per element: first line only, double quotes made shell-safe.
        env."${prefix}_${index}" =
            value.toString().split('\n')[0].replaceAll('"', '\'')
        index++
    }
}

/**
 * Find the node name of the Jenkins pod
 * @param podName - name of Jenkins Master pod
 * e.g. jenkins, jenkins-myservice, not the full pod name with build #-uuid
 * @return
 */
def getMasterNode(String podName) {
    openshift.withCluster() {
        // Get Jenkins Pod description
        def longPodDesc = openshift.raw(
            "describe pod -l name=${podName}")
        // Grab the rest of the "Node:" line from the describe output.
        def nodeLine = longPodDesc['actions']['out'] =~ /(?m)(?<=^Node:).*/
        // Strip all the extra spaces to make splitting cleaner
        def niceNodeLine = nodeLine[0].replaceAll("\\s","")
        // The line has the form "<hostname>/<ip>".
        String[] nodeName = niceNodeLine.split('/')
        // There should be both a hostname and an ip address
        if (nodeName.length != 2) {
            throw new IllegalStateException(
                "Something went wrong determining Jenkins master node " +
                "Expected the node variable to be a hostname and " +
                "IP separated by a /, but it was not.")
        }

        // Return just the hostname portion.
        return nodeName[0]
    }
}

/**
 * Derive the package/repo name from a request URL and export it as
 * env.${prefix}_repo.
 *
 * @param request - the url that refers to the package
 * @param prefix - env prefix
 * @return
 * @throws Exception when the request matches none of the known formats
 */
def repoFromRequest(String request, String prefix) {

    // dist-git clone url: .../<repo>.git, .../<repo>?... or .../<repo>#...
    def gitMatcher = request =~ /git.+?\/([a-z0-9A-Z_\-\+\.]+?)(?:\.git|\?|#).*/
    // koji shadow / scratch build task: .../<name>-<rest>
    def buildMatcher = request =~ /(?:koji-shadow|cli-build).+?\/([a-zA-Z0-9\-_\+\.]+)-.*/
    // bare package name
    def pkgMatcher = request =~ /^([a-zA-Z0-9\-_\+\.]+$)/
    // source rpm url: .../<name>-<version>-<release>.src.rpm
    def srpmMatcher = request =~ /.+?\/([a-zA-Z0-9\.\-_\+]+)-[0-9a-zA-Z\.\_]+-[0-9\.].+.src.rpm/

    // NOTE(review): removed a try/catch that only rethrew the exception.
    if (gitMatcher.matches()) {
        env."${prefix}_repo" = gitMatcher[0][1]
    } else if (srpmMatcher.matches()) {
        env."${prefix}_repo" = srpmMatcher[0][1]
    } else if (buildMatcher.matches()) {
        env."${prefix}_repo" = buildMatcher[0][1]
    } else if (pkgMatcher.matches()) {
        env."${prefix}_repo" = pkgMatcher[0][1]
    } else {
        throw new Exception("Invalid request url: ${request}")
    }
}
