#
# THIS FILE IS GENERATED, PLEASE DO NOT EDIT.
#

# <template: e2e_workflow_template>
name: 'destroy cluster: EKS'
on:
  workflow_dispatch:
    inputs:
      run_id:
        description: 'GitHub Action running id with failed e2e test'
        required: true
      state_artifact_name:
        description: 'GitHub artifact name with dhctl/terraform status'
        required: true
      cluster_prefix:
        description: 'Dhctl cluster prefix'
        required: true
      # NOTE(review): description reads like the comment_id description —
      # confirm against the generator template whether this should say
      # 'Issue number with creation message'.
      issue_number:
        description: 'ID of comment in issue with creation message'
        required: true
      comment_id:
        description: 'ID of comment in issue where to put workflow run status'
        required: true
      layout:
        description: 'Cloud provider layout which was tested'
        required: true
      cri:
        description: 'CRI which was tested'
        required: true
      # Fix: description was copy-pasted from the 'cri' input.
      k8s_version:
        description: 'Kubernetes version which was tested'
        required: true
      # needs for run correct installer image for abort
      installer_image_path:
        description: 'Installer image without host'
        required: true
      # needs for destroy cloud clusters if cluster was bootstrapped fully, but e2e was not pass
      ssh_master_connection_string:
        description: 'SSH connection string'
        required: false
env:

  # <template: werf_envs>
  WERF_CHANNEL: "ea"
  WERF_ENV: "FE"
  # Timeout for a single e2e run; presumably consumed by the test scripts —
  # not referenced in this chunk, verify against script_eks.sh.
  TEST_TIMEOUT: "15m"
  # Use fixed string 'sys/deckhouse-oss' for repo name. ${CI_PROJECT_PATH} is not available here in GitHub.
  DEV_REGISTRY_PATH: "${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/sys/deckhouse-oss"
  # Registry for additional repositories used for testing Github Actions workflows.
  GHA_TEST_REGISTRY_PATH: "ghcr.io/${{ github.repository }}"
  # </template: werf_envs>

# Note: no concurrency section for e2e workflows.
# Usually you run e2e and wait until it ends.

jobs:
  # Records the workflow start time as a job output so other tooling can
  # compute run duration (consumer not visible in this chunk).
  started_at:
    name: Save start timestamp
    outputs:
      started_at: ${{ steps.started_at.outputs.started_at }}
    runs-on: "ubuntu-latest"
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

  # <template: e2e_run_job_template>
  run_containerd_1_25:
    name: "destroy cluster: EKS, Containerd, Kubernetes 1.25"
    # The cri/k8s_version/layout dispatch inputs select exactly one of the
    # generated run_* jobs; all others are skipped.
    if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.25' && github.event.inputs.layout == 'WithoutNAT' }}
    env:
      PROVIDER: EKS
      CRI: Containerd
      LAYOUT: WithoutNAT
      KUBERNETES_VERSION: "1.25"
      # NOTE(review): github.event.label is only populated for label events;
      # on workflow_dispatch this expands to an empty string — confirm intended.
      EVENT_LABEL: ${{ github.event.label.name }}
    runs-on: [self-hosted, e2e-common]
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

      # <template: checkout_from_event_ref_step>
      - name: Checkout sources
        uses: actions/checkout@v3.5.2
        with:
          # NOTE(review): 'pull_request_ref' is not declared among this
          # workflow's inputs, so the github.event.ref fallback is always
          # taken on workflow_dispatch — confirm against the generator.
          ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
          fetch-depth: 0
      # </template: checkout_from_event_ref_step>
      # <template: update_comment_on_start>
      - name: Update comment on start
        if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.25';

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnStart({github, context, core, name})

      # </template: update_comment_on_start>


      # <template: login_dev_registry_step>
      # Dev-registry login is optional: both steps are skipped when the
      # DECKHOUSE_DEV_REGISTRY_HOST secret is absent (e.g. in forks).
      - name: Check dev registry credentials
        id: check_dev_registry
        env:
          HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to dev registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_dev_registry_step>

      # <template: login_rw_registry_step>
      - name: Check rw registry credentials
        id: check_rw_registry
        env:
          HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to rw registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
          logout: false
      # Fallback: authenticate against GHCR when rw-registry credentials
      # are not available.
      - name: Login to Github Container Registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
          password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_rw_registry_step>

      # <template: werf_install_step>
      - name: Install werf CLI
        uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
        with:
          channel: ${{env.WERF_CHANNEL}}
      # </template: werf_install_step>

      # Prepares the per-run temp dir, pulls the installer image and
      # publishes image names/tags as step outputs for later steps.
      - name: Setup
        id: setup
        env:
          DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
          DHCTL_PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_PATH: ${{ github.event.inputs.installer_image_path }}
        run: |
          # Create tmppath for test script. Fail fast if a previous run left
          # the directory behind — its state must not be reused silently.
          TMP_DIR_PATH="/mnt/cloud-layouts/layouts/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${DHCTL_PREFIX}"
          if [[ -d "${TMP_DIR_PATH}" ]] ; then
            echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
            ls -la "${TMP_DIR_PATH}"
            exit 1
          else
            echo "Create temporary dir for job: ${TMP_DIR_PATH}."
            mkdir -p "${TMP_DIR_PATH}"
          fi

          INSTALL_IMAGE_NAME="${DECKHOUSE_REGISTRY_HOST:-}${INSTALL_IMAGE_PATH}"

          SAFE_IMAGE_NAME=$(echo "${INSTALL_IMAGE_NAME}" | tr '[:lower:]' '[:upper:]')
          # NOTE(review): REF_FULL is never set in this workflow, so it
          # expands to an empty string here — confirm against the generator.
          echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"

          # Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
          echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
          docker pull "${INSTALL_IMAGE_NAME}"

          # The deckhouse image tag is everything after the ':' in the
          # installer path (e.g. 'sys/install:TAG').
          arrPath=(${INSTALL_IMAGE_PATH//:/ })
          DECKHOUSE_IMAGE_TAG="${arrPath[1]}"
          # Fix: dropped the unused IMAGE_TAG assignment that referenced the
          # GitLab-only CI_COMMIT_REF_SLUG variable (always empty on GitHub).
          BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
          TERRAFORM_IMAGE_NAME="${BRANCH_REGISTRY_PATH}/e2e-terraform:${DECKHOUSE_IMAGE_TAG}"

          echo '::echo::on'
          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
          echo "install-image-full=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "deckhouse-image-tag=${DECKHOUSE_IMAGE_TAG}" >> $GITHUB_OUTPUT
          echo "terraform-image-name=${TERRAFORM_IMAGE_NAME}" >> $GITHUB_OUTPUT
          # NOTE(review): INITIAL_IMAGE_TAG is never set in this job, so this
          # output is always empty — consumers must tolerate that.
          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT

          echo '::echo::off'

      - name: "Download state"
        id: download_artifact_with_state
        uses: dawidd6/action-download-artifact@v2.23.0
        with:
          github_token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          run_id: ${{github.event.inputs.run_id}}
          name: ${{github.event.inputs.state_artifact_name}}
          path: ${{ steps.setup.outputs.tmp-dir-path}}

      # Runs 'script_eks.sh cleanup' inside the e2e-terraform image against
      # the downloaded state to destroy the bootstrapped cluster.
      - name: Cleanup bootstrapped cluster
        if: ${{ success() }}
        id: cleanup_cluster
        env:
          PROVIDER: EKS
          CRI: Containerd
          LAYOUT: WithoutNAT
          KUBERNETES_VERSION: "1.25"
          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
          PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-full }}
          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
          TERRAFORM_IMAGE_NAME: ${{ steps.setup.outputs.terraform-image-name }}
          SSH_MASTER_CONNECTION_STRING: ${{ github.event.inputs.ssh_master_connection_string }}
        # <template: e2e_run_template>
          LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
          LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
          COMMENT_ID: ${{ inputs.comment_id }}
          GITHUB_API_SERVER: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          # NOTE(review): the setup step does not emit a 'dhctl-log-file'
          # output, so DHCTL_LOG_FILE is always empty here — confirm against
          # the generator template.
          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
        run: |
          echo "Execute 'script_eks.sh cleanup' via 'docker run', using environment:
            TERRAFORM_IMAGE_NAME=${TERRAFORM_IMAGE_NAME}
            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
            PREFIX=${PREFIX}
            PROVIDER=${PROVIDER}
            CRI=${CRI}
            LAYOUT=${LAYOUT}
            KUBERNETES_VERSION=${KUBERNETES_VERSION}
            TMP_DIR_PATH=${TMP_DIR_PATH}
          "

          ls -lh $(pwd)/testing

          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
          echo "DHCTL log file: $dhctl_log_file"

          user_runner_id=$(id -u):$(id -g)
          echo "user_runner_id $user_runner_id"
          chmod 755 $(pwd)/testing/cloud_layouts/script_eks.sh

          # Fix: removed the duplicated '-e CRI=${CRI}' flag; values are
          # quoted so expansions with spaces cannot split into extra args.
          docker run --rm \
          -e DECKHOUSE_DOCKERCFG="${LAYOUT_DECKHOUSE_DOCKERCFG}" \
          -e DECKHOUSE_IMAGE_TAG="${DECKHOUSE_IMAGE_TAG}" \
          -e INITIAL_IMAGE_TAG="${INITIAL_IMAGE_TAG}" \
          -e CRI="${CRI}" \
          -e LAYOUT_AWS_ACCESS_KEY="${LAYOUT_AWS_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_SECRET_ACCESS_KEY="${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_DEFAULT_REGION=eu-central-1 \
          -e LAYOUT="${LAYOUT}" \
          -e KUBERNETES_VERSION="${KUBERNETES_VERSION}" \
          -e USER_RUNNER_ID="${user_runner_id}" \
          -v "$(pwd)/testing":/deckhouse/testing \
          -v "${TMP_DIR_PATH}":/tmp \
          "${TERRAFORM_IMAGE_NAME}" \
          bash /deckhouse/testing/cloud_layouts/script_eks.sh cleanup

        # </template: e2e_run_template>

      # On successful cleanup, drop the failure marker label from the
      # originating issue/PR.
      - name: Remove failed cluster label
        if: ${{ success() }}
        uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
          number: ${{ github.event.inputs.issue_number }}
          labels: "e2e/cluster/failed"

      # Always remove the per-run temp dir and relax permissions on the
      # files the containerized cleanup may have created as root.
      - name: Cleanup temp directory
        if: always()
        env:
          # Fix: the setup step outputs 'tmp-dir-path', not 'tmppath'; the old
          # reference always expanded to an empty string, so the temporary
          # directory was never removed.
          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
        run: |
          echo "Remove temporary directory '${TMPPATH}' ..."
          # Length guard uses an arithmetic comparison ('>' inside [[ ]] is
          # lexicographic); refuses to rm -rf '/' or an empty path.
          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
            rm -rf "${TMPPATH}"
          else
            echo Not a directory.
          fi
          # Fix: the unquoted '[ -n $USER_RUNNER_ID ]' was always true, even
          # when the variable is unset. USER_RUNNER_ID is not defined in this
          # step's environment (it was only a shell variable in the docker-run
          # step), so the chmod fallback is the branch that actually works.
          if [ -n "$USER_RUNNER_ID" ]; then
            echo "Fix temp directories owner..."
            chown -R "$USER_RUNNER_ID" "$(pwd)/testing" || true
            chown -R "$USER_RUNNER_ID" "/deckhouse/testing" || true
            chown -R "$USER_RUNNER_ID" /tmp || true
          else
            echo "Fix temp directories permissions..."
            chmod -f -R 777 "$(pwd)/testing" || true
            chmod -f -R 777 "/deckhouse/testing" || true
            chmod -f -R 777 /tmp || true
          fi

      # <template: update_comment_on_finish>
      # Posts the job result back to the dispatching issue comment. The
      # needs/job/steps contexts are passed via env as JSON because they are
      # not directly available inside github-script.
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'job,separate';
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.25';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>
  # </template: e2e_run_job_template>

  # <template: e2e_run_job_template>
  run_containerd_1_26:
    name: "destroy cluster: EKS, Containerd, Kubernetes 1.26"
    if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.26' && github.event.inputs.layout == 'WithoutNAT' }}
    env:
      PROVIDER: EKS
      CRI: Containerd
      LAYOUT: WithoutNAT
      KUBERNETES_VERSION: "1.26"
      EVENT_LABEL: ${{ github.event.label.name }}
    runs-on: [self-hosted, e2e-common]
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

      # <template: checkout_from_event_ref_step>
      - name: Checkout sources
        uses: actions/checkout@v3.5.2
        with:
          ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
          fetch-depth: 0
      # </template: checkout_from_event_ref_step>
      # <template: update_comment_on_start>
      - name: Update comment on start
        if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.26';

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnStart({github, context, core, name})

      # </template: update_comment_on_start>


      # <template: login_dev_registry_step>
      - name: Check dev registry credentials
        id: check_dev_registry
        env:
          HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to dev registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_dev_registry_step>

      # <template: login_rw_registry_step>
      - name: Check rw registry credentials
        id: check_rw_registry
        env:
          HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to rw registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
          logout: false
      - name: Login to Github Container Registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
          password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_rw_registry_step>

      # <template: werf_install_step>
      - name: Install werf CLI
        uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
        with:
          channel: ${{env.WERF_CHANNEL}}
      # </template: werf_install_step>

      # Prepares the per-run temp dir, pulls the installer image and
      # publishes image names/tags as step outputs for later steps.
      - name: Setup
        id: setup
        env:
          DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
          DHCTL_PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_PATH: ${{ github.event.inputs.installer_image_path }}
        run: |
          # Create tmppath for test script. Fail fast if a previous run left
          # the directory behind — its state must not be reused silently.
          TMP_DIR_PATH="/mnt/cloud-layouts/layouts/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${DHCTL_PREFIX}"
          if [[ -d "${TMP_DIR_PATH}" ]] ; then
            echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
            ls -la "${TMP_DIR_PATH}"
            exit 1
          else
            echo "Create temporary dir for job: ${TMP_DIR_PATH}."
            mkdir -p "${TMP_DIR_PATH}"
          fi

          INSTALL_IMAGE_NAME="${DECKHOUSE_REGISTRY_HOST:-}${INSTALL_IMAGE_PATH}"

          SAFE_IMAGE_NAME=$(echo "${INSTALL_IMAGE_NAME}" | tr '[:lower:]' '[:upper:]')
          # NOTE(review): REF_FULL is never set in this workflow, so it
          # expands to an empty string here — confirm against the generator.
          echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"

          # Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
          echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
          docker pull "${INSTALL_IMAGE_NAME}"

          # The deckhouse image tag is everything after the ':' in the
          # installer path (e.g. 'sys/install:TAG').
          arrPath=(${INSTALL_IMAGE_PATH//:/ })
          DECKHOUSE_IMAGE_TAG="${arrPath[1]}"
          # Fix: dropped the unused IMAGE_TAG assignment that referenced the
          # GitLab-only CI_COMMIT_REF_SLUG variable (always empty on GitHub).
          BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
          TERRAFORM_IMAGE_NAME="${BRANCH_REGISTRY_PATH}/e2e-terraform:${DECKHOUSE_IMAGE_TAG}"

          echo '::echo::on'
          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
          echo "install-image-full=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "deckhouse-image-tag=${DECKHOUSE_IMAGE_TAG}" >> $GITHUB_OUTPUT
          echo "terraform-image-name=${TERRAFORM_IMAGE_NAME}" >> $GITHUB_OUTPUT
          # NOTE(review): INITIAL_IMAGE_TAG is never set in this job, so this
          # output is always empty — consumers must tolerate that.
          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT

          echo '::echo::off'

      - name: "Download state"
        id: download_artifact_with_state
        uses: dawidd6/action-download-artifact@v2.23.0
        with:
          github_token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          run_id: ${{github.event.inputs.run_id}}
          name: ${{github.event.inputs.state_artifact_name}}
          path: ${{ steps.setup.outputs.tmp-dir-path}}

      # Runs 'script_eks.sh cleanup' inside the e2e-terraform image against
      # the downloaded state to destroy the bootstrapped cluster.
      - name: Cleanup bootstrapped cluster
        if: ${{ success() }}
        id: cleanup_cluster
        env:
          PROVIDER: EKS
          CRI: Containerd
          LAYOUT: WithoutNAT
          KUBERNETES_VERSION: "1.26"
          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
          PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-full }}
          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
          TERRAFORM_IMAGE_NAME: ${{ steps.setup.outputs.terraform-image-name }}
          SSH_MASTER_CONNECTION_STRING: ${{ github.event.inputs.ssh_master_connection_string }}
        # <template: e2e_run_template>
          LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
          LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
          COMMENT_ID: ${{ inputs.comment_id }}
          GITHUB_API_SERVER: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          # NOTE(review): the setup step does not emit a 'dhctl-log-file'
          # output, so DHCTL_LOG_FILE is always empty here — confirm against
          # the generator template.
          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
        run: |
          echo "Execute 'script_eks.sh cleanup' via 'docker run', using environment:
            TERRAFORM_IMAGE_NAME=${TERRAFORM_IMAGE_NAME}
            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
            PREFIX=${PREFIX}
            PROVIDER=${PROVIDER}
            CRI=${CRI}
            LAYOUT=${LAYOUT}
            KUBERNETES_VERSION=${KUBERNETES_VERSION}
            TMP_DIR_PATH=${TMP_DIR_PATH}
          "

          ls -lh $(pwd)/testing

          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
          echo "DHCTL log file: $dhctl_log_file"

          user_runner_id=$(id -u):$(id -g)
          echo "user_runner_id $user_runner_id"
          chmod 755 $(pwd)/testing/cloud_layouts/script_eks.sh

          # Fix: removed the duplicated '-e CRI=${CRI}' flag; values are
          # quoted so expansions with spaces cannot split into extra args.
          docker run --rm \
          -e DECKHOUSE_DOCKERCFG="${LAYOUT_DECKHOUSE_DOCKERCFG}" \
          -e DECKHOUSE_IMAGE_TAG="${DECKHOUSE_IMAGE_TAG}" \
          -e INITIAL_IMAGE_TAG="${INITIAL_IMAGE_TAG}" \
          -e CRI="${CRI}" \
          -e LAYOUT_AWS_ACCESS_KEY="${LAYOUT_AWS_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_SECRET_ACCESS_KEY="${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_DEFAULT_REGION=eu-central-1 \
          -e LAYOUT="${LAYOUT}" \
          -e KUBERNETES_VERSION="${KUBERNETES_VERSION}" \
          -e USER_RUNNER_ID="${user_runner_id}" \
          -v "$(pwd)/testing":/deckhouse/testing \
          -v "${TMP_DIR_PATH}":/tmp \
          "${TERRAFORM_IMAGE_NAME}" \
          bash /deckhouse/testing/cloud_layouts/script_eks.sh cleanup

        # </template: e2e_run_template>

      - name: Remove failed cluster label
        if: ${{ success() }}
        uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
          number: ${{ github.event.inputs.issue_number }}
          labels: "e2e/cluster/failed"

      # Always remove the per-run temp dir and relax permissions on the
      # files the containerized cleanup may have created as root.
      - name: Cleanup temp directory
        if: always()
        env:
          # Fix: the setup step outputs 'tmp-dir-path', not 'tmppath'; the old
          # reference always expanded to an empty string, so the temporary
          # directory was never removed.
          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
        run: |
          echo "Remove temporary directory '${TMPPATH}' ..."
          # Length guard uses an arithmetic comparison ('>' inside [[ ]] is
          # lexicographic); refuses to rm -rf '/' or an empty path.
          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
            rm -rf "${TMPPATH}"
          else
            echo Not a directory.
          fi
          # Fix: the unquoted '[ -n $USER_RUNNER_ID ]' was always true, even
          # when the variable is unset. USER_RUNNER_ID is not defined in this
          # step's environment (it was only a shell variable in the docker-run
          # step), so the chmod fallback is the branch that actually works.
          if [ -n "$USER_RUNNER_ID" ]; then
            echo "Fix temp directories owner..."
            chown -R "$USER_RUNNER_ID" "$(pwd)/testing" || true
            chown -R "$USER_RUNNER_ID" "/deckhouse/testing" || true
            chown -R "$USER_RUNNER_ID" /tmp || true
          else
            echo "Fix temp directories permissions..."
            chmod -f -R 777 "$(pwd)/testing" || true
            chmod -f -R 777 "/deckhouse/testing" || true
            chmod -f -R 777 /tmp || true
          fi

      # <template: update_comment_on_finish>
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'job,separate';
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.26';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>
  # </template: e2e_run_job_template>

  # <template: e2e_run_job_template>
  run_containerd_1_27:
    name: "destroy cluster: EKS, Containerd, Kubernetes 1.27"
    if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.27' && github.event.inputs.layout == 'WithoutNAT' }}
    env:
      PROVIDER: EKS
      CRI: Containerd
      LAYOUT: WithoutNAT
      KUBERNETES_VERSION: "1.27"
      EVENT_LABEL: ${{ github.event.label.name }}
    runs-on: [self-hosted, e2e-common]
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

      # <template: checkout_from_event_ref_step>
      - name: Checkout sources
        uses: actions/checkout@v3.5.2
        with:
          ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
          fetch-depth: 0
      # </template: checkout_from_event_ref_step>
      # <template: update_comment_on_start>
      - name: Update comment on start
        if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.27';

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnStart({github, context, core, name})

      # </template: update_comment_on_start>


      # <template: login_dev_registry_step>
      - name: Check dev registry credentials
        id: check_dev_registry
        env:
          HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to dev registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_dev_registry_step>

      # <template: login_rw_registry_step>
      - name: Check rw registry credentials
        id: check_rw_registry
        env:
          HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to rw registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
          logout: false
      - name: Login to Github Container Registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
          password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_rw_registry_step>

      # <template: werf_install_step>
      - name: Install werf CLI
        uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
        with:
          channel: ${{env.WERF_CHANNEL}}
      # </template: werf_install_step>

      # Prepares the per-run temp dir, pulls the installer image and
      # publishes image names/tags as step outputs for later steps.
      - name: Setup
        id: setup
        env:
          DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
          DHCTL_PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_PATH: ${{ github.event.inputs.installer_image_path }}
        run: |
          # Create tmppath for test script. Fail fast if a previous run left
          # the directory behind — its state must not be reused silently.
          TMP_DIR_PATH="/mnt/cloud-layouts/layouts/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${DHCTL_PREFIX}"
          if [[ -d "${TMP_DIR_PATH}" ]] ; then
            echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
            ls -la "${TMP_DIR_PATH}"
            exit 1
          else
            echo "Create temporary dir for job: ${TMP_DIR_PATH}."
            mkdir -p "${TMP_DIR_PATH}"
          fi

          INSTALL_IMAGE_NAME="${DECKHOUSE_REGISTRY_HOST:-}${INSTALL_IMAGE_PATH}"

          SAFE_IMAGE_NAME=$(echo "${INSTALL_IMAGE_NAME}" | tr '[:lower:]' '[:upper:]')
          # NOTE(review): REF_FULL is never set in this workflow, so it
          # expands to an empty string here — confirm against the generator.
          echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"

          # Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
          echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
          docker pull "${INSTALL_IMAGE_NAME}"

          # The deckhouse image tag is everything after the ':' in the
          # installer path (e.g. 'sys/install:TAG').
          arrPath=(${INSTALL_IMAGE_PATH//:/ })
          DECKHOUSE_IMAGE_TAG="${arrPath[1]}"
          # Fix: dropped the unused IMAGE_TAG assignment that referenced the
          # GitLab-only CI_COMMIT_REF_SLUG variable (always empty on GitHub).
          BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
          TERRAFORM_IMAGE_NAME="${BRANCH_REGISTRY_PATH}/e2e-terraform:${DECKHOUSE_IMAGE_TAG}"

          echo '::echo::on'
          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
          echo "install-image-full=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "deckhouse-image-tag=${DECKHOUSE_IMAGE_TAG}" >> $GITHUB_OUTPUT
          echo "terraform-image-name=${TERRAFORM_IMAGE_NAME}" >> $GITHUB_OUTPUT
          # NOTE(review): INITIAL_IMAGE_TAG is never set in this job, so this
          # output is always empty — consumers must tolerate that.
          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT

          echo '::echo::off'

      - name: "Download state"
        id: download_artifact_with_state
        uses: dawidd6/action-download-artifact@v2.23.0
        with:
          github_token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          run_id: ${{github.event.inputs.run_id}}
          name: ${{github.event.inputs.state_artifact_name}}
          path: ${{ steps.setup.outputs.tmp-dir-path}}

      - name: Cleanup bootstrapped cluster
        if: ${{ success() }}
        id: cleanup_cluster
        env:
          PROVIDER: EKS
          CRI: Containerd
          LAYOUT: WithoutNAT
          KUBERNETES_VERSION: "1.27"
          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY }}
          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path }}
          PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-full }}
          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
          TERRAFORM_IMAGE_NAME: ${{ steps.setup.outputs.terraform-image-name }}
          SSH_MASTER_CONNECTION_STRING: ${{ github.event.inputs.ssh_master_connection_string }}
        # <template: e2e_run_template>
          LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
          LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
          COMMENT_ID: ${{ inputs.comment_id }}
          GITHUB_API_SERVER: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          # NOTE(review): the setup step never writes a 'dhctl-log-file'
          # output, so this expands to an empty string; dhctl_log_file below
          # is only echoed, never consumed — confirm against the template.
          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file }}
          GITHUB_TOKEN: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
        run: |
          # Print the effective environment to ease debugging of failed runs.
          echo "Execute 'script_eks.sh cleanup' via 'docker run', using environment:
            TERRAFORM_IMAGE_NAME=${TERRAFORM_IMAGE_NAME}
            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
            PREFIX=${PREFIX}
            PROVIDER=${PROVIDER}
            CRI=${CRI}
            LAYOUT=${LAYOUT}
            KUBERNETES_VERSION=${KUBERNETES_VERSION}
            TMP_DIR_PATH=${TMP_DIR_PATH}
          "

          ls -lh $(pwd)/testing

          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
          echo "DHCTL log file: $dhctl_log_file"

          user_runner_id=$(id -u):$(id -g)
          echo "user_runner_id $user_runner_id"
          chmod 755 $(pwd)/testing/cloud_layouts/script_eks.sh

          # Fix: the original passed '-e CRI=${CRI}' twice; the duplicate is
          # removed (docker applies the last occurrence, both were identical).
          # Expansions that may contain spaces/metacharacters are quoted.
          docker run --rm \
          -e DECKHOUSE_DOCKERCFG="${LAYOUT_DECKHOUSE_DOCKERCFG}" \
          -e DECKHOUSE_IMAGE_TAG="${DECKHOUSE_IMAGE_TAG}" \
          -e INITIAL_IMAGE_TAG="${INITIAL_IMAGE_TAG}" \
          -e CRI="${CRI}" \
          -e LAYOUT_AWS_ACCESS_KEY="${LAYOUT_AWS_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_SECRET_ACCESS_KEY="${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_DEFAULT_REGION=eu-central-1 \
          -e LAYOUT="${LAYOUT}" \
          -e KUBERNETES_VERSION="${KUBERNETES_VERSION}" \
          -e USER_RUNNER_ID="${user_runner_id}" \
          -v "$(pwd)/testing:/deckhouse/testing" \
          -v "${TMP_DIR_PATH}:/tmp" \
          "${TERRAFORM_IMAGE_NAME}" \
          bash /deckhouse/testing/cloud_layouts/script_eks.sh cleanup

        # </template: e2e_run_template>

      - name: Remove failed cluster label
        if: ${{ success() }}
        uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
          number: ${{ github.event.inputs.issue_number }}
          labels: "e2e/cluster/failed"

      - name: Cleanup temp directory
        if: always()
        env:
          # BUG FIX: the setup step publishes the path as output
          # 'tmp-dir-path'; the previous 'tmppath' output does not exist, so
          # TMPPATH was always empty, the temporary directory leaked, and the
          # next run of the same prefix failed in the Setup step
          # ("Temporary dir already exists").
          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
        run: |
          echo "Remove temporary directory '${TMPPATH}' ..."
          # Guard against 'rm -rf' on '' or '/': require an existing
          # directory whose path is longer than one character. Use -gt for a
          # numeric test ('>' inside [[ ]] compares lexicographically).
          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
            rm -rf "${TMPPATH}"
          else
            echo Not a directory.
          fi
          # Quote the expansion: unquoted '[ -n $USER_RUNNER_ID ]' is a
          # one-argument test and is always true when the variable is
          # empty/unset, wrongly selecting the chown branch.
          # NOTE(review): USER_RUNNER_ID is not set in this step's env —
          # presumably expected from the runner environment; with the quoting
          # fix the chmod fallback runs when it is absent.
          if [ -n "$USER_RUNNER_ID" ]; then
            echo "Fix temp directories owner..."
            chown -R "$USER_RUNNER_ID" "$(pwd)/testing" || true
            chown -R "$USER_RUNNER_ID" "/deckhouse/testing" || true
            chown -R "$USER_RUNNER_ID" /tmp || true
          else
            echo "Fix temp directories permissions..."
            chmod -f -R 777 "$(pwd)/testing" || true
            chmod -f -R 777 "/deckhouse/testing" || true
            chmod -f -R 777 /tmp || true
          fi

      # <template: update_comment_on_finish>
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'job,separate';
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.27';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>
  # </template: e2e_run_job_template>

  # <template: e2e_run_job_template>
  run_containerd_1_28:
    name: "destroy cluster: EKS, Containerd, Kubernetes 1.28"
    if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.28' && github.event.inputs.layout == 'WithoutNAT' }}
    env:
      PROVIDER: EKS
      CRI: Containerd
      LAYOUT: WithoutNAT
      KUBERNETES_VERSION: "1.28"
      EVENT_LABEL: ${{ github.event.label.name }}
    runs-on: [self-hosted, e2e-common]
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

      # <template: checkout_from_event_ref_step>
      - name: Checkout sources
        uses: actions/checkout@v3.5.2
        with:
          ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
          fetch-depth: 0
      # </template: checkout_from_event_ref_step>
      # <template: update_comment_on_start>
      - name: Update comment on start
        if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.28';

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnStart({github, context, core, name})

      # </template: update_comment_on_start>


      # <template: login_dev_registry_step>
      - name: Check dev registry credentials
        id: check_dev_registry
        env:
          HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to dev registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_dev_registry_step>

      # <template: login_rw_registry_step>
      - name: Check rw registry credentials
        id: check_rw_registry
        env:
          HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to rw registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
          logout: false
      - name: Login to Github Container Registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
          password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_rw_registry_step>

      # <template: werf_install_step>
      - name: Install werf CLI
        uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
        with:
          channel: ${{env.WERF_CHANNEL}}
      # </template: werf_install_step>

      - name: Setup
        id: setup
        env:
          DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
          DHCTL_PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_PATH: ${{ github.event.inputs.installer_image_path }}
        run: |
          # Create tmppath for test script.
          TMP_DIR_PATH="/mnt/cloud-layouts/layouts/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${DHCTL_PREFIX}"
          if [[ -d "${TMP_DIR_PATH}" ]] ; then
            echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
            ls -la ${TMP_DIR_PATH}
            exit 1
          else
            echo "Create temporary dir for job: ${TMP_DIR_PATH}."
            mkdir -p "${TMP_DIR_PATH}"
          fi

          INSTALL_IMAGE_NAME="${DECKHOUSE_REGISTRY_HOST:-}${INSTALL_IMAGE_PATH}"

          SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
          echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"

          # Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
          echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
          docker pull "${INSTALL_IMAGE_NAME}"

          arrPath=(${INSTALL_IMAGE_PATH//:/ })
          DECKHOUSE_IMAGE_TAG="${arrPath[1]}"
          IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
          BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
          TERRAFORM_IMAGE_NAME="${BRANCH_REGISTRY_PATH}/e2e-terraform:${DECKHOUSE_IMAGE_TAG}"

          echo '::echo::on'
          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
          echo "install-image-full=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "deckhouse-image-tag=${DECKHOUSE_IMAGE_TAG}" >> $GITHUB_OUTPUT
          echo "terraform-image-name=${TERRAFORM_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT

          echo '::echo::off'

      - name: "Download state"
        id: download_artifact_with_state
        uses: dawidd6/action-download-artifact@v2.23.0
        with:
          github_token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          run_id: ${{github.event.inputs.run_id}}
          name: ${{github.event.inputs.state_artifact_name}}
          path: ${{ steps.setup.outputs.tmp-dir-path}}

      - name: Cleanup bootstrapped cluster
        if: ${{ success() }}
        id: cleanup_cluster
        env:
          PROVIDER: EKS
          CRI: Containerd
          LAYOUT: WithoutNAT
          KUBERNETES_VERSION: "1.28"
          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY }}
          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path }}
          PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-full }}
          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
          TERRAFORM_IMAGE_NAME: ${{ steps.setup.outputs.terraform-image-name }}
          SSH_MASTER_CONNECTION_STRING: ${{ github.event.inputs.ssh_master_connection_string }}
        # <template: e2e_run_template>
          LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
          LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
          COMMENT_ID: ${{ inputs.comment_id }}
          GITHUB_API_SERVER: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          # NOTE(review): the setup step never writes a 'dhctl-log-file'
          # output, so this expands to an empty string; dhctl_log_file below
          # is only echoed, never consumed — confirm against the template.
          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file }}
          GITHUB_TOKEN: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
        run: |
          # Print the effective environment to ease debugging of failed runs.
          echo "Execute 'script_eks.sh cleanup' via 'docker run', using environment:
            TERRAFORM_IMAGE_NAME=${TERRAFORM_IMAGE_NAME}
            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
            PREFIX=${PREFIX}
            PROVIDER=${PROVIDER}
            CRI=${CRI}
            LAYOUT=${LAYOUT}
            KUBERNETES_VERSION=${KUBERNETES_VERSION}
            TMP_DIR_PATH=${TMP_DIR_PATH}
          "

          ls -lh $(pwd)/testing

          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
          echo "DHCTL log file: $dhctl_log_file"

          user_runner_id=$(id -u):$(id -g)
          echo "user_runner_id $user_runner_id"
          chmod 755 $(pwd)/testing/cloud_layouts/script_eks.sh

          # Fix: the original passed '-e CRI=${CRI}' twice; the duplicate is
          # removed (docker applies the last occurrence, both were identical).
          # Expansions that may contain spaces/metacharacters are quoted.
          docker run --rm \
          -e DECKHOUSE_DOCKERCFG="${LAYOUT_DECKHOUSE_DOCKERCFG}" \
          -e DECKHOUSE_IMAGE_TAG="${DECKHOUSE_IMAGE_TAG}" \
          -e INITIAL_IMAGE_TAG="${INITIAL_IMAGE_TAG}" \
          -e CRI="${CRI}" \
          -e LAYOUT_AWS_ACCESS_KEY="${LAYOUT_AWS_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_SECRET_ACCESS_KEY="${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_DEFAULT_REGION=eu-central-1 \
          -e LAYOUT="${LAYOUT}" \
          -e KUBERNETES_VERSION="${KUBERNETES_VERSION}" \
          -e USER_RUNNER_ID="${user_runner_id}" \
          -v "$(pwd)/testing:/deckhouse/testing" \
          -v "${TMP_DIR_PATH}:/tmp" \
          "${TERRAFORM_IMAGE_NAME}" \
          bash /deckhouse/testing/cloud_layouts/script_eks.sh cleanup

        # </template: e2e_run_template>

      - name: Remove failed cluster label
        if: ${{ success() }}
        uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
          number: ${{ github.event.inputs.issue_number }}
          labels: "e2e/cluster/failed"

      - name: Cleanup temp directory
        if: always()
        env:
          # BUG FIX: the setup step publishes the path as output
          # 'tmp-dir-path'; the previous 'tmppath' output does not exist, so
          # TMPPATH was always empty, the temporary directory leaked, and the
          # next run of the same prefix failed in the Setup step
          # ("Temporary dir already exists").
          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
        run: |
          echo "Remove temporary directory '${TMPPATH}' ..."
          # Guard against 'rm -rf' on '' or '/': require an existing
          # directory whose path is longer than one character. Use -gt for a
          # numeric test ('>' inside [[ ]] compares lexicographically).
          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
            rm -rf "${TMPPATH}"
          else
            echo Not a directory.
          fi
          # Quote the expansion: unquoted '[ -n $USER_RUNNER_ID ]' is a
          # one-argument test and is always true when the variable is
          # empty/unset, wrongly selecting the chown branch.
          # NOTE(review): USER_RUNNER_ID is not set in this step's env —
          # presumably expected from the runner environment; with the quoting
          # fix the chmod fallback runs when it is absent.
          if [ -n "$USER_RUNNER_ID" ]; then
            echo "Fix temp directories owner..."
            chown -R "$USER_RUNNER_ID" "$(pwd)/testing" || true
            chown -R "$USER_RUNNER_ID" "/deckhouse/testing" || true
            chown -R "$USER_RUNNER_ID" /tmp || true
          else
            echo "Fix temp directories permissions..."
            chmod -f -R 777 "$(pwd)/testing" || true
            chmod -f -R 777 "/deckhouse/testing" || true
            chmod -f -R 777 /tmp || true
          fi

      # <template: update_comment_on_finish>
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'job,separate';
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.28';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>
  # </template: e2e_run_job_template>

  # <template: e2e_run_job_template>
  run_containerd_1_29:
    name: "destroy cluster: EKS, Containerd, Kubernetes 1.29"
    if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.29' && github.event.inputs.layout == 'WithoutNAT' }}
    env:
      PROVIDER: EKS
      CRI: Containerd
      LAYOUT: WithoutNAT
      KUBERNETES_VERSION: "1.29"
      EVENT_LABEL: ${{ github.event.label.name }}
    runs-on: [self-hosted, e2e-common]
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

      # <template: checkout_from_event_ref_step>
      - name: Checkout sources
        uses: actions/checkout@v3.5.2
        with:
          ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
          fetch-depth: 0
      # </template: checkout_from_event_ref_step>
      # <template: update_comment_on_start>
      - name: Update comment on start
        if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.29';

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnStart({github, context, core, name})

      # </template: update_comment_on_start>


      # <template: login_dev_registry_step>
      - name: Check dev registry credentials
        id: check_dev_registry
        env:
          HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to dev registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_dev_registry_step>

      # <template: login_rw_registry_step>
      - name: Check rw registry credentials
        id: check_rw_registry
        env:
          HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to rw registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
          logout: false
      - name: Login to Github Container Registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
          password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_rw_registry_step>

      # <template: werf_install_step>
      - name: Install werf CLI
        uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
        with:
          channel: ${{env.WERF_CHANNEL}}
      # </template: werf_install_step>

      - name: Setup
        id: setup
        env:
          DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
          DHCTL_PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_PATH: ${{ github.event.inputs.installer_image_path }}
        run: |
          # Create tmppath for test script.
          TMP_DIR_PATH="/mnt/cloud-layouts/layouts/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${DHCTL_PREFIX}"
          if [[ -d "${TMP_DIR_PATH}" ]] ; then
            echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
            ls -la ${TMP_DIR_PATH}
            exit 1
          else
            echo "Create temporary dir for job: ${TMP_DIR_PATH}."
            mkdir -p "${TMP_DIR_PATH}"
          fi

          INSTALL_IMAGE_NAME="${DECKHOUSE_REGISTRY_HOST:-}${INSTALL_IMAGE_PATH}"

          SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
          echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"

          # Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
          echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
          docker pull "${INSTALL_IMAGE_NAME}"

          arrPath=(${INSTALL_IMAGE_PATH//:/ })
          DECKHOUSE_IMAGE_TAG="${arrPath[1]}"
          IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
          BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
          TERRAFORM_IMAGE_NAME="${BRANCH_REGISTRY_PATH}/e2e-terraform:${DECKHOUSE_IMAGE_TAG}"

          echo '::echo::on'
          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
          echo "install-image-full=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "deckhouse-image-tag=${DECKHOUSE_IMAGE_TAG}" >> $GITHUB_OUTPUT
          echo "terraform-image-name=${TERRAFORM_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT

          echo '::echo::off'

      - name: "Download state"
        id: download_artifact_with_state
        uses: dawidd6/action-download-artifact@v2.23.0
        with:
          github_token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          run_id: ${{github.event.inputs.run_id}}
          name: ${{github.event.inputs.state_artifact_name}}
          path: ${{ steps.setup.outputs.tmp-dir-path}}

      - name: Cleanup bootstrapped cluster
        if: ${{ success() }}
        id: cleanup_cluster
        env:
          PROVIDER: EKS
          CRI: Containerd
          LAYOUT: WithoutNAT
          KUBERNETES_VERSION: "1.29"
          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY }}
          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path }}
          PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-full }}
          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
          TERRAFORM_IMAGE_NAME: ${{ steps.setup.outputs.terraform-image-name }}
          SSH_MASTER_CONNECTION_STRING: ${{ github.event.inputs.ssh_master_connection_string }}
        # <template: e2e_run_template>
          LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
          LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
          COMMENT_ID: ${{ inputs.comment_id }}
          GITHUB_API_SERVER: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          # NOTE(review): the setup step never writes a 'dhctl-log-file'
          # output, so this expands to an empty string; dhctl_log_file below
          # is only echoed, never consumed — confirm against the template.
          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file }}
          GITHUB_TOKEN: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
        run: |
          # Print the effective environment to ease debugging of failed runs.
          echo "Execute 'script_eks.sh cleanup' via 'docker run', using environment:
            TERRAFORM_IMAGE_NAME=${TERRAFORM_IMAGE_NAME}
            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
            PREFIX=${PREFIX}
            PROVIDER=${PROVIDER}
            CRI=${CRI}
            LAYOUT=${LAYOUT}
            KUBERNETES_VERSION=${KUBERNETES_VERSION}
            TMP_DIR_PATH=${TMP_DIR_PATH}
          "

          ls -lh $(pwd)/testing

          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
          echo "DHCTL log file: $dhctl_log_file"

          user_runner_id=$(id -u):$(id -g)
          echo "user_runner_id $user_runner_id"
          chmod 755 $(pwd)/testing/cloud_layouts/script_eks.sh

          # Fix: the original passed '-e CRI=${CRI}' twice; the duplicate is
          # removed (docker applies the last occurrence, both were identical).
          # Expansions that may contain spaces/metacharacters are quoted.
          docker run --rm \
          -e DECKHOUSE_DOCKERCFG="${LAYOUT_DECKHOUSE_DOCKERCFG}" \
          -e DECKHOUSE_IMAGE_TAG="${DECKHOUSE_IMAGE_TAG}" \
          -e INITIAL_IMAGE_TAG="${INITIAL_IMAGE_TAG}" \
          -e CRI="${CRI}" \
          -e LAYOUT_AWS_ACCESS_KEY="${LAYOUT_AWS_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_SECRET_ACCESS_KEY="${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided}" \
          -e LAYOUT_AWS_DEFAULT_REGION=eu-central-1 \
          -e LAYOUT="${LAYOUT}" \
          -e KUBERNETES_VERSION="${KUBERNETES_VERSION}" \
          -e USER_RUNNER_ID="${user_runner_id}" \
          -v "$(pwd)/testing:/deckhouse/testing" \
          -v "${TMP_DIR_PATH}:/tmp" \
          "${TERRAFORM_IMAGE_NAME}" \
          bash /deckhouse/testing/cloud_layouts/script_eks.sh cleanup

        # </template: e2e_run_template>

      - name: Remove failed cluster label
        if: ${{ success() }}
        uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
          number: ${{ github.event.inputs.issue_number }}
          labels: "e2e/cluster/failed"

      - name: Cleanup temp directory
        if: always()
        env:
          # BUG FIX: the setup step publishes the path as output
          # 'tmp-dir-path'; the previous 'tmppath' output does not exist, so
          # TMPPATH was always empty, the temporary directory leaked, and the
          # next run of the same prefix failed in the Setup step
          # ("Temporary dir already exists").
          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
        run: |
          echo "Remove temporary directory '${TMPPATH}' ..."
          # Guard against 'rm -rf' on '' or '/': require an existing
          # directory whose path is longer than one character. Use -gt for a
          # numeric test ('>' inside [[ ]] compares lexicographically).
          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
            rm -rf "${TMPPATH}"
          else
            echo Not a directory.
          fi
          # Quote the expansion: unquoted '[ -n $USER_RUNNER_ID ]' is a
          # one-argument test and is always true when the variable is
          # empty/unset, wrongly selecting the chown branch.
          # NOTE(review): USER_RUNNER_ID is not set in this step's env —
          # presumably expected from the runner environment; with the quoting
          # fix the chmod fallback runs when it is absent.
          if [ -n "$USER_RUNNER_ID" ]; then
            echo "Fix temp directories owner..."
            chown -R "$USER_RUNNER_ID" "$(pwd)/testing" || true
            chown -R "$USER_RUNNER_ID" "/deckhouse/testing" || true
            chown -R "$USER_RUNNER_ID" /tmp || true
          else
            echo "Fix temp directories permissions..."
            chmod -f -R 777 "$(pwd)/testing" || true
            chmod -f -R 777 "/deckhouse/testing" || true
            chmod -f -R 777 /tmp || true
          fi

      # <template: update_comment_on_finish>
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'job,separate';
            const name = 'destroy cluster: EKS, Containerd, Kubernetes 1.29';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>
  # </template: e2e_run_job_template>

  # <template: e2e_run_job_template>
  run_containerd_automatic:
    name: "destroy cluster: EKS, Containerd, Kubernetes Automatic"
    if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == 'Automatic' && github.event.inputs.layout == 'WithoutNAT' }}
    env:
      PROVIDER: EKS
      CRI: Containerd
      LAYOUT: WithoutNAT
      KUBERNETES_VERSION: "1.25"
      EVENT_LABEL: ${{ github.event.label.name }}
    runs-on: [self-hosted, e2e-common]
    steps:

      # <template: started_at_output>
      - name: Job started timestamp
        id: started_at
        run: |
          unixTimestamp=$(date +%s)
          echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
      # </template: started_at_output>

      # <template: checkout_from_event_ref_step>
      - name: Checkout sources
        uses: actions/checkout@v3.5.2
        with:
          ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
          fetch-depth: 0
      # </template: checkout_from_event_ref_step>
      # <template: update_comment_on_start>
      - name: Update comment on start
        if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const name = 'destroy cluster: EKS, Containerd, Kubernetes Automatic';

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnStart({github, context, core, name})

      # </template: update_comment_on_start>


      # <template: login_dev_registry_step>
      # Export 'has_credentials' so the login step below is skipped when the dev
      # registry secret is not configured (e.g. on forks).
      # NOTE(review): 'web_registry_path' is emitted but not referenced in this job —
      # presumably consumed by other templates; verify.
      - name: Check dev registry credentials
        id: check_dev_registry
        env:
          HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to dev registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_dev_registry_step>

      # <template: login_rw_registry_step>
      # Same pattern for the read-write registry; when its credentials are absent,
      # fall back to logging in to GitHub Container Registry instead.
      - name: Check rw registry credentials
        id: check_rw_registry
        env:
          HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
        run: |
          if [[ -n $HOST ]]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
          fi
      - name: Login to rw registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
        with:
          registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
          username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
          password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
          logout: false
      # Fallback login, used only when rw registry credentials are not available.
      - name: Login to Github Container Registry
        uses: docker/login-action@v2.1.0
        if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
          password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
          logout: false
      # </template: login_rw_registry_step>

      # <template: werf_install_step>
      # Install the werf CLI from the channel configured in workflow env (WERF_CHANNEL).
      - name: Install werf CLI
        uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
        with:
          channel: ${{env.WERF_CHANNEL}}
      # </template: werf_install_step>

      # Prepare a temporary state directory, pull the installer image, and expose
      # derived names (tmp dir, image names/tags) as step outputs for later steps.
      - name: Setup
        id: setup
        env:
          DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
          DHCTL_PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_PATH: ${{ github.event.inputs.installer_image_path }}
        run: |
          # Create a job-unique temporary dir for dhctl/terraform state; fail fast
          # if it already exists (leftover from a previous run with the same prefix).
          TMP_DIR_PATH="/mnt/cloud-layouts/layouts/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${DHCTL_PREFIX}"
          if [[ -d "${TMP_DIR_PATH}" ]] ; then
            echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
            ls -la "${TMP_DIR_PATH}"
            exit 1
          else
            echo "Create temporary dir for job: ${TMP_DIR_PATH}."
            mkdir -p "${TMP_DIR_PATH}"
          fi

          INSTALL_IMAGE_NAME="${DECKHOUSE_REGISTRY_HOST:-}${INSTALL_IMAGE_PATH}"

          # Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
          SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
          # NOTE(review): REF_FULL is not defined in this step's env and expands
          # empty here — confirm against the workflow template.
          echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"

          echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
          docker pull "${INSTALL_IMAGE_NAME}"

          # Derive the image tag from the 'path:tag' installer image reference.
          # Fix: removed the unused IMAGE_TAG assignment that was built from
          # undefined GitLab CI variables (CI_COMMIT_REF_SLUG, REPO_SUFFIX).
          arrPath=(${INSTALL_IMAGE_PATH//:/ })
          DECKHOUSE_IMAGE_TAG="${arrPath[1]}"
          BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
          TERRAFORM_IMAGE_NAME="${BRANCH_REGISTRY_PATH}/e2e-terraform:${DECKHOUSE_IMAGE_TAG}"

          # Echo workflow commands so the emitted outputs are visible in the log.
          echo '::echo::on'
          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
          echo "install-image-full=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "deckhouse-image-tag=${DECKHOUSE_IMAGE_TAG}" >> $GITHUB_OUTPUT
          echo "terraform-image-name=${TERRAFORM_IMAGE_NAME}" >> $GITHUB_OUTPUT
          # NOTE(review): INITIAL_IMAGE_TAG is never assigned in this step, so this
          # output is always empty — confirm intent against the workflow template.
          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT

          echo '::echo::off'

      # Fetch the dhctl/terraform state artifact saved by the failed e2e run
      # (identified by the 'run_id' / 'state_artifact_name' inputs) into the
      # temporary dir created by the 'setup' step.
      - name: "Download state"
        id: download_artifact_with_state
        uses: dawidd6/action-download-artifact@v2.23.0
        with:
          github_token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          run_id: ${{github.event.inputs.run_id}}
          name: ${{github.event.inputs.state_artifact_name}}
          path: ${{ steps.setup.outputs.tmp-dir-path}}

      # Run the EKS cleanup script inside the e2e-terraform container, using the
      # state downloaded in the previous step (mounted at /tmp in the container).
      - name: Cleanup bootstrapped cluster
        if: ${{ success() }}
        id: cleanup_cluster
        env:
          PROVIDER: EKS
          CRI: Containerd
          LAYOUT: WithoutNAT
          KUBERNETES_VERSION: "1.25"
          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
          PREFIX: ${{ github.event.inputs.cluster_prefix }}
          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-full }}
          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
          TERRAFORM_IMAGE_NAME: ${{ steps.setup.outputs.terraform-image-name }}
          SSH_MASTER_CONNECTION_STRING: ${{ github.event.inputs.ssh_master_connection_string }}
        # <template: e2e_run_template>
          LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
          LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
          COMMENT_ID: ${{ inputs.comment_id }}
          GITHUB_API_SERVER: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          # NOTE(review): the 'setup' step does not emit a 'dhctl-log-file' output,
          # so this is empty and the log path below starts with '-'; confirm against
          # the workflow template.
          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
        run: |
          # Show the parameters the cleanup script will run with.
          echo "Execute 'script_eks.sh cleanup' via 'docker run', using environment:
            TERRAFORM_IMAGE_NAME=${TERRAFORM_IMAGE_NAME}
            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
            PREFIX=${PREFIX}
            PROVIDER=${PROVIDER}
            CRI=${CRI}
            LAYOUT=${LAYOUT}
            KUBERNETES_VERSION=${KUBERNETES_VERSION}
            TMP_DIR_PATH=${TMP_DIR_PATH}
          "

          ls -lh $(pwd)/testing

          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
          echo "DHCTL log file: $dhctl_log_file"

          # Record the runner's uid:gid so the script can restore ownership of files
          # it creates in the mounted volumes.
          user_runner_id=$(id -u):$(id -g)
          echo "user_runner_id $user_runner_id"
          chmod 755 $(pwd)/testing/cloud_layouts/script_eks.sh

          # NOTE(review): INITIAL_IMAGE_TAG is not defined in this step's env and is
          # passed through empty; confirm intent.
          # Fix: removed the duplicate '-e CRI=${CRI}' flag (it was passed twice).
          docker run --rm \
          -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
          -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
          -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
          -e CRI=${CRI} \
          -e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
          -e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
          -e LAYOUT_AWS_DEFAULT_REGION=eu-central-1 \
          -e LAYOUT=${LAYOUT} \
          -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
          -e USER_RUNNER_ID=${user_runner_id} \
          -v $(pwd)/testing:/deckhouse/testing \
          -v ${TMP_DIR_PATH}:/tmp \
          ${TERRAFORM_IMAGE_NAME} \
          bash /deckhouse/testing/cloud_layouts/script_eks.sh cleanup

        # </template: e2e_run_template>

      # After a successful cleanup, drop the 'e2e/cluster/failed' label from the
      # tracking issue so it no longer flags a broken cluster.
      - name: Remove failed cluster label
        if: ${{ success() }}
        uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
          number: ${{ github.event.inputs.issue_number }}
          labels: "e2e/cluster/failed"

      # Remove the per-job temporary state directory, even if earlier steps failed.
      - name: Cleanup temp directory
        if: always()
        env:
          # Fix: the 'setup' step exposes the directory as 'tmp-dir-path' — there is
          # no 'tmppath' output, so the old reference always expanded empty and the
          # directory was never removed.
          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}}
        run: |
          echo "Remove temporary directory '${TMPPATH}' ..."
          # Guard 'rm -rf' against empty or root-ish paths: require an existing
          # directory whose path is longer than one character. Use numeric -gt —
          # inside [[ ]] the '>' operator is a lexicographic string comparison.
          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
            rm -rf "${TMPPATH}"
          else
            echo Not a directory.
          fi
          # NOTE(review): USER_RUNNER_ID is not set in this step's env (it is only
          # passed into the docker container above), so the chmod branch is expected.
          # Fix: quoted the variable — an unquoted empty value made '[ -n ]' always
          # true, so the chown branch ran with an empty owner argument.
          if [ -n "$USER_RUNNER_ID" ]; then
            echo "Fix temp directories owner..."
            chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
            chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
            chown -R $USER_RUNNER_ID /tmp || true
          else
            echo "Fix temp directories permissions..."
            chmod -f -R 777 "$(pwd)/testing" || true
            chmod -f -R 777 "/deckhouse/testing" || true
            chmod -f -R 777 /tmp || true
          fi

      # <template: update_comment_on_finish>
      # Report this job's final status ('job,separate' mode) back to the issue
      # comment; always runs, even when earlier steps failed.
      # NOTE(review): JOB_NAMES is not defined for this job (only NEEDS/JOB/STEPS
      # contexts are exported), so jobNames stays null here — presumably intended.
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'job,separate';
            const name = 'destroy cluster: EKS, Containerd, Kubernetes Automatic';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>
  # </template: e2e_run_job_template>


  # Final aggregation job: waits on every per-version destroy job (always(), so it
  # runs regardless of their outcome) and writes one summary status into the issue
  # comment. JOB_NAMES maps job ids to the human-readable names used in the comment.
  last_comment:
    name: Update comment on finish
    needs: ["started_at","run_containerd_1_25","run_containerd_1_26","run_containerd_1_27","run_containerd_1_28","run_containerd_1_29","run_containerd_automatic"]
    if: ${{ always() }}
    runs-on: ubuntu-latest
    env:
      JOB_NAMES: |
        {"run_containerd_1_25":"destroy cluster: EKS, Containerd, Kubernetes 1.25","run_containerd_1_26":"destroy cluster: EKS, Containerd, Kubernetes 1.26","run_containerd_1_27":"destroy cluster: EKS, Containerd, Kubernetes 1.27","run_containerd_1_28":"destroy cluster: EKS, Containerd, Kubernetes 1.28","run_containerd_1_29":"destroy cluster: EKS, Containerd, Kubernetes 1.29","run_containerd_automatic":"destroy cluster: EKS, Containerd, Kubernetes Automatic"}
    steps:

      # <template: checkout_step>
      # Shallow checkout is enough here — only ./.github/scripts/js/ci is needed.
      - name: Checkout sources
        uses: actions/checkout@v3.5.2

      # </template: checkout_step>
      # <template: update_comment_on_finish>
      # 'workflow,final' mode: replace the per-job status lines with one final
      # workflow-level summary in the issue comment.
      - name: Update comment on finish
        id: update_comment_on_finish
        if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
        env:
          NEEDS_CONTEXT: ${{ toJSON(needs) }}
          JOB_CONTEXT: ${{ toJSON(job) }}
          STEPS_CONTEXT: ${{ toJSON(steps) }}
        uses: actions/github-script@v6.4.1
        with:
          github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
          retries: 3
          script: |
            const statusConfig = 'workflow,final,no-skipped,restore-separate';
            const name = 'destroy cluster: EKS';
            const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
            const jobContext = JSON.parse(process.env.JOB_CONTEXT);
            const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
            let jobNames = null
            if (process.env.JOB_NAMES) {
              jobNames = JSON.parse(process.env.JOB_NAMES);
            }

            core.info(`needsContext: ${JSON.stringify(needsContext)}`);
            core.info(`jobContext: ${JSON.stringify(jobContext)}`);
            core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
            core.info(`jobNames: ${JSON.stringify(jobNames)}`);

            const ci = require('./.github/scripts/js/ci');
            return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
      # </template: update_comment_on_finish>

# </template: e2e_workflow_template>
