#!/bin/bash

# ==============================================================================
# HELPER FUNCTIONS FOR PACKAGE MANAGEMENT
# ==============================================================================
#
# This file provides unified helper functions for robust package installation
# and repository management across Debian/Ubuntu OS upgrades.
#
# Key Features:
#   - Automatic retry logic for transient APT/network failures
#   - Unified keyring cleanup from all 3 locations
#   - Legacy installation cleanup (nvm, rbenv, rustup)
#   - OS-upgrade-safe repository preparation
#   - Service pattern matching for multi-version tools
#
# Usage in install scripts:
#   source /dev/stdin <<< "$FUNCTIONS"  # Load from build.func
#   prepare_repository_setup "mysql"
#   install_packages_with_retry "mysql-server" "mysql-client"
#
# Quick Reference (Core Helpers):
#   cleanup_tool_keyrings()          - Remove keyrings from all 3 locations
#   stop_all_services()              - Stop services by pattern (e.g. "php*-fpm")
#   verify_tool_version()            - Validate installed version matches expected
#   cleanup_legacy_install()         - Remove nvm, rbenv, rustup, etc.
#   prepare_repository_setup()       - Cleanup repos + keyrings + validate APT
#   install_packages_with_retry()    - Install with up to 2 retries and APT refresh
#   upgrade_packages_with_retry()    - Upgrade with up to 2 retries and APT refresh
#
# ==============================================================================

# ------------------------------------------------------------------------------
# Cache installed version to avoid repeated checks
# ------------------------------------------------------------------------------
# Persist the installed version of an app so later runs can skip probing.
# $1 = app name, $2 = version string to record
cache_installed_version() {
  local app="$1"
  local version="$2"
  local cache_dir="/var/cache/app-versions"

  mkdir -p "$cache_dir"
  echo "$version" >"${cache_dir}/${app}_version.txt"
}

# Print the cached version for an app (empty output if never cached).
# Always returns 0 so callers can use it unconditionally in substitutions.
get_cached_version() {
  local app="$1"
  local cache_file="/var/cache/app-versions/${app}_version.txt"

  mkdir -p /var/cache/app-versions
  [[ -f "$cache_file" ]] && cat "$cache_file"
  return 0
}

# ------------------------------------------------------------------------------
# Clean up ALL keyring locations for a tool (unified helper)
# Usage: cleanup_tool_keyrings "mariadb" "mysql" "postgresql"
# ------------------------------------------------------------------------------
# Remove every GPG keyring matching the given name prefixes from the three
# locations APT keyrings historically live in. Best-effort: never fails.
# Usage: cleanup_tool_keyrings "mariadb" "mysql" "postgresql"
cleanup_tool_keyrings() {
  local name

  for name in "$@"; do
    # Patterns intentionally unquoted so the shell expands the glob.
    rm -f \
      /usr/share/keyrings/${name}*.gpg \
      /etc/apt/keyrings/${name}*.gpg \
      /etc/apt/trusted.gpg.d/${name}*.gpg 2>/dev/null || true
  done
}

# ------------------------------------------------------------------------------
# Stop and disable all service instances matching a pattern
# Usage: stop_all_services "php*-fpm" "mysql" "mariadb"
# ------------------------------------------------------------------------------
# Stop and disable every systemd service whose unit name matches one of the
# given patterns (e.g. "php*-fpm" covers all PHP-FPM versions). Best-effort.
stop_all_services() {
  local pattern matches unit

  for pattern in "$@"; do
    # Collect matching unit names; empty result is fine (|| true).
    matches=$(systemctl list-units --type=service --all 2>/dev/null |
      grep -oE "${pattern}[^ ]*\.service" 2>/dev/null | sort -u) || true

    [[ -z "$matches" ]] && continue

    while read -r unit; do
      $STD systemctl stop "$unit" 2>/dev/null || true
      $STD systemctl disable "$unit" 2>/dev/null || true
    done <<<"$matches"
  done
}

# ------------------------------------------------------------------------------
# Verify installed tool version matches expected version
# Returns: 0 if match, 1 if mismatch (with warning)
# Usage: verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')"
# ------------------------------------------------------------------------------
# Compare installed vs expected tool version by MAJOR component only.
# $1 = tool name (for the warning message)
# $2 = expected version, $3 = installed version
# Returns 0 on major-version match, 1 (plus msg_warn) on mismatch.
verify_tool_version() {
  local tool="$1"
  local expected="$2"
  local installed="$3"

  # Strip everything after the first dot to get the major version.
  local want_major="${expected%%.*}"
  local have_major="${installed%%.*}"

  if [[ "$have_major" == "$want_major" ]]; then
    return 0
  fi

  msg_warn "$tool version mismatch: expected $expected, got $installed"
  return 1
}

# ------------------------------------------------------------------------------
# Clean up legacy installation methods (nvm, rbenv, rustup, etc.)
# Usage: cleanup_legacy_install "nodejs" -> removes nvm
# ------------------------------------------------------------------------------
# Remove per-user "version manager" installs (nvm, rbenv, rustup, GOPATH)
# that would shadow or conflict with the system-wide packages we install.
# $1 = tool name; unknown names are a no-op.
cleanup_legacy_install() {
  local tool="$1"

  case "$tool" in
  nodejs | node)
    [[ -d "$HOME/.nvm" ]] || return 0
    msg_info "Removing legacy nvm installation"
    rm -rf "$HOME/.nvm" "$HOME/.npm" "$HOME/.bower" "$HOME/.config/yarn" 2>/dev/null || true
    # Drop nvm bootstrap lines from shell startup files.
    sed -i '/NVM_DIR/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
    msg_ok "Legacy nvm installation removed"
    ;;
  ruby)
    [[ -d "$HOME/.rbenv" ]] || return 0
    msg_info "Removing legacy rbenv installation"
    rm -rf "$HOME/.rbenv" 2>/dev/null || true
    sed -i '/rbenv/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
    msg_ok "Legacy rbenv installation removed"
    ;;
  rust)
    [[ -d "$HOME/.cargo" || -d "$HOME/.rustup" ]] || return 0
    msg_info "Removing legacy rustup installation"
    rm -rf "$HOME/.cargo" "$HOME/.rustup" 2>/dev/null || true
    sed -i '/cargo/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
    msg_ok "Legacy rustup installation removed"
    ;;
  go | golang)
    [[ -d "$HOME/go" ]] || return 0
    msg_info "Removing legacy Go workspace"
    # User code under ~/go is kept; only the GOPATH env wiring is removed.
    sed -i '/GOPATH/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
    msg_ok "Legacy Go workspace cleaned"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Unified repository preparation before setup
# Cleans up old repos, keyrings, and ensures APT is working
# Usage: prepare_repository_setup "mariadb" "mysql"
# ------------------------------------------------------------------------------
# Prepare APT for adding a tool repository: wipe stale repo files and
# keyrings for every given name, then confirm APT itself is functional.
# Returns 1 if APT cannot be brought into a working state.
prepare_repository_setup() {
  local name

  # Remove stale .list/.sources files for each repo name.
  for name in "$@"; do
    cleanup_old_repo_files "$name"
  done

  # Purge matching keyrings from all known locations.
  cleanup_tool_keyrings "$@"

  # Bail out if APT is broken (ensure_apt_working defined in build.func).
  ensure_apt_working || return 1

  return 0
}

# ------------------------------------------------------------------------------
# Install packages with retry logic
# Usage: install_packages_with_retry "mysql-server" "mysql-client"
# ------------------------------------------------------------------------------
# Install packages via apt, retrying up to 2 more times on failure.
# Between attempts, repair interrupted dpkg state and refresh the index.
# Returns 0 on success, 1 after all attempts fail.
install_packages_with_retry() {
  local pkgs=("$@")
  local max_retries=2
  local attempt=0

  while ((attempt <= max_retries)); do
    $STD apt install -y "${pkgs[@]}" 2>/dev/null && return 0

    attempt=$((attempt + 1))
    if ((attempt <= max_retries)); then
      msg_warn "Package installation failed, retrying ($attempt/$max_retries)..."
      sleep 2
      # Fix any interrupted dpkg operations before retry
      $STD dpkg --configure -a 2>/dev/null || true
      $STD apt update 2>/dev/null || true
    fi
  done

  return 1
}

# ------------------------------------------------------------------------------
# Upgrade specific packages with retry logic
# Usage: upgrade_packages_with_retry "mariadb-server" "mariadb-client"
# ------------------------------------------------------------------------------
# Upgrade already-installed packages via apt (--only-upgrade), retrying up
# to 2 more times. Between attempts, repair dpkg and refresh the index.
# Returns 0 on success, 1 after all attempts fail.
upgrade_packages_with_retry() {
  local pkgs=("$@")
  local max_retries=2
  local attempt=0

  while ((attempt <= max_retries)); do
    $STD apt install --only-upgrade -y "${pkgs[@]}" 2>/dev/null && return 0

    attempt=$((attempt + 1))
    if ((attempt <= max_retries)); then
      msg_warn "Package upgrade failed, retrying ($attempt/$max_retries)..."
      sleep 2
      # Fix any interrupted dpkg operations before retry
      $STD dpkg --configure -a 2>/dev/null || true
      $STD apt update 2>/dev/null || true
    fi
  done

  return 1
}

# ------------------------------------------------------------------------------
# Check if tool is already installed and optionally verify exact version
# Returns: 0 if installed (with optional version match), 1 if not installed
# Usage: is_tool_installed "mariadb" "11.4" || echo "Not installed"
# ------------------------------------------------------------------------------
# Detect whether a tool is installed, printing its version when found.
# $1 = tool name, $2 = optional required version (exact string match)
# Prints the detected version (if any) on stdout.
# Returns 0 when installed (and version matches, if given), 1 otherwise.
is_tool_installed() {
  local tool="$1"
  local want="${2:-}"
  local found=""

  # Each arm probes the tool's CLI and extracts a normalized version string.
  case "$tool" in
  mariadb)
    command -v mariadb >/dev/null 2>&1 &&
      found=$(mariadb --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    ;;
  mysql)
    command -v mysql >/dev/null 2>&1 &&
      found=$(mysql --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    ;;
  mongodb | mongod)
    command -v mongod >/dev/null 2>&1 &&
      found=$(mongod --version 2>/dev/null | awk '/db version/{print $3}' | cut -d. -f1,2)
    ;;
  node | nodejs)
    command -v node >/dev/null 2>&1 &&
      found=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+')
    ;;
  php)
    command -v php >/dev/null 2>&1 &&
      found=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
    ;;
  postgres | postgresql)
    command -v psql >/dev/null 2>&1 &&
      found=$(psql --version 2>/dev/null | awk '{print $3}' | cut -d. -f1)
    ;;
  ruby)
    command -v ruby >/dev/null 2>&1 &&
      found=$(ruby --version 2>/dev/null | awk '{print $2}' | cut -d. -f1,2)
    ;;
  rust | rustc)
    command -v rustc >/dev/null 2>&1 &&
      found=$(rustc --version 2>/dev/null | awk '{print $2}')
    ;;
  go | golang)
    command -v go >/dev/null 2>&1 &&
      found=$(go version 2>/dev/null | awk '{print $3}' | sed 's/go//')
    ;;
  clickhouse)
    command -v clickhouse >/dev/null 2>&1 &&
      found=$(clickhouse --version 2>/dev/null | awk '{print $2}')
    ;;
  esac

  # Nothing detected (or unknown tool name): report "not installed".
  [[ -z "$found" ]] && return 1

  # Installed but not the requested version: print what we found, fail.
  if [[ -n "$want" && "$found" != "$want" ]]; then
    echo "$found"
    return 1
  fi

  echo "$found"
  return 0
}

# ------------------------------------------------------------------------------
# Remove old tool version completely (purge + cleanup repos)
# Usage: remove_old_tool_version "mariadb" "repository-name"
# ------------------------------------------------------------------------------
remove_old_tool_version() {
  local tool_name="$1"
  local repo_name="${2:-$tool_name}"

  case "$tool_name" in
  mariadb)
    stop_all_services "mariadb"
    $STD apt purge -y 'mariadb*' >/dev/null 2>&1 || true
    cleanup_tool_keyrings "mariadb"
    ;;
  mysql)
    stop_all_services "mysql"
    $STD apt purge -y 'mysql*' >/dev/null 2>&1 || true
    rm -rf /var/lib/mysql 2>/dev/null || true
    cleanup_tool_keyrings "mysql"
    ;;
  mongodb)
    stop_all_services "mongod"
    $STD apt purge -y 'mongodb*' >/dev/null 2>&1 || true
    rm -rf /var/lib/mongodb 2>/dev/null || true
    cleanup_tool_keyrings "mongodb"
    ;;
  node | nodejs)
    $STD apt purge -y nodejs npm >/dev/null 2>&1 || true
    # Clean up npm global modules
    if command -v npm >/dev/null 2>&1; then
      npm list -g 2>/dev/null | grep -oE '^  \S+' | awk '{print $1}' | while read -r module; do
        npm uninstall -g "$module" >/dev/null 2>&1 || true
      done
    fi
    cleanup_legacy_install "nodejs"
    cleanup_tool_keyrings "nodesource"
    ;;
  php)
    stop_all_services "php.*-fpm"
    $STD apt purge -y 'php*' >/dev/null 2>&1 || true
    rm -rf /etc/php 2>/dev/null || true
    cleanup_tool_keyrings "deb.sury.org-php" "php"
    ;;
  postgresql)
    stop_all_services "postgresql"
    $STD apt purge -y 'postgresql*' >/dev/null 2>&1 || true
    # Keep data directory for safety (can be removed manually if needed)
    # rm -rf /var/lib/postgresql 2>/dev/null || true
    cleanup_tool_keyrings "postgresql" "pgdg"
    ;;
  java)
    $STD apt purge -y 'temurin*' 'adoptium*' 'openjdk*' >/dev/null 2>&1 || true
    cleanup_tool_keyrings "adoptium"
    ;;
  ruby)
    cleanup_legacy_install "ruby"
    $STD apt purge -y 'ruby*' >/dev/null 2>&1 || true
    ;;
  rust)
    cleanup_legacy_install "rust"
    ;;
  go | golang)
    rm -rf /usr/local/go 2>/dev/null || true
    cleanup_legacy_install "golang"
    ;;
  clickhouse)
    stop_all_services "clickhouse-server"
    $STD apt purge -y 'clickhouse*' >/dev/null 2>&1 || true
    rm -rf /var/lib/clickhouse 2>/dev/null || true
    cleanup_tool_keyrings "clickhouse"
    ;;
  esac

  # Clean up old repository files (both .list and .sources)
  cleanup_old_repo_files "$repo_name"

  return 0
}

# ------------------------------------------------------------------------------
# Determine if tool update/upgrade is needed
# Returns: 0 (update needed), 1 (already up-to-date)
# Usage: if should_update_tool "mariadb" "11.4"; then ... fi
# ------------------------------------------------------------------------------
# Decide whether a tool needs installing or updating.
# $1 = tool name, $2 = target version
# Returns 0 when an install/update is needed, 1 when already at target.
should_update_tool() {
  local tool="$1"
  local target="$2"
  local current

  # Not installed at all -> definitely needs an install.
  current=$(is_tool_installed "$tool" 2>/dev/null) || return 0

  # Exact version match -> nothing to do.
  [[ "$current" == "$target" ]] && return 1

  return 0
}

# ------------------------------------------------------------------------------
# Unified repository management for tools
# Handles adding, updating, and verifying tool repositories
# Usage: manage_tool_repository "mariadb" "11.4" "https://repo..." "GPG_key_url"
# Supports: mariadb, mongodb, nodejs, postgresql, php, mysql
# ------------------------------------------------------------------------------
manage_tool_repository() {
  # Configure the APT repository for a supported tool using deb822-style
  # .sources files under /etc/apt/sources.list.d/.
  # $1 = tool name (mariadb | mongodb | nodejs | php | postgresql)
  # $2 = tool version (used in MongoDB suite path and keyring filename)
  # $3 = repository base URL (required for mariadb/mongodb/nodejs)
  # $4 = GPG key URL (required by every supported tool)
  # Returns 0 on success, 1 on validation or download failure.
  # NOTE: setup_deb822_repo, get_fallback_suite and cleanup_old_repo_files
  # are defined elsewhere in the sourced helper set.
  local tool_name="$1"
  local version="$2"
  local repo_url="$3"
  local gpg_key_url="${4:-}"
  local distro_id repo_component suite

  # Distro ID from os-release, with surrounding quotes stripped.
  distro_id=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')

  case "$tool_name" in
  mariadb)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "MariaDB repository requires repo_url and gpg_key_url"
      return 1
    fi

    # Clean old repos first
    cleanup_old_repo_files "mariadb"

    # Get suite for fallback handling
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url/$distro_id")

    # Setup new repository using deb822 format
    setup_deb822_repo \
      "mariadb" \
      "$gpg_key_url" \
      "$repo_url/$distro_id" \
      "$suite" \
      "main"
    return 0
    ;;

  mongodb)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "MongoDB repository requires repo_url and gpg_key_url"
      return 1
    fi

    # Clean old repos first
    cleanup_old_repo_files "mongodb"

    # Import GPG key (dearmored, version-specific filename)
    mkdir -p /etc/apt/keyrings
    if ! curl -fsSL "$gpg_key_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/mongodb-server-${version}.gpg" 2>/dev/null; then
      msg_error "Failed to download MongoDB GPG key"
      return 1
    fi

    # Setup repository
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

    # Suite mapping with fallback for newer releases not yet supported by upstream
    if [[ "$distro_id" == "debian" ]]; then
      case "$distro_codename" in
      trixie | forky | sid)
        # Testing/unstable releases fallback to latest stable suite
        suite="bookworm"
        ;;
      bookworm)
        suite="bookworm"
        ;;
      bullseye)
        suite="bullseye"
        ;;
      *)
        # Unknown release: fallback to latest stable suite
        msg_warn "Unknown Debian release '${distro_codename}', using bookworm"
        suite="bookworm"
        ;;
      esac
    elif [[ "$distro_id" == "ubuntu" ]]; then
      case "$distro_codename" in
      oracular | plucky)
        # Newer releases fallback to latest LTS
        suite="noble"
        ;;
      noble)
        suite="noble"
        ;;
      jammy)
        suite="jammy"
        ;;
      focal)
        suite="focal"
        ;;
      *)
        # Unknown release: fallback to latest LTS
        msg_warn "Unknown Ubuntu release '${distro_codename}', using noble"
        suite="noble"
        ;;
      esac
    else
      # For other distros, try generic fallback
      suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url")
    fi

    # MongoDB publishes under "multiverse" on Ubuntu, "main" elsewhere.
    repo_component="main"
    [[ "$distro_id" == "ubuntu" ]] && repo_component="multiverse"

    # Write the deb822 sources file; suite path embeds the MongoDB series.
    cat <<EOF >/etc/apt/sources.list.d/mongodb-org-${version}.sources
Types: deb
URIs: ${repo_url}
Suites: ${suite}/mongodb-org/${version}
Components: ${repo_component}
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/mongodb-server-${version}.gpg
EOF
    return 0
    ;;

  nodejs)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "Node.js repository requires repo_url and gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "nodesource"

    # NodeSource uses deb822 format with GPG from repo
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

    # Create keyring directory first
    mkdir -p /etc/apt/keyrings

    # Download GPG key from NodeSource
    curl -fsSL "$gpg_key_url" | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg || {
      msg_error "Failed to import NodeSource GPG key"
      return 1
    }

    # NodeSource serves a single distro-agnostic suite named "nodistro".
    cat <<EOF >/etc/apt/sources.list.d/nodesource.sources
Types: deb
URIs: $repo_url
Suites: nodistro
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/nodesource.gpg
EOF
    return 0
    ;;

  php)
    if [[ -z "$gpg_key_url" ]]; then
      msg_error "PHP repository requires gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "php"

    # Download and install keyring (sury.org ships it as a .deb package,
    # which installs the key to /usr/share/keyrings/deb.sury.org-php.gpg)
    curl -fsSLo /tmp/debsuryorg-archive-keyring.deb "$gpg_key_url" || {
      msg_error "Failed to download PHP keyring"
      return 1
    }
    dpkg -i /tmp/debsuryorg-archive-keyring.deb >/dev/null 2>&1 || {
      msg_error "Failed to install PHP keyring"
      rm -f /tmp/debsuryorg-archive-keyring.deb
      return 1
    }
    rm -f /tmp/debsuryorg-archive-keyring.deb

    # Setup repository
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    cat <<EOF >/etc/apt/sources.list.d/php.sources
Types: deb
URIs: https://packages.sury.org/php
Suites: $distro_codename
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /usr/share/keyrings/deb.sury.org-php.gpg
EOF
    return 0
    ;;

  postgresql)
    if [[ -z "$gpg_key_url" ]]; then
      msg_error "PostgreSQL repository requires gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "postgresql"

    # Create keyring directory first
    mkdir -p /etc/apt/keyrings

    # Import PostgreSQL key
    curl -fsSL "$gpg_key_url" | gpg --dearmor -o /etc/apt/keyrings/postgresql.gpg || {
      msg_error "Failed to import PostgreSQL GPG key"
      return 1
    }

    # Setup repository (PGDG uses "<codename>-pgdg" suites)
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    cat <<EOF >/etc/apt/sources.list.d/postgresql.sources
Types: deb
URIs: http://apt.postgresql.org/pub/repos/apt
Suites: $distro_codename-pgdg
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/postgresql.gpg
EOF
    return 0
    ;;

  *)
    msg_error "Unknown tool repository: $tool_name"
    return 1
    ;;
  esac

  return 0
}

# ------------------------------------------------------------------------------
# Unified package upgrade function (with apt update caching)
# ------------------------------------------------------------------------------
# Upgrade a single package via apt (--only-upgrade), refreshing the APT
# index at most once every 5 minutes (shared timestamp with
# ensure_dependencies). Returns 1 if the upgrade itself fails.
upgrade_package() {
  local package="$1"

  # Use same caching logic as ensure_dependencies
  local apt_cache_file="/var/cache/apt-update-timestamp"
  local current_time last_update=0

  # Declaration split from assignment so a date failure isn't masked (SC2155).
  current_time=$(date +%s)

  if [[ -f "$apt_cache_file" ]]; then
    last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
  fi

  if ((current_time - last_update > 300)); then
    if $STD apt update; then
      # Only record a fresh timestamp when the update actually succeeded;
      # otherwise the next call should retry instead of trusting stale lists.
      echo "$current_time" >"$apt_cache_file"
    else
      msg_warn "APT update failed in upgrade_package - continuing with cached packages"
    fi
  fi

  $STD apt install --only-upgrade -y "$package" || {
    msg_warn "Failed to upgrade $package"
    return 1
  }
}

# ------------------------------------------------------------------------------
# Repository availability check
# ------------------------------------------------------------------------------
# Check whether an APT repository publishes a Release file for a suite.
# $1 = repository base URL, $2 = suite name
# Returns 0 when reachable within 10s, 1 otherwise.
verify_repo_available() {
  local base_url="$1"
  local suite_name="$2"

  if ! curl -fsSL --max-time 10 "${base_url}/dists/${suite_name}/Release" &>/dev/null; then
    return 1
  fi
  return 0
}

# ------------------------------------------------------------------------------
# Ensure dependencies are installed (with apt update caching)
# ------------------------------------------------------------------------------
# Install any of the given dependencies that are missing, running
# `apt update` at most once every 5 minutes (timestamp-cached).
# Returns 1 when APT cannot be repaired or the install fails.
ensure_dependencies() {
  local needed=()
  local dep

  # A dep counts as present if it is on PATH or installed as a package.
  for dep in "$@"; do
    command -v "$dep" &>/dev/null || is_package_installed "$dep" || needed+=("$dep")
  done

  [[ ${#needed[@]} -eq 0 ]] && return 0

  # Only run apt update if not done recently (within last 5 minutes)
  local stamp_file="/var/cache/apt-update-timestamp"
  local now last=0
  now=$(date +%s)

  if [[ -f "$stamp_file" ]]; then
    last=$(cat "$stamp_file" 2>/dev/null || echo 0)
  fi

  if ((now - last > 300)); then
    # Ensure orphaned sources are cleaned before updating
    cleanup_orphaned_sources 2>/dev/null || true

    if ! $STD apt update; then
      ensure_apt_working || return 1
    fi
    echo "$now" >"$stamp_file"
  fi

  $STD apt install -y "${needed[@]}" || {
    msg_error "Failed to install dependencies: ${needed[*]}"
    return 1
  }
}

# ------------------------------------------------------------------------------
# Smart version comparison
# ------------------------------------------------------------------------------
# True (0) when $1 is strictly greater than $2 in version-sort order.
version_gt() {
  [[ "$(printf '%s\n' "$@" | sort -V | head -n1)" != "$1" ]]
}

# ------------------------------------------------------------------------------
# Get system architecture (normalized)
# ------------------------------------------------------------------------------
# Print the system architecture.
# $1 = source: "dpkg" (raw dpkg name, default), "uname", or anything else
#      for dpkg-with-uname-fallback. Non-dpkg results are normalized
#      (x86_64 -> amd64, aarch64 -> arm64).
get_system_arch() {
  local mode="${1:-dpkg}"
  local arch

  case "$mode" in
  dpkg)
    arch=$(dpkg --print-architecture 2>/dev/null)
    ;;
  uname)
    arch=$(uname -m)
    ;;
  *)
    # "both" and unknown modes: prefer dpkg, fall back to uname.
    arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
    ;;
  esac

  if [[ "$mode" != "dpkg" ]]; then
    case "$arch" in
    x86_64) arch="amd64" ;;
    aarch64) arch="arm64" ;;
    esac
  fi

  echo "$arch"
}

# ------------------------------------------------------------------------------
# Create temporary directory with automatic cleanup
# ------------------------------------------------------------------------------
# Create a temporary directory and print its path.
#
# BUG FIX: the previous version set `trap "rm -rf ..." EXIT ...` inside the
# function. Since this function is invoked via command substitution
# (tmp=$(create_temp_dir)), the trap ran when that subshell exited —
# deleting the directory before the caller could use it — and, when called
# directly, it silently clobbered any EXIT/ERR trap the caller had set.
# Cleanup is therefore the caller's responsibility:
#   tmp=$(create_temp_dir) || exit 1
#   trap 'rm -rf -- "$tmp"' EXIT
create_temp_dir() {
  local tmp_dir
  tmp_dir=$(mktemp -d) || return 1
  echo "$tmp_dir"
}

# ------------------------------------------------------------------------------
# Check if package is installed (faster than dpkg -l | grep)
# ------------------------------------------------------------------------------
# True (0) when the dpkg database reports the package fully installed.
is_package_installed() {
  local pkg="$1"
  local status
  status=$(dpkg-query -W -f='${Status}' "$pkg" 2>/dev/null)
  [[ "$status" == "install ok installed" ]]
}

# ------------------------------------------------------------------------------
# GitHub API call with authentication and rate limit handling
# ------------------------------------------------------------------------------
github_api_call() {
  # Call the GitHub REST API with retry/backoff and optional auth.
  # $1 = full API URL
  # $2 = output file for the response body (default: /dev/stdout)
  # Uses GITHUB_TOKEN (Bearer auth) when set to raise rate limits.
  # Returns 0 on HTTP 200; 1 on exhausted retries, 403 rate limit, or 404.
  # NOTE(review): with the /dev/stdout default, the response body and the
  # `-w "%{http_code}"` output share the captured stream, so $http_code can
  # pick up body text — callers appear expected to pass a real output file;
  # verify before relying on the default.
  local url="$1"
  local output_file="${2:-/dev/stdout}"
  local max_retries=3
  local retry_delay=2

  # Attach the auth header only when a token is available.
  local header_args=()
  [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN")

  for attempt in $(seq 1 $max_retries); do
    local http_code
    # "000" signals a transport-level failure (curl error / no response).
    http_code=$(curl -fsSL -w "%{http_code}" -o "$output_file" \
      -H "Accept: application/vnd.github+json" \
      -H "X-GitHub-Api-Version: 2022-11-28" \
      "${header_args[@]}" \
      "$url" 2>/dev/null || echo "000")

    case "$http_code" in
    200)
      return 0
      ;;
    403)
      # Rate limit - check if we can retry (exponential backoff: 2s, 4s, ...)
      if [[ $attempt -lt $max_retries ]]; then
        msg_warn "GitHub API rate limit, waiting ${retry_delay}s... (attempt $attempt/$max_retries)"
        sleep "$retry_delay"
        retry_delay=$((retry_delay * 2))
        continue
      fi
      msg_error "GitHub API rate limit exceeded. Set GITHUB_TOKEN to increase limits."
      return 1
      ;;
    404)
      # Not found is permanent — no point retrying.
      msg_error "GitHub API endpoint not found: $url"
      return 1
      ;;
    *)
      # Any other status (5xx, 000, ...): retry with the current delay.
      if [[ $attempt -lt $max_retries ]]; then
        sleep "$retry_delay"
        continue
      fi
      msg_error "GitHub API call failed with HTTP $http_code"
      return 1
      ;;
    esac
  done

  return 1
}

# True (0) when an upgrade is warranted: nothing installed yet, or the
# target version is strictly newer than the current one.
# $1 = current version (may be empty), $2 = target version
should_upgrade() {
  local current_ver="$1"
  local target_ver="$2"

  # No current version recorded -> treat as "needs install".
  if [[ -z "$current_ver" ]]; then
    return 0
  fi

  version_gt "$target_ver" "$current_ver" && return 0
  return 1
}

# ------------------------------------------------------------------------------
# Get OS information (cached for performance)
# ------------------------------------------------------------------------------
# Print a field from /etc/os-release, cached in exported _OS_* variables so
# the file is parsed at most once per process.
# $1 = id | codename | version | version_id | version_full | all (default)
get_os_info() {
  local field="${1:-all}"

  # First call: parse os-release once and cache the results.
  if [[ -z "${_OS_ID:-}" ]]; then
    export _OS_ID=$(awk -F= '/^ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
    export _OS_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
    export _OS_VERSION=$(awk -F= '/^VERSION_ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
    export _OS_VERSION_FULL=$(awk -F= '/^VERSION=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
  fi

  case "$field" in
  codename) echo "$_OS_CODENAME" ;;
  version | version_id) echo "$_OS_VERSION" ;;
  version_full) echo "$_OS_VERSION_FULL" ;;
  all) echo "ID=$_OS_ID CODENAME=$_OS_CODENAME VERSION=$_OS_VERSION" ;;
  id | *) echo "$_OS_ID" ;;
  esac
}

# ------------------------------------------------------------------------------
# Check if running on specific OS
# ------------------------------------------------------------------------------
# Predicates for the detected distribution (via cached get_os_info).
is_debian() { [[ "$(get_os_info id)" == "debian" ]]; }

is_ubuntu() { [[ "$(get_os_info id)" == "ubuntu" ]]; }

is_alpine() { [[ "$(get_os_info id)" == "alpine" ]]; }

# ------------------------------------------------------------------------------
# Get Debian/Ubuntu major version
# ------------------------------------------------------------------------------
# Print the major component of the OS version (e.g. "12" for "12.7").
get_os_version_major() {
  local full_version
  full_version=$(get_os_info version)
  echo "${full_version%%.*}"
}

# ------------------------------------------------------------------------------
# Download file with retry logic and progress
# ------------------------------------------------------------------------------
# Download a URL to a file with retries.
# $1 = url, $2 = output path, $3 = max attempts (default 3),
# $4 = "true" to show a curl progress bar
# Returns 0 on success, 1 after all attempts fail (with msg_error).
download_file() {
  local url="$1"
  local output="$2"
  local attempts="${3:-3}"
  local progress="${4:-false}"
  local i

  local curl_opts=(-fsSL)
  [[ "$progress" == "true" ]] && curl_opts=(-fL#)

  for ((i = 1; i <= attempts; i++)); do
    curl "${curl_opts[@]}" -o "$output" "$url" && return 0

    if ((i < attempts)); then
      msg_warn "Download failed, retrying... (attempt $i/$attempts)"
      sleep 2
    fi
  done

  msg_error "Failed to download: $url"
  return 1
}

# ------------------------------------------------------------------------------
# Get fallback suite for repository (comprehensive mapping)
# ------------------------------------------------------------------------------
# Pick a usable repository suite for this distro/codename.
# If the repo already publishes the running codename, use it verbatim;
# otherwise map testing/unreleased codenames onto the nearest supported
# stable/LTS suite. Unknown distros pass the codename through unchanged.
# $1 = distro id, $2 = codename, $3 = repository base URL
get_fallback_suite() {
  local distro_id="$1"
  local distro_codename="$2"
  local repo_base_url="$3"

  # Fast path: upstream supports the current codename directly.
  if verify_repo_available "$repo_base_url" "$distro_codename"; then
    echo "$distro_codename"
    return 0
  fi

  case "$distro_id" in
  debian)
    # bullseye keeps its own suite; trixie/forky/sid and anything unknown
    # fall back to the latest stable (bookworm).
    case "$distro_codename" in
    bullseye) echo "bullseye" ;;
    *) echo "bookworm" ;;
    esac
    ;;
  ubuntu)
    # oracular/plucky -> noble (24.04 LTS); focal stays; mantic/lunar and
    # anything unknown -> jammy (22.04 LTS).
    case "$distro_codename" in
    oracular | plucky | noble) echo "noble" ;;
    focal) echo "focal" ;;
    *) echo "jammy" ;;
    esac
    ;;
  *)
    echo "$distro_codename"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Verify package source and version
# ------------------------------------------------------------------------------
# True (0) when apt-cache policy output for a package mentions the
# expected version string; 1 otherwise.
verify_package_source() {
  local pkg="$1"
  local want_version="$2"

  if apt-cache policy "$pkg" 2>/dev/null | grep -q "$want_version"; then
    return 0
  fi
  return 1
}

# ------------------------------------------------------------------------------
# Check if running on LTS version
# ------------------------------------------------------------------------------
# True (0) when running an Ubuntu LTS (20.04/22.04/24.04) or a Debian
# stable release (bullseye/bookworm/trixie); 1 for everything else.
is_lts_version() {
  local os_key
  os_key="$(get_os_info id)/$(get_os_info codename)"

  case "$os_key" in
  ubuntu/focal | ubuntu/jammy | ubuntu/noble) return 0 ;;
  debian/bullseye | debian/bookworm | debian/trixie) return 0 ;;
  *) return 1 ;;
  esac
}

# ------------------------------------------------------------------------------
# Get optimal number of parallel jobs (cached)
# ------------------------------------------------------------------------------
# Print a sensible parallel-job count: min(CPUs, GiB of RAM), at least 1.
# Cached in exported _PARALLEL_JOBS after the first call.
get_parallel_jobs() {
  if [[ -z "${_PARALLEL_JOBS:-}" ]]; then
    local cpus mem_gb jobs
    cpus=$(nproc 2>/dev/null || echo 1)
    mem_gb=$(free -g | awk '/^Mem:/{print $2}')

    # Budget roughly 1 GiB of RAM per compile job; never below 1.
    jobs=$((cpus < (mem_gb > 0 ? mem_gb : 1) ? cpus : (mem_gb > 0 ? mem_gb : 1)))
    export _PARALLEL_JOBS=$((jobs > 0 ? jobs : 1))
  fi
  echo "$_PARALLEL_JOBS"
}

# ------------------------------------------------------------------------------
# Get default PHP version for OS
# ------------------------------------------------------------------------------
# Print the distro-default PHP version for the running OS release.
get_default_php_version() {
  case "$(get_os_info id):$(get_os_version_major)" in
  debian:13) echo "8.3" ;; # Debian 13 (Trixie)
  debian:12) echo "8.2" ;; # Debian 12 (Bookworm)
  debian:11) echo "7.4" ;; # Debian 11 (Bullseye)
  debian:*) echo "8.2" ;;
  ubuntu:24) echo "8.3" ;; # Ubuntu 24.04 LTS (Noble)
  ubuntu:22) echo "8.1" ;; # Ubuntu 22.04 LTS (Jammy)
  ubuntu:20) echo "7.4" ;; # Ubuntu 20.04 LTS (Focal)
  ubuntu:*) echo "8.1" ;;
  *) echo "8.2" ;;
  esac
}

# ------------------------------------------------------------------------------
# Get default Python version for OS
# ------------------------------------------------------------------------------
get_default_python_version() {
  # Print the Python 3 version shipped by default for the detected OS release.
  local os_id os_ver
  os_id=$(get_os_info id)
  os_ver=$(get_os_version_major)

  # Dispatch on a combined "<id>-<major>" key
  case "${os_id}-${os_ver}" in
  debian-13) echo "3.12" ;; # Trixie
  debian-12) echo "3.11" ;; # Bookworm
  debian-11) echo "3.9" ;;  # Bullseye
  debian-*) echo "3.11" ;;
  ubuntu-24) echo "3.12" ;; # Noble
  ubuntu-22) echo "3.10" ;; # Jammy
  ubuntu-20) echo "3.8" ;;  # Focal
  ubuntu-*) echo "3.10" ;;
  *) echo "3.11" ;;
  esac
}

# ------------------------------------------------------------------------------
# Get default Node.js LTS version
# ------------------------------------------------------------------------------
get_default_nodejs_version() {
  # Current Node.js LTS major version (as of 2025); OS-independent.
  printf '%s\n' "22"
}

# ------------------------------------------------------------------------------
# Check if package manager is locked
# ------------------------------------------------------------------------------
is_apt_locked() {
  # Return 0 when any dpkg/apt lock file is currently held by a process.
  local lock_file
  for lock_file in \
    /var/lib/dpkg/lock-frontend \
    /var/lib/apt/lists/lock \
    /var/cache/apt/archives/lock; do
    if fuser "$lock_file" &>/dev/null; then
      return 0
    fi
  done
  return 1
}

# ------------------------------------------------------------------------------
# Wait for apt to be available
# ------------------------------------------------------------------------------
wait_for_apt() {
  # Block until no apt/dpkg lock is held, polling every 5 seconds.
  #
  # Arguments:
  #   $1 - maximum seconds to wait (default: 300)
  # Returns: 0 once apt is free, 1 on timeout.
  local timeout="${1:-300}"
  local elapsed=0

  while is_apt_locked; do
    if ((elapsed >= timeout)); then
      msg_error "Timeout waiting for apt to be available"
      return 1
    fi
    sleep 5
    elapsed=$((elapsed + 5))
  done

  return 0
}

# ------------------------------------------------------------------------------
# Cleanup old repository files (migration helper)
# ------------------------------------------------------------------------------
cleanup_old_repo_files() {
  # Purge every APT repository artifact belonging to a tool: legacy .list
  # files (plus their backup variants), GPG keys in both key locations, and
  # all deb822 .sources files, so nothing references a deleted keyring.
  #
  # Arguments:
  #   $1 - tool/app name used as the filename prefix
  local tool="$1"
  local src_dir="/etc/apt/sources.list.d"

  # Old-style .list files and their upgrade/backup leftovers,
  # plus every .sources file (including the main one) for this tool
  rm -f \
    "$src_dir/$tool"*.list \
    "$src_dir/$tool"*.list.save \
    "$src_dir/$tool"*.list.distUpgrade \
    "$src_dir/$tool"*.list.dpkg-* \
    "$src_dir/$tool"*.sources

  # GPG keys from both the legacy and modern keyring locations
  rm -f /etc/apt/trusted.gpg.d/"$tool"*.gpg
  rm -f /etc/apt/keyrings/"$tool"*.gpg
}

# ------------------------------------------------------------------------------
# Cleanup orphaned .sources files that reference missing keyrings
# This prevents APT signature verification errors
# Call this at the start of any setup function to ensure APT is in a clean state
# ------------------------------------------------------------------------------
cleanup_orphaned_sources() {
  # Remove .sources files whose Signed-By keyring no longer exists, plus any
  # broken keyring symlinks — both cause APT signature verification errors.
  # Safe to call at the start of any setup function.
  local src_dir="/etc/apt/sources.list.d"
  local key_dir="/etc/apt/keyrings"

  [[ -d "$src_dir" ]] || return 0

  local src_file file_name keyring
  while IFS= read -r -d '' src_file; do
    file_name=$(basename "$src_file")

    # debian.sources is the stock Debian repository definition — never remove it
    if [[ "$file_name" == "debian.sources" ]]; then
      continue
    fi

    # Pull the keyring path out of the file's Signed-By: line
    keyring=$(grep -E '^Signed-By:' "$src_file" 2>/dev/null | awk '{print $2}')

    # Orphaned: a keyring is declared but missing on disk
    if [[ -n "$keyring" && ! -f "$keyring" ]]; then
      rm -f "$src_file"
    fi
  done < <(find "$src_dir" -name "*.sources" -print0 2>/dev/null)

  # Drop dangling symlinks left behind in the keyrings directory
  if [[ -d "$key_dir" ]]; then
    find "$key_dir" -type l ! -exec test -e {} \; -delete 2>/dev/null || true
  fi
}

# ------------------------------------------------------------------------------
# Ensure APT is in a working state before installing packages
# This should be called at the start of any setup function
# ------------------------------------------------------------------------------
ensure_apt_working() {
  # Bring APT into a usable state: repair interrupted dpkg runs, drop orphaned
  # .sources files, and verify `apt update` succeeds (with one aggressive
  # retry). Call at the start of any setup function.
  # Returns: 0 when `apt update` succeeds, 1 when APT is critically broken.

  # Fix interrupted dpkg operations first
  # This can happen if a previous installation was interrupted (e.g., by script error)
  if [[ -f /var/lib/dpkg/lock-frontend ]] || dpkg --audit 2>&1 | grep -q "interrupted"; then
    $STD dpkg --configure -a 2>/dev/null || true
  fi

  # Clean up orphaned sources first
  cleanup_orphaned_sources

  # Try to update package lists
  if ! $STD apt update; then
    # Aggressive fallback: remove third-party .sources files, but keep
    # debian.sources — deleting it would disable the distribution's own
    # repositories (cleanup_orphaned_sources protects it for the same reason).
    find /etc/apt/sources.list.d -maxdepth 1 -name '*.sources' \
      ! -name 'debian.sources' -delete 2>/dev/null || true
    cleanup_orphaned_sources

    # Try again
    if ! $STD apt update; then
      msg_error "Cannot update package lists - APT is critically broken"
      return 1
    fi
  fi

  return 0
}

# ------------------------------------------------------------------------------
# Standardized deb822 repository setup (with optional Architectures)
# Always runs apt update after repo creation to ensure package availability
# ------------------------------------------------------------------------------
setup_deb822_repo() {
  # Create a deb822-style APT repository definition with its keyring, then
  # refresh package lists so the new packages are immediately available.
  #
  # Arguments:
  #   $1 - repo name (basename for .sources and .gpg files)
  #   $2 - URL of the ASCII-armored GPG key
  #   $3 - repository URL
  #   $4 - suite (e.g. codename or "stable")
  #   $5 - component (default: main)
  #   $6 - architectures (optional; omitted from the file when empty)
  # Returns: 0 on success, 1 on any failure.
  local name="$1"
  local gpg_url="$2"
  local repo_url="$3"
  local suite="$4"
  local component="${5:-main}"
  local architectures="${6-}" # optional

  # Validate required parameters
  if [[ -z "$name" || -z "$gpg_url" || -z "$repo_url" || -z "$suite" ]]; then
    msg_error "setup_deb822_repo: missing required parameters (name=$name repo=$repo_url suite=$suite)"
    return 1
  fi

  # Remove any stale repo files/keys for this name before re-creating them
  cleanup_old_repo_files "$name"
  cleanup_orphaned_sources

  mkdir -p /etc/apt/keyrings || {
    msg_error "Failed to create /etc/apt/keyrings"
    return 1
  }

  # Import GPG key. Download to a temp file first, then dearmor: in a
  # `curl | gpg` pipeline the exit status is gpg's, so a curl failure could
  # be masked when pipefail is not set.
  local tmp_key
  tmp_key=$(mktemp) || {
    msg_error "Failed to import GPG key for ${name}"
    return 1
  }
  if ! curl -fsSL "$gpg_url" -o "$tmp_key" ||
    ! gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" "$tmp_key"; then
    rm -f "$tmp_key"
    msg_error "Failed to import GPG key for ${name}"
    return 1
  fi
  rm -f "$tmp_key"

  # Write deb822 .sources file (Architectures only when provided)
  {
    echo "Types: deb"
    echo "URIs: $repo_url"
    echo "Suites: $suite"
    echo "Components: $component"
    [[ -n "$architectures" ]] && echo "Architectures: $architectures"
    echo "Signed-By: /etc/apt/keyrings/${name}.gpg"
  } >"/etc/apt/sources.list.d/${name}.sources"

  $STD apt update
}

# ------------------------------------------------------------------------------
# Package version hold/unhold helpers
# ------------------------------------------------------------------------------
hold_package_version() {
  # Pin a package at its current version so apt upgrades skip it.
  local pkg="$1"
  $STD apt-mark hold "$pkg"
}

unhold_package_version() {
  # Release a previously held package so apt may upgrade it again.
  local pkg="$1"
  $STD apt-mark unhold "$pkg"
}

# ------------------------------------------------------------------------------
# Safe service restart with verification
# ------------------------------------------------------------------------------
safe_service_restart() {
  # Restart a running service (or start it if stopped), then verify it is
  # actually active; dumps `systemctl status` on failure.
  #
  # Arguments:
  #   $1 - systemd unit name
  # Returns: 0 when the service is active afterwards, 1 otherwise.
  local unit="$1"

  # restart when already running, otherwise a plain start
  if systemctl is-active --quiet "$unit"; then
    $STD systemctl restart "$unit"
  else
    $STD systemctl start "$unit"
  fi

  # Post-condition check: the unit must be active now
  if systemctl is-active --quiet "$unit"; then
    return 0
  fi

  msg_error "Failed to start $unit"
  systemctl status "$unit" --no-pager
  return 1
}

# ------------------------------------------------------------------------------
# Enable and start service (with error handling)
# ------------------------------------------------------------------------------
enable_and_start_service() {
  # Enable a systemd unit for boot and start it now.
  #
  # Arguments:
  #   $1 - systemd unit name
  # Returns: 0 on success; 1 if enable fails (silently) or start fails
  #          (with error message and status dump).
  local unit="$1"

  systemctl enable "$unit" &>/dev/null || return 1

  if systemctl start "$unit" &>/dev/null; then
    return 0
  fi

  msg_error "Failed to start $unit"
  systemctl status "$unit" --no-pager
  return 1
}

# ------------------------------------------------------------------------------
# Check if service is enabled
# ------------------------------------------------------------------------------
is_service_enabled() {
  # Return 0 when the given systemd unit is enabled at boot.
  local unit="$1"
  systemctl is-enabled --quiet "$unit" 2>/dev/null
}

# ------------------------------------------------------------------------------
# Check if service is running
# ------------------------------------------------------------------------------
is_service_running() {
  # Return 0 when the given systemd unit is currently active.
  local unit="$1"
  systemctl is-active --quiet "$unit" 2>/dev/null
}

# ------------------------------------------------------------------------------
# Extract version from JSON (GitHub releases)
# ------------------------------------------------------------------------------
extract_version_from_json() {
  # Pull a version string out of a JSON document (e.g. a GitHub release).
  #
  # Arguments:
  #   $1 - JSON text
  #   $2 - field to read (default: tag_name)
  #   $3 - strip a leading "v" when "true" (default: true)
  # Returns: 0 and prints the version; 1 when the field is missing/empty.
  local json="$1"
  local field="${2:-tag_name}"
  local strip_v="${3:-true}"

  ensure_dependencies jq

  local raw
  raw=$(echo "$json" | jq -r ".${field} // empty")

  [[ -n "$raw" ]] || return 1

  case "$strip_v" in
  true) echo "${raw#v}" ;;
  *) echo "$raw" ;;
  esac
}

# ------------------------------------------------------------------------------
# Get latest GitHub release version
# ------------------------------------------------------------------------------
get_latest_github_release() {
  # Resolve the newest release version of a GitHub repo (network access via
  # the github_api_call helper).
  #
  # Arguments:
  #   $1 - repository in user/repo form
  #   $2 - strip a leading "v" when "true" (default: true)
  # Returns: 0 and prints the version, 1 on API or parse failure.
  local repo="$1"
  local strip_v="${2:-true}"
  local tmp_json
  tmp_json=$(mktemp)

  if ! github_api_call "https://api.github.com/repos/${repo}/releases/latest" "$tmp_json"; then
    rm -f "$tmp_json"
    return 1
  fi

  local release_version
  release_version=$(extract_version_from_json "$(cat "$tmp_json")" "tag_name" "$strip_v")
  rm -f "$tmp_json"

  [[ -n "$release_version" ]] || return 1

  echo "$release_version"
}

# ------------------------------------------------------------------------------
# Debug logging (only if DEBUG=1)
# ------------------------------------------------------------------------------
debug_log() {
  # Print a debug line to stderr, but only when DEBUG=1.
  # Always returns 0: the previous `[[ ]] && echo` form returned 1 whenever
  # DEBUG was off, which aborts any caller running under `set -e`.
  if [[ "${DEBUG:-0}" == "1" ]]; then
    echo "[DEBUG] $*" >&2
  fi
  return 0
}

# ------------------------------------------------------------------------------
# Performance timing helper
# ------------------------------------------------------------------------------
start_timer() {
  # Emit the current Unix epoch time in seconds; pair with end_timer.
  date +%s
}

end_timer() {
  # Compute the elapsed seconds since a start_timer timestamp.
  #
  # Arguments:
  #   $1 - start time (epoch seconds, from start_timer)
  #   $2 - optional label (default: "Operation")
  local start_time="$1"
  local label="${2:-Operation}"
  local end_time=$(date +%s)
  # NOTE(review): duration and label are computed but never printed, logged,
  # or returned — presumably a reporting call was removed or is still TODO.
  local duration=$((end_time - start_time))
}

# ------------------------------------------------------------------------------
# GPG key fingerprint verification
# ------------------------------------------------------------------------------
verify_gpg_fingerprint() {
  # Verify that a key file's primary fingerprint matches an expected value.
  #
  # Arguments:
  #   $1 - path to the GPG key file
  #   $2 - expected fingerprint (colon-format field 10 of the first fpr line)
  # Returns: 0 on match, 1 on mismatch (with error message).
  local key_file="$1"
  local expected_fingerprint="$2"

  # First `fpr:` record holds the primary key's fingerprint in field 10
  local actual_fingerprint
  actual_fingerprint=$(gpg --show-keys --with-fingerprint --with-colons "$key_file" 2>&1 | grep -m1 '^fpr:' | cut -d: -f10)

  if [[ "$actual_fingerprint" != "$expected_fingerprint" ]]; then
    msg_error "GPG fingerprint mismatch! Expected: $expected_fingerprint, Got: $actual_fingerprint"
    return 1
  fi

  return 0
}

# ==============================================================================
# INSTALL FUNCTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# Checks for new GitHub release (latest tag).
#
# Description:
#   - Queries the GitHub API for the latest release tag
#   - Compares it to a local cached version (~/.<app>)
#   - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
#
# Usage:
#     if check_for_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" [optional] "v1.1.1"; then
#       # trigger update...
#     fi
#     exit 0
#     }  # this closing brace ends the caller's update_script function, not check_for_gh_release
#
# Notes:
#   - Requires `jq` (auto-installed if missing)
#   - Does not modify anything, only checks version state
#   - Does not support pre-releases
# ------------------------------------------------------------------------------
check_for_gh_release() {
  # Compare the locally recorded version of <app> against the newest stable
  # GitHub release of <source> (user/repo), or against an optional pinned
  # version. On a pending update, sets the global CHECK_UPDATE_RELEASE to the
  # raw upstream tag (may carry a leading "v") and returns 0; returns 1 when
  # up to date or on any error.
  local app="$1"
  local source="$2"
  local pinned_version_in="${3:-}" # optional
  local app_lc="${app,,}"
  local current_file="$HOME/.${app_lc}"

  msg_info "Checking for update: ${app}"

  # DNS check
  if ! getent hosts api.github.com >/dev/null 2>&1; then
    msg_error "Network error: cannot resolve api.github.com"
    return 1
  fi

  ensure_dependencies jq

  # Fetch releases and exclude drafts/prereleases
  local releases_json
  releases_json=$(curl -fsSL --max-time 20 \
    -H 'Accept: application/vnd.github+json' \
    -H 'X-GitHub-Api-Version: 2022-11-28' \
    "https://api.github.com/repos/${source}/releases") || {
    msg_error "Unable to fetch releases for ${app}"
    return 1
  }

  mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json")
  if ((${#raw_tags[@]} == 0)); then
    msg_error "No stable releases found for ${app}"
    return 1
  fi

  # clean_tags mirrors raw_tags with any leading "v" stripped, for comparison
  local clean_tags=()
  for t in "${raw_tags[@]}"; do
    clean_tags+=("${t#v}")
  done

  # Index 0 is treated as the newest stable release (the GitHub releases API
  # lists releases newest-first)
  local latest_raw="${raw_tags[0]}"
  local latest_clean="${clean_tags[0]}"

  # current installed (stored without v)
  local current=""
  if [[ -f "$current_file" ]]; then
    current="$(<"$current_file")"
  else
    # Migration: search for any /opt/*_version.txt
    # Older scripts stored the version there; adopt it only when exactly one
    # candidate exists (several files would be ambiguous), then remove it.
    local legacy_files
    mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null)
    if ((${#legacy_files[@]} == 1)); then
      current="$(<"${legacy_files[0]}")"
      echo "${current#v}" >"$current_file"
      rm -f "${legacy_files[0]}"
    fi
  fi
  current="${current#v}"

  # Pinned version handling
  if [[ -n "$pinned_version_in" ]]; then
    local pin_clean="${pinned_version_in#v}"
    # Find the raw tag that corresponds to the pinned (v-stripped) version
    local match_raw=""
    for i in "${!clean_tags[@]}"; do
      if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then
        match_raw="${raw_tags[$i]}"
        break
      fi
    done

    if [[ -z "$match_raw" ]]; then
      msg_error "Pinned version ${pinned_version_in} not found upstream"
      return 1
    fi

    # Any difference from the pin (newer OR older) counts as an update
    if [[ "$current" != "$pin_clean" ]]; then
      CHECK_UPDATE_RELEASE="$match_raw"
      msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}"
      return 0
    fi

    msg_ok "No update available: ${app} is already on pinned version (${current})"
    return 1
  fi

  # No pinning → use latest
  if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
    CHECK_UPDATE_RELEASE="$latest_raw"
    msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
    return 0
  fi

  msg_ok "No update available: ${app} (${latest_clean})"
  return 1
}

# ------------------------------------------------------------------------------
# Creates and installs self-signed certificates.
#
# Description:
#   - Create a self-signed certificate with option to override application name
#
# Variables:
#   APP   - Application name (default: $APPLICATION variable)
# ------------------------------------------------------------------------------
create_self_signed_cert() {
  # Generate a one-year self-signed TLS certificate for an application under
  # /etc/ssl/<app>. Idempotent: an existing cert/key pair is left untouched.
  #
  # Arguments:
  #   $1 - application name (default: $APPLICATION)
  # Returns: 0 on success or when the cert already exists, 1 on failure.
  local app_name="${1:-${APPLICATION}}"
  local name_lc
  name_lc=$(echo "${app_name,,}" | tr -d ' ')
  local cert_dir="/etc/ssl/${name_lc}"
  local key_path="${cert_dir}/${name_lc}.key"
  local crt_path="${cert_dir}/${name_lc}.crt"

  # Nothing to do when both files are already present
  if [[ -f "$crt_path" && -f "$key_path" ]]; then
    return 0
  fi

  # Use ensure_dependencies for cleaner handling
  ensure_dependencies openssl || {
    msg_error "Failed to install OpenSSL"
    return 1
  }

  mkdir -p "$cert_dir"
  $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \
    -subj "/CN=${app_name}" \
    -addext "subjectAltName=DNS:${app_name}" \
    -keyout "$key_path" \
    -out "$crt_path" || {
    msg_error "Failed to create self-signed certificate"
    return 1
  }

  # Private key readable by root only; certificate world-readable
  chmod 600 "$key_path"
  chmod 644 "$crt_path"
}

# ------------------------------------------------------------------------------
# Downloads file with optional progress indicator using pv.
#
# Arguments:
#   $1 - URL
#   $2 - Destination path
# ------------------------------------------------------------------------------

function download_with_progress() {
  # Download a URL to a file, showing a pv progress bar when the remote size
  # is known (Content-Length header) and curl's own progress bar otherwise.
  #
  # Arguments:
  #   $1 - URL
  #   $2 - destination path
  # Returns: 0 on success, 1 on download failure.
  local url="$1"
  local output="$2"

  # Stop any active spinner so it doesn't garble the progress output.
  # ${SPINNER_PID:-} keeps this safe when the variable is unset under `set -u`.
  if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null 2>&1; fi

  ensure_dependencies pv

  # Remember whether pipefail was already active so it can be restored:
  # enabling it unconditionally would leak the setting to the caller.
  local had_pipefail=0
  [[ -o pipefail ]] && had_pipefail=1
  set -o pipefail

  # Read Content-Length from the response headers. Match case-insensitively:
  # HTTP/2 servers send all header names in lower case.
  local content_length
  content_length=$(curl -fsSLI "$url" | awk 'tolower($1) == "content-length:" {print $2}' | tr -d '\r' || true)

  local rc=0
  if [[ -z "$content_length" ]]; then
    # Size unknown → let curl render its own progress bar
    if ! curl -fL# -o "$output" "$url"; then
      msg_error "Download failed"
      rc=1
    fi
  else
    # Size known → stream through pv for an accurate progress bar
    if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then
      msg_error "Download failed"
      rc=1
    fi
  fi

  # Restore the caller's pipefail setting
  ((had_pipefail)) || set +o pipefail
  return $rc
}

# ------------------------------------------------------------------------------
# Ensures /usr/local/bin is permanently in system PATH.
#
# Description:
#   - Adds to /etc/profile.d if not present
# ------------------------------------------------------------------------------

function ensure_usr_local_bin_persist() {
  # Make /usr/local/bin part of PATH for all future login shells by dropping
  # a profile.d snippet. Skipped on Proxmox hosts (pveversion present) and
  # when the snippet already exists.
  local profile_script="/etc/profile.d/custom_path.sh"

  if [[ -f "$profile_script" ]] || command -v pveversion &>/dev/null; then
    return 0
  fi

  echo 'export PATH="/usr/local/bin:$PATH"' >"$profile_script"
  chmod +x "$profile_script"
}

# ------------------------------------------------------------------------------
# Downloads and deploys latest GitHub release (source, binary, tarball, asset).
#
# Description:
#   - Fetches latest release metadata from GitHub API
#   - Supports the following modes:
#       - tarball: Source code tarball (default if omitted)
#       - source: Alias for tarball (same behavior)
#       - binary: .deb package install (arch-dependent)
#       - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries)
#       - singlefile: Standalone binary (no archive, direct chmod +x install)
#   - Handles download, extraction/installation and version tracking in ~/.<app>
#
# Parameters:
#   $1   APP               - Application name (used for install path and version file)
#   $2   REPO              - GitHub repository in form user/repo
#   $3   MODE              - Release type:
#                              tarball   → source tarball (.tar.gz)
#                              binary    → .deb file (auto-arch matched)
#                              prebuild  → prebuilt archive (e.g. tar.gz)
#                              singlefile→ standalone binary (chmod +x)
#   $4   VERSION           - Optional release tag (default: latest)
#   $5   TARGET_DIR        - Optional install path (default: /opt/<app>)
#   $6   ASSET_FILENAME    - Required for:
#                              - prebuild  → archive filename or pattern
#                              - singlefile→ binary filename or pattern
#
# Optional:
#   - Set GITHUB_TOKEN env var to increase API rate limit (recommended for CI/CD).
#
# Examples:
#   # 1. Minimal: Fetch and deploy source tarball
#   fetch_and_deploy_gh_release "myapp" "myuser/myapp"
#
#   # 2. Binary install via .deb asset (architecture auto-detected)
#   fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary"
#
#   # 3. Prebuilt archive (.tar.gz) with asset filename match
#   fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz"
#
#   # 4. Single binary (chmod +x) like Argus, Promtail etc.
#   fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64"
# ------------------------------------------------------------------------------

function fetch_and_deploy_gh_release() {
  # Download and deploy a GitHub release of <repo> as <app> (see the comment
  # block above for modes and parameters). Records the deployed version in
  # ~/.<app> and returns early when that version is already current.
  local app="$1"
  local repo="$2"
  local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile
  local version="${4:-latest}"
  local target="${5:-/opt/$app}"
  local asset_pattern="${6:-}"

  # Lower-case, space-free app name drives the version-file path
  local app_lc=$(echo "${app,,}" | tr -d ' ')
  local version_file="$HOME/.${app_lc}"

  local api_timeout="--connect-timeout 10 --max-time 60"
  local download_timeout="--connect-timeout 15 --max-time 900"

  local current_version=""
  [[ -f "$version_file" ]] && current_version=$(<"$version_file")

  ensure_dependencies jq

  # "latest" uses the /releases/latest endpoint; otherwise look up the tag
  local api_url="https://api.github.com/repos/$repo/releases"
  [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest"
  local header=()
  [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN")

  # dns pre check
  local gh_host
  gh_host=$(awk -F/ '{print $3}' <<<"$api_url")
  if ! getent hosts "$gh_host" &>/dev/null; then
    msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking"
    return 1
  fi

  local max_retries=3 retry_delay=2 attempt=1 success=false resp http_code

  # The JSON body goes to /tmp/gh_rel.json; resp carries the HTTP status
  # code appended by -w "%{http_code}"
  while ((attempt <= max_retries)); do
    resp=$(curl $api_timeout -fsSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url") && success=true && break
    sleep "$retry_delay"
    ((attempt++))
  done

  if ! $success; then
    msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts"
    return 1
  fi

  # Last three characters of resp are the HTTP status code
  http_code="${resp:(-3)}"
  [[ "$http_code" != "200" ]] && {
    msg_error "GitHub API returned HTTP $http_code"
    return 1
  }

  local json tag_name
  json=$(</tmp/gh_rel.json)
  tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty')
  # Normalize: from here on, $version holds the tag without a leading "v"
  [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name"

  if [[ "$current_version" == "$version" ]]; then
    # NOTE(review): msg_ok prefixed with $STD — presumably to silence the
    # message in quiet mode; confirm that is intentional.
    $STD msg_ok "$app is already up-to-date (v$version)"
    return 0
  fi

  local tmpdir
  tmpdir=$(mktemp -d) || return 1
  # NOTE(review): url is never used below (url_match/asset_url are), and
  # clean_install is superseded by the direct ${CLEAN_INSTALL:-0} checks.
  local filename="" url=""

  msg_info "Fetching GitHub release: $app ($version)"

  local clean_install=false
  [[ -n "${CLEAN_INSTALL:-}" && "$CLEAN_INSTALL" == "1" ]] && clean_install=true

  ### Tarball Mode ###
  if [[ "$mode" == "tarball" || "$mode" == "source" ]]; then
    # GitHub API's tarball_url/zipball_url can return HTTP 300 Multiple Choices
    # when a branch and tag share the same name. Use explicit refs/tags/ URL instead.
    local direct_tarball_url="https://github.com/$repo/archive/refs/tags/$tag_name.tar.gz"
    filename="${app_lc}-${version}.tar.gz"

    curl $download_timeout -fsSL -o "$tmpdir/$filename" "$direct_tarball_url" || {
      msg_error "Download failed: $direct_tarball_url"
      rm -rf "$tmpdir"
      return 1
    }

    mkdir -p "$target"
    # CLEAN_INSTALL=1 wipes the target before deploying the new version
    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
      rm -rf "${target:?}/"*
    fi

    tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || {
      msg_error "Failed to extract tarball"
      rm -rf "$tmpdir"
      return 1
    }
    # GitHub tarballs wrap everything in a single top-level directory
    local unpack_dir
    unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1)

    # dotglob: include hidden files; nullglob: empty dir expands to nothing
    shopt -s dotglob nullglob
    cp -r "$unpack_dir"/* "$target/"
    shopt -u dotglob nullglob

    ### Binary Mode ###
  elif [[ "$mode" == "binary" ]]; then
    # Normalize machine names to Debian architecture labels
    local arch
    arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
    [[ "$arch" == "x86_64" ]] && arch="amd64"
    [[ "$arch" == "aarch64" ]] && arch="arm64"

    local assets url_match=""
    assets=$(echo "$json" | jq -r '.assets[].browser_download_url')

    # If explicit filename pattern is provided (param $6), match that first
    if [[ -n "$asset_pattern" ]]; then
      for u in $assets; do
        case "${u##*/}" in
        $asset_pattern)
          url_match="$u"
          break
          ;;
        esac
      done
    fi

    # If no match via explicit pattern, fall back to architecture heuristic
    if [[ -z "$url_match" ]]; then
      for u in $assets; do
        if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
          url_match="$u"
          break
        fi
      done
    fi

    # Fallback: any .deb file
    if [[ -z "$url_match" ]]; then
      for u in $assets; do
        [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
      done
    fi

    if [[ -z "$url_match" ]]; then
      msg_error "No suitable .deb asset found for $app"
      rm -rf "$tmpdir"
      return 1
    fi

    filename="${url_match##*/}"
    curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || {
      msg_error "Download failed: $url_match"
      rm -rf "$tmpdir"
      return 1
    }

    # apt resolves dependencies; dpkg -i is the fallback when apt refuses
    chmod 644 "$tmpdir/$filename"
    $STD apt install -y "$tmpdir/$filename" || {
      $STD dpkg -i "$tmpdir/$filename" || {
        msg_error "Both apt and dpkg installation failed"
        rm -rf "$tmpdir"
        return 1
      }
    }

    ### Prebuild Mode ###
  elif [[ "$mode" == "prebuild" ]]; then
    # Strip optional surrounding quotes from the asset pattern
    local pattern="${6%\"}"
    pattern="${pattern#\"}"
    [[ -z "$pattern" ]] && {
      msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)"
      rm -rf "$tmpdir"
      return 1
    }

    # First asset whose basename glob-matches the pattern wins
    local asset_url=""
    for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do
      filename_candidate="${u##*/}"
      case "$filename_candidate" in
      $pattern)
        asset_url="$u"
        break
        ;;
      esac
    done

    [[ -z "$asset_url" ]] && {
      msg_error "No asset matching '$pattern' found"
      rm -rf "$tmpdir"
      return 1
    }

    filename="${asset_url##*/}"
    curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || {
      msg_error "Download failed: $asset_url"
      rm -rf "$tmpdir"
      return 1
    }

    local unpack_tmp
    unpack_tmp=$(mktemp -d)
    mkdir -p "$target"
    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
      rm -rf "${target:?}/"*
    fi

    # Extract by archive type (zip or tar.*)
    if [[ "$filename" == *.zip ]]; then
      ensure_dependencies unzip
      unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || {
        msg_error "Failed to extract ZIP archive"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then
      tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
        msg_error "Failed to extract TAR archive"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    else
      msg_error "Unsupported archive format: $filename"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    fi

    # NOTE(review): top_dirs is computed but unused — the top_entries check
    # below decides whether to strip the leading folder.
    local top_dirs
    top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l)
    local top_entries inner_dir
    top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)
    if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
      # Strip leading folder
      inner_dir="$top_entries"
      shopt -s dotglob nullglob
      if compgen -G "$inner_dir/*" >/dev/null; then
        cp -r "$inner_dir"/* "$target/" || {
          msg_error "Failed to copy contents from $inner_dir to $target"
          rm -rf "$tmpdir" "$unpack_tmp"
          return 1
        }
      else
        msg_error "Inner directory is empty: $inner_dir"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      fi
      shopt -u dotglob nullglob
    else
      # Copy all contents
      shopt -s dotglob nullglob
      if compgen -G "$unpack_tmp/*" >/dev/null; then
        cp -r "$unpack_tmp"/* "$target/" || {
          msg_error "Failed to copy contents to $target"
          rm -rf "$tmpdir" "$unpack_tmp"
          return 1
        }
      else
        msg_error "Unpacked archive is empty"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      fi
      shopt -u dotglob nullglob
    fi

    ### Singlefile Mode ###
  elif [[ "$mode" == "singlefile" ]]; then
    # Strip optional surrounding quotes from the asset pattern
    local pattern="${6%\"}"
    pattern="${pattern#\"}"
    [[ -z "$pattern" ]] && {
      msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)"
      rm -rf "$tmpdir"
      return 1
    }

    # First asset whose basename glob-matches the pattern wins
    local asset_url=""
    for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do
      filename_candidate="${u##*/}"
      case "$filename_candidate" in
      $pattern)
        asset_url="$u"
        break
        ;;
      esac
    done

    [[ -z "$asset_url" ]] && {
      msg_error "No asset matching '$pattern' found"
      rm -rf "$tmpdir"
      return 1
    }

    filename="${asset_url##*/}"
    mkdir -p "$target"

    # By default the binary is installed as <app>; USE_ORIGINAL_FILENAME=true
    # keeps the upstream asset name instead
    local use_filename="${USE_ORIGINAL_FILENAME:-false}"
    local target_file="$app"
    [[ "$use_filename" == "true" ]] && target_file="$filename"

    curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || {
      msg_error "Download failed: $asset_url"
      rm -rf "$tmpdir"
      return 1
    }

    # Executables get +x; .jar files are run via java and stay non-executable
    if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then
      chmod +x "$target/$target_file"
    fi

  else
    msg_error "Unknown mode: $mode"
    rm -rf "$tmpdir"
    return 1
  fi

  # Record the deployed (v-stripped) version for the next run's up-to-date check
  echo "$version" >"$version_file"
  msg_ok "Deployed: $app ($version)"
  rm -rf "$tmpdir"
}

# ------------------------------------------------------------------------------
# Loads LOCAL_IP from persistent store or detects if missing.
#
# Description:
#   - Loads from /run/local-ip.env or performs runtime lookup
# ------------------------------------------------------------------------------

function import_local_ip() {
  # Ensure LOCAL_IP is set and exported: load it from /run/local-ip.env when
  # present, otherwise detect the host's source IP via `ip route get`.
  # Returns: 0 with LOCAL_IP exported, 1 when no IP could be determined.
  local ip_env_file="/run/local-ip.env"
  if [[ -f "$ip_env_file" ]]; then
    # shellcheck disable=SC1090
    source "$ip_env_file"
  fi

  if [[ -z "${LOCAL_IP:-}" ]]; then
    get_current_ip() {
      # Probe well-known targets; "default" falls back to the default route
      local probe_targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
      local target found_ip

      for target in "${probe_targets[@]}"; do
        if [[ "$target" == "default" ]]; then
          found_ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
        else
          found_ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
        fi
        if [[ -n "$found_ip" ]]; then
          echo "$found_ip"
          return 0
        fi
      done

      return 1
    }

    LOCAL_IP="$(get_current_ip || true)"
    if [[ -z "$LOCAL_IP" ]]; then
      msg_error "Could not determine LOCAL_IP"
      return 1
    fi
  fi

  export LOCAL_IP
}

# ------------------------------------------------------------------------------
# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download).
#
# Description:
#   - Adds Adminer to Apache or web root
#   - Supports Alpine and Debian-based systems
# ------------------------------------------------------------------------------

function setup_adminer() {
  # Install Adminer: on Alpine via direct download into the web root, on
  # Debian/Ubuntu via the APT package plus Apache configuration.
  if grep -qi alpine /etc/os-release; then
    # Alpine has no APT package — drop the standalone PHP file in place
    msg_info "Setup Adminer (Alpine)"
    local web_root="/var/www/localhost/htdocs/adminer"
    mkdir -p "$web_root"
    if ! curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
      -o "$web_root/index.php"; then
      msg_error "Failed to download Adminer"
      return 1
    fi
    cache_installed_version "adminer" "latest-alpine"
    msg_ok "Setup Adminer (Alpine)"
  else
    msg_info "Setup Adminer (Debian/Ubuntu)"
    ensure_dependencies adminer
    # Enable the packaged Apache conf and reload to activate it
    if ! $STD a2enconf adminer; then
      msg_error "Failed to enable Adminer Apache config"
      return 1
    fi
    if ! $STD systemctl reload apache2; then
      msg_error "Failed to reload Apache"
      return 1
    fi
    local pkg_version
    pkg_version=$(dpkg -s adminer 2>/dev/null | grep '^Version:' | awk '{print $2}')
    cache_installed_version "adminer" "${pkg_version:-unknown}"
    msg_ok "Setup Adminer (Debian/Ubuntu)"
  fi
}

# ------------------------------------------------------------------------------
# Installs or updates Composer globally (robust, idempotent).
#
# - Installs to /usr/local/bin/composer
# - Removes old binaries/symlinks in /usr/bin, /bin, /root/.composer, etc.
# - Ensures /usr/local/bin is in PATH (permanent)
# - Auto-updates to latest version
# ------------------------------------------------------------------------------

function setup_composer() {
  # Installs or self-updates Composer at /usr/local/bin/composer (idempotent).
  # Returns 0 on success, 1 on download/install failure.
  local COMPOSER_BIN="/usr/local/bin/composer"
  export COMPOSER_ALLOW_SUPERUSER=1

  # Get currently installed version (empty when composer is absent or broken)
  local INSTALLED_VERSION=""
  if [[ -x "$COMPOSER_BIN" ]]; then
    INSTALLED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
  fi

  # Scenario 1: Already installed - just self-update
  if [[ -n "$INSTALLED_VERSION" ]]; then
    msg_info "Update Composer $INSTALLED_VERSION"
    $STD "$COMPOSER_BIN" self-update --no-interaction || true
    local UPDATED_VERSION
    UPDATED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
    # Never cache or print an empty string if the version parse fails
    # (matches the "${VERSION:-unknown}" convention used elsewhere in this file).
    cache_installed_version "composer" "${UPDATED_VERSION:-unknown}"
    msg_ok "Update Composer ${UPDATED_VERSION:-unknown}"
    return 0
  fi

  # Scenario 2: Fresh install
  msg_info "Setup Composer"

  # Remove stale binaries/symlinks from legacy install locations.
  for old in /usr/bin/composer /bin/composer /root/.composer/vendor/bin/composer; do
    [[ -e "$old" && "$old" != "$COMPOSER_BIN" ]] && rm -f "$old"
  done

  ensure_usr_local_bin_persist
  export PATH="/usr/local/bin:$PATH"

  # Use an unpredictable mktemp path instead of a fixed /tmp/composer-setup.php
  # (avoids clobbering and symlink races on shared /tmp).
  local INSTALLER
  INSTALLER=$(mktemp) || {
    msg_error "Failed to create temporary file"
    return 1
  }

  curl -fsSL https://getcomposer.org/installer -o "$INSTALLER" || {
    msg_error "Failed to download Composer installer"
    rm -f "$INSTALLER"
    return 1
  }

  $STD php "$INSTALLER" --install-dir=/usr/local/bin --filename=composer || {
    msg_error "Failed to install Composer"
    rm -f "$INSTALLER"
    return 1
  }
  rm -f "$INSTALLER"

  if [[ ! -x "$COMPOSER_BIN" ]]; then
    msg_error "Composer installation failed"
    return 1
  fi

  chmod +x "$COMPOSER_BIN"
  # Best-effort bump to the latest release; failure here is non-fatal.
  $STD "$COMPOSER_BIN" self-update --no-interaction || true

  local FINAL_VERSION
  FINAL_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
  cache_installed_version "composer" "${FINAL_VERSION:-unknown}"
  msg_ok "Setup Composer"
}

# ------------------------------------------------------------------------------
# Installs FFmpeg from source or prebuilt binary (Debian/Ubuntu only).
#
# Description:
#   - Downloads and builds FFmpeg from GitHub (https://github.com/FFmpeg/FFmpeg)
#   - Supports specific version override via FFMPEG_VERSION (e.g. n7.1.1)
#   - Supports build profile via FFMPEG_TYPE:
#       - minimal : x264, vpx, mp3 only
#       - medium  : adds subtitles, fonts, opus, vorbis
#       - full    : adds dav1d, svt-av1, zlib, numa
#       - binary  : downloads static build (johnvansickle.com)
#   - Defaults to latest stable version and full feature set
#
# Notes:
#   - Requires: curl, jq, build-essential, and matching codec libraries
#   - Result is installed to /usr/local/bin/ffmpeg
# ------------------------------------------------------------------------------

function setup_ffmpeg() {
  local TMP_DIR=$(mktemp -d)
  local GITHUB_REPO="FFmpeg/FFmpeg"
  local VERSION="${FFMPEG_VERSION:-latest}"
  local TYPE="${FFMPEG_TYPE:-full}"
  local BIN_PATH="/usr/local/bin/ffmpeg"

  # Get currently installed version
  local INSTALLED_VERSION=""
  if command -v ffmpeg &>/dev/null; then
    INSTALLED_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}')
  fi

  msg_info "Setup FFmpeg ${VERSION} ($TYPE)"

  # Binary fallback mode
  if [[ "$TYPE" == "binary" ]]; then
    curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz" || {
      msg_error "Failed to download FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    }
    tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || {
      msg_error "Failed to extract FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    }
    local EXTRACTED_DIR
    EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*")
    cp "$EXTRACTED_DIR/ffmpeg" "$BIN_PATH"
    cp "$EXTRACTED_DIR/ffprobe" /usr/local/bin/ffprobe
    chmod +x "$BIN_PATH" /usr/local/bin/ffprobe
    local FINAL_VERSION=$($BIN_PATH -version 2>/dev/null | head -n1 | awk '{print $3}')
    rm -rf "$TMP_DIR"
    cache_installed_version "ffmpeg" "$FINAL_VERSION"
    ensure_usr_local_bin_persist
    [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION"
    return 0
  fi

  ensure_dependencies jq

  # Auto-detect latest stable version if none specified
  if [[ "$VERSION" == "latest" || -z "$VERSION" ]]; then
    local ffmpeg_tags
    ffmpeg_tags=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/tags" 2>/dev/null || echo "")

    if [[ -z "$ffmpeg_tags" ]]; then
      msg_warn "Could not fetch FFmpeg versions from GitHub, trying binary fallback"
      VERSION="" # Will trigger binary fallback below
    else
      VERSION=$(echo "$ffmpeg_tags" | jq -r '.[].name' 2>/dev/null |
        grep -E '^n[0-9]+\.[0-9]+\.[0-9]+$' |
        sort -V | tail -n1 || echo "")
    fi
  fi

  if [[ -z "$VERSION" ]]; then
    msg_info "Could not determine FFmpeg source version, using pre-built binary"
    VERSION="" # Will use binary fallback
  fi

  # Dependency selection
  local DEPS=(build-essential yasm nasm pkg-config)
  case "$TYPE" in
  minimal)
    DEPS+=(libx264-dev libvpx-dev libmp3lame-dev)
    ;;
  medium)
    DEPS+=(libx264-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev)
    ;;
  full)
    DEPS+=(
      libx264-dev libx265-dev libvpx-dev libmp3lame-dev
      libfreetype6-dev libass-dev libopus-dev libvorbis-dev
      libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev
      libva-dev libdrm-dev
    )
    ;;
  *)
    msg_error "Invalid FFMPEG_TYPE: $TYPE"
    rm -rf "$TMP_DIR"
    return 1
    ;;
  esac

  ensure_dependencies "${DEPS[@]}"

  # Try to download source if VERSION is set
  if [[ -n "$VERSION" ]]; then
    curl -fsSL "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz" -o "$TMP_DIR/ffmpeg.tar.gz" || {
      msg_warn "Failed to download FFmpeg source ${VERSION}, falling back to pre-built binary"
      VERSION=""
    }
  fi

  # If no source download (either VERSION empty or download failed), use binary
  if [[ -z "$VERSION" ]]; then
    msg_info "Setup FFmpeg from pre-built binary"
    curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz" || {
      msg_error "Failed to download FFmpeg pre-built binary"
      rm -rf "$TMP_DIR"
      return 1
    }

    tar -xJf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || {
      msg_error "Failed to extract FFmpeg binary archive"
      rm -rf "$TMP_DIR"
      return 1
    }

    if ! cp "$TMP_DIR/ffmpeg-"*/ffmpeg /usr/local/bin/ffmpeg 2>/dev/null; then
      msg_error "Failed to install FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    fi

    cache_installed_version "ffmpeg" "static"
    rm -rf "$TMP_DIR"
    msg_ok "Setup FFmpeg from pre-built binary"
    return 0
  fi

  tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR" || {
    msg_error "Failed to extract FFmpeg source"
    rm -rf "$TMP_DIR"
    return 1
  }

  cd "$TMP_DIR/FFmpeg-"* || {
    msg_error "Source extraction failed"
    rm -rf "$TMP_DIR"
    return 1
  }

  local args=(
    --enable-gpl
    --enable-shared
    --enable-nonfree
    --disable-static
    --enable-libx264
    --enable-libvpx
    --enable-libmp3lame
  )

  if [[ "$TYPE" != "minimal" ]]; then
    args+=(--enable-libfreetype --enable-libass --enable-libopus --enable-libvorbis)
  fi

  if [[ "$TYPE" == "full" ]]; then
    args+=(--enable-libx265 --enable-libdav1d --enable-zlib)
    args+=(--enable-vaapi --enable-libdrm)
  fi

  if [[ ${#args[@]} -eq 0 ]]; then
    msg_error "FFmpeg configure args array is empty"
    rm -rf "$TMP_DIR"
    return 1
  fi

  $STD ./configure "${args[@]}" || {
    msg_error "FFmpeg configure failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  $STD make -j"$(nproc)" || {
    msg_error "FFmpeg compilation failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  $STD make install || {
    msg_error "FFmpeg installation failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf
  $STD ldconfig

  ldconfig -p 2>/dev/null | grep libavdevice >/dev/null || {
    msg_error "libavdevice not registered with dynamic linker"
    rm -rf "$TMP_DIR"
    return 1
  }

  if ! command -v ffmpeg &>/dev/null; then
    msg_error "FFmpeg installation failed"
    rm -rf "$TMP_DIR"
    return 1
  fi

  local FINAL_VERSION
  FINAL_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}')
  rm -rf "$TMP_DIR"
  cache_installed_version "ffmpeg" "$FINAL_VERSION"
  ensure_usr_local_bin_persist
  [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION"
}

# ------------------------------------------------------------------------------
# Installs Go (Golang) from official tarball.
#
# Description:
#   - Determines system architecture
#   - Downloads latest version if GO_VERSION not set
#
# Variables:
#   GO_VERSION     - Version to install (e.g. 1.22.2 or latest)
# ------------------------------------------------------------------------------

function setup_go() {
  # Installs Go from the official tarball into /usr/local/go and symlinks the
  # binaries into /usr/local/bin. Honors GO_VERSION (default "latest").
  local ARCH
  case "$(uname -m)" in
  x86_64) ARCH="amd64" ;;
  aarch64) ARCH="arm64" ;;
  *)
    msg_error "Unsupported architecture: $(uname -m)"
    return 1
    ;;
  esac

  # Resolve "latest" via the official endpoint (e.g. "go1.22.2" -> "1.22.2")
  local GO_VERSION="${GO_VERSION:-latest}"
  if [[ "$GO_VERSION" == "latest" ]]; then
    GO_VERSION=$(curl -fsSL https://go.dev/VERSION?m=text 2>/dev/null | head -n1 | sed 's/^go//') || {
      msg_error "Could not determine latest Go version"
      return 1
    }
    [[ -z "$GO_VERSION" ]] && {
      msg_error "Latest Go version is empty"
      return 1
    }
  fi

  local GO_BIN="/usr/local/bin/go"
  local GO_INSTALL_DIR="/usr/local/go"

  # Get currently installed version (via the /usr/local/bin symlink)
  local CURRENT_VERSION=""
  if [[ -x "$GO_BIN" ]]; then
    CURRENT_VERSION=$("$GO_BIN" version 2>/dev/null | awk '{print $3}' | sed 's/go//')
  fi

  # Scenario 1: Already at target version
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$GO_VERSION" ]]; then
    cache_installed_version "go" "$GO_VERSION"
    return 0
  fi

  # Scenario 2: Different version or not installed
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$GO_VERSION" ]]; then
    msg_info "Upgrade Go from $CURRENT_VERSION to $GO_VERSION"
    remove_old_tool_version "go"
  else
    msg_info "Setup Go $GO_VERSION"
  fi

  local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
  local URL="https://go.dev/dl/${TARBALL}"
  # Split declaration from assignment so mktemp failure is not masked (SC2155).
  local TMP_TAR
  TMP_TAR=$(mktemp) || {
    msg_error "Failed to create temporary file"
    return 1
  }

  curl -fsSL "$URL" -o "$TMP_TAR" || {
    msg_error "Failed to download Go $GO_VERSION"
    rm -f "$TMP_TAR"
    return 1
  }

  # Per the official install instructions, remove any previous Go tree before
  # extracting so stale files from an older release cannot linger.
  rm -rf "$GO_INSTALL_DIR"
  $STD tar -C /usr/local -xzf "$TMP_TAR" || {
    msg_error "Failed to extract Go tarball"
    rm -f "$TMP_TAR"
    return 1
  }

  ln -sf /usr/local/go/bin/go /usr/local/bin/go
  ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
  rm -f "$TMP_TAR"

  cache_installed_version "go" "$GO_VERSION"
  ensure_usr_local_bin_persist
  msg_ok "Setup Go $GO_VERSION"
}

# ------------------------------------------------------------------------------
# Installs or updates Ghostscript (gs) from source.
#
# Description:
#   - Fetches latest release
#   - Builds and installs system-wide
# ------------------------------------------------------------------------------

function setup_gs() {
  # Scratch dir for the source build; removed on most exit paths below.
  local TMP_DIR=$(mktemp -d)
  # "0" sentinel means "not installed" (and compares lowest in version checks).
  local CURRENT_VERSION=$(gs --version 2>/dev/null || echo "0")

  # jq is required below to parse the GitHub release JSON.
  ensure_dependencies jq

  local RELEASE_JSON
  RELEASE_JSON=$(curl -fsSL --max-time 15 https://api.github.com/repos/ArtifexSoftware/ghostpdl-downloads/releases/latest 2>/dev/null || echo "")

  if [[ -z "$RELEASE_JSON" ]]; then
    msg_warn "Cannot fetch latest Ghostscript version from GitHub API"
    # Try to get from current version
    if command -v gs &>/dev/null; then
      # NOTE(review): this prints the version to raw stdout rather than via a
      # msg_* helper — possibly meant to feed a caller; confirm intent.
      gs --version | head -n1
      cache_installed_version "ghostscript" "$CURRENT_VERSION"
      return 0
    fi
    # NOTE(review): TMP_DIR is not removed on this error path (minor temp leak).
    msg_error "Cannot determine Ghostscript version and no existing installation found"
    return 1
  fi
  # Release tag looks like "gs10031"; the release *name* carries the dotted
  # version (e.g. 10.03.1) which the download URL needs in both forms.
  local LATEST_VERSION
  LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^gs//')
  local LATEST_VERSION_DOTTED
  LATEST_VERSION_DOTTED=$(echo "$RELEASE_JSON" | jq -r '.name' | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+')

  if [[ -z "$LATEST_VERSION" || -z "$LATEST_VERSION_DOTTED" ]]; then
    msg_warn "Could not determine latest Ghostscript version from GitHub - checking system"
    # Fallback: try to use system version or return error
    if [[ "$CURRENT_VERSION" == "0" ]]; then
      msg_error "Ghostscript not installed and cannot determine latest version"
      rm -rf "$TMP_DIR"
      return 1
    fi
    rm -rf "$TMP_DIR"
    return 0
  fi

  # Scenario 1: Already at latest version
  if [[ -n "$LATEST_VERSION_DOTTED" ]] && dpkg --compare-versions "$CURRENT_VERSION" ge "$LATEST_VERSION_DOTTED" 2>/dev/null; then
    cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED"
    rm -rf "$TMP_DIR"
    return 0
  fi

  # Scenario 2: New install or upgrade
  if [[ "$CURRENT_VERSION" != "0" && "$CURRENT_VERSION" != "$LATEST_VERSION_DOTTED" ]]; then
    msg_info "Upgrade Ghostscript from $CURRENT_VERSION to $LATEST_VERSION_DOTTED"
  else
    msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED"
  fi

  curl -fsSL "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" -o "$TMP_DIR/ghostscript.tar.gz" || {
    msg_error "Failed to download Ghostscript"
    rm -rf "$TMP_DIR"
    return 1
  }

  if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then
    msg_error "Failed to extract Ghostscript archive"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # Verify directory exists before cd
  if [[ ! -d "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" ]]; then
    msg_error "Ghostscript source directory not found: $TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}"
    rm -rf "$TMP_DIR"
    return 1
  fi

  cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || {
    msg_error "Failed to enter Ghostscript source directory"
    rm -rf "$TMP_DIR"
    return 1
  }

  # Build prerequisites for the configure/make/install sequence below.
  ensure_dependencies build-essential libpng-dev zlib1g-dev

  $STD ./configure || {
    msg_error "Ghostscript configure failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  $STD make -j"$(nproc)" || {
    msg_error "Ghostscript compilation failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  $STD make install || {
    msg_error "Ghostscript installation failed"
    rm -rf "$TMP_DIR"
    return 1
  }

  # Re-scan PATH, then expose /usr/local/bin/gs via /usr/bin if gs is still
  # not resolvable (e.g. minimal PATH environments).
  hash -r
  if [[ ! -x "$(command -v gs)" ]]; then
    if [[ -x /usr/local/bin/gs ]]; then
      ln -sf /usr/local/bin/gs /usr/bin/gs
    fi
  fi

  rm -rf "$TMP_DIR"
  cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED"
  ensure_usr_local_bin_persist
  msg_ok "Setup Ghostscript $LATEST_VERSION_DOTTED"
}

# ------------------------------------------------------------------------------
# Sets up Hardware Acceleration on debian or ubuntu.
#
# Description:
#   - Determines CPU/GPU/APU vendor
#   - Installs the correct libraries and packages
#   - Sets up Hardware Acceleration
#
# Notes:
#   - Some packages are fetched from Intel repositories because they are not available in the Debian repositories.
# ------------------------------------------------------------------------------
function setup_hwaccel() {
  # Detects the GPU/CPU vendor and installs the matching VA-API/OpenCL stack.
  msg_info "Setup Hardware Acceleration"

  # lspci (pciutils) is required for GPU detection below.
  if ! command -v lspci &>/dev/null; then
    $STD apt -y update || {
      msg_error "Failed to update package list"
      return 1
    }
    $STD apt -y install pciutils || {
      msg_error "Failed to install pciutils"
      return 1
    }
  fi

  # Detect GPU vendor (Intel, AMD, NVIDIA)
  local gpu_vendor
  gpu_vendor=$(lspci 2>/dev/null | grep -Ei 'vga|3d|display' | grep -Eo 'Intel|AMD|NVIDIA' | head -n1 || echo "")

  # Detect CPU vendor (relevant for AMD APUs)
  local cpu_vendor
  cpu_vendor=$(lscpu 2>/dev/null | grep -i 'Vendor ID' | awk '{print $3}' || echo "")

  if [[ -z "$gpu_vendor" && -z "$cpu_vendor" ]]; then
    msg_error "No GPU or CPU vendor detected (missing lspci/lscpu output)"
    return 1
  fi

  # Detect OS with fallbacks
  local os_id os_codename
  os_id=$(grep -oP '(?<=^ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^ID=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "debian")
  # NOTE(review): os_codename is computed but not used below — confirm whether
  # it is needed for a future repo selection or can be dropped.
  os_codename=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^VERSION_CODENAME=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "unknown")

  # Validate os_id
  if [[ -z "$os_id" ]]; then
    os_id="debian"
  fi

  # Determine if we are on a VM or LXC
  # (CTTYPE is expected to be provided by the calling environment; "0" = not a CT)
  local in_ct="${CTTYPE:-0}"

  case "$gpu_vendor" in
  Intel)
    if [[ "$os_id" == "ubuntu" ]]; then
      $STD apt -y install intel-opencl-icd || {
        msg_error "Failed to install intel-opencl-icd"
        return 1
      }
    else
      # For Debian: fetch Intel GPU drivers from GitHub
      # (each deploy is best-effort; failures only warn so the rest can proceed)
      fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || {
        msg_warn "Failed to deploy Intel IGC core 2"
      }
      fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || {
        msg_warn "Failed to deploy Intel IGC OpenCL 2"
      }
      fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || {
        msg_warn "Failed to deploy Intel GDGMM12"
      }
      fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || {
        msg_warn "Failed to deploy Intel OpenCL ICD"
      }
    fi

    $STD apt -y install va-driver-all ocl-icd-libopencl1 vainfo intel-gpu-tools || {
      msg_error "Failed to install Intel GPU dependencies"
      return 1
    }
    ;;
  AMD)
    $STD apt -y install mesa-va-drivers mesa-vdpau-drivers mesa-opencl-icd vainfo clinfo || {
      msg_error "Failed to install AMD GPU dependencies"
      return 1
    }

    # For AMD CPUs without discrete GPU (APUs)
    # NOTE(review): inside this case gpu_vendor is always "AMD", so the
    # -n "$gpu_vendor" check is redundant and the comment above contradicts
    # the condition (it runs *with* an AMD GPU detected) — confirm intent.
    if [[ "$cpu_vendor" == "AuthenticAMD" && -n "$gpu_vendor" ]]; then
      $STD apt -y install libdrm-amdgpu1 firmware-amd-graphics || true
    fi
    ;;
  NVIDIA)
    # NVIDIA needs manual driver setup - skip for now
    msg_info "NVIDIA GPU detected - manual driver setup required"
    ;;
  *)
    # If no discrete GPU, but AMD CPU (e.g., Ryzen APU)
    if [[ "$cpu_vendor" == "AuthenticAMD" ]]; then
      $STD apt -y install mesa-opencl-icd ocl-icd-libopencl1 clinfo || {
        msg_error "Failed to install Mesa OpenCL stack"
        return 1
      }
    else
      msg_warn "No supported GPU vendor detected - skipping GPU acceleration"
    fi
    ;;
  esac

  # Outside a container: open up /dev/dri and add the invoking user
  # (typically root in these scripts) to the video/render groups.
  if [[ "$in_ct" == "0" ]]; then
    chgrp video /dev/dri 2>/dev/null || true
    chmod 755 /dev/dri 2>/dev/null || true
    chmod 660 /dev/dri/* 2>/dev/null || true
    $STD adduser "$(id -u -n)" video
    $STD adduser "$(id -u -n)" render
  fi

  cache_installed_version "hwaccel" "1.0"
  msg_ok "Setup Hardware Acceleration"
}

# ------------------------------------------------------------------------------
# Installs ImageMagick 7 from source (Debian/Ubuntu only).
#
# Description:
#   - Downloads the latest ImageMagick source tarball
#   - Builds and installs ImageMagick to /usr/local
#   - Configures dynamic linker (ldconfig)
#
# Notes:
#   - Requires: build-essential, libtool, libjpeg-dev, libpng-dev, etc.
# ------------------------------------------------------------------------------
function setup_imagemagick() {
  # Builds the latest ImageMagick 7 from the official source tarball and
  # installs it to /usr/local (binary: /usr/local/bin/magick).
  # Returns 0 on success, 1 on any download/build/install failure.
  #
  # Split declaration from assignment so a mktemp failure is not masked
  # (SC2155: `local x=$(cmd)` always returns the status of `local`).
  local TMP_DIR
  TMP_DIR=$(mktemp -d) || {
    msg_error "Failed to create temporary directory"
    return 1
  }
  local BINARY_PATH="/usr/local/bin/magick"

  # Remember any pre-existing version so the final message reads Setup vs. Upgrade.
  local INSTALLED_VERSION=""
  if command -v magick &>/dev/null; then
    INSTALLED_VERSION=$(magick -version | awk '/^Version/ {print $3}')
  fi

  msg_info "Setup ImageMagick"

  # Build toolchain plus delegate libraries (JPEG/PNG/TIFF/WebP/HEIF/RAW/…).
  ensure_dependencies \
    build-essential \
    libtool \
    libjpeg-dev \
    libpng-dev \
    libtiff-dev \
    libwebp-dev \
    libheif-dev \
    libde265-dev \
    libopenjp2-7-dev \
    libxml2-dev \
    liblcms2-dev \
    libfreetype6-dev \
    libraw-dev \
    libfftw3-dev \
    liblqr-1-0-dev \
    libgsl-dev \
    pkg-config \
    ghostscript

  # This archive URL always points at the latest release.
  curl -fsSL https://imagemagick.org/archive/ImageMagick.tar.gz -o "$TMP_DIR/ImageMagick.tar.gz" || {
    msg_error "Failed to download ImageMagick"
    rm -rf "$TMP_DIR"
    return 1
  }

  tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR" || {
    msg_error "Failed to extract ImageMagick"
    rm -rf "$TMP_DIR"
    return 1
  }

  cd "$TMP_DIR"/ImageMagick-* || {
    msg_error "Source extraction failed"
    rm -rf "$TMP_DIR"
    return 1
  }

  $STD ./configure --disable-static || {
    msg_error "ImageMagick configure failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  $STD make -j"$(nproc)" || {
    msg_error "ImageMagick compilation failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  $STD make install || {
    msg_error "ImageMagick installation failed"
    rm -rf "$TMP_DIR"
    return 1
  }
  # Refresh the dynamic linker cache so the new shared libraries resolve.
  $STD ldconfig /usr/local/lib

  if [[ ! -x "$BINARY_PATH" ]]; then
    msg_error "ImageMagick installation failed"
    rm -rf "$TMP_DIR"
    return 1
  fi

  local FINAL_VERSION
  FINAL_VERSION=$("$BINARY_PATH" -version | awk '/^Version/ {print $3}')
  rm -rf "$TMP_DIR"
  cache_installed_version "imagemagick" "$FINAL_VERSION"
  ensure_usr_local_bin_persist

  if [[ -n "$INSTALLED_VERSION" ]]; then
    msg_ok "Upgrade ImageMagick $INSTALLED_VERSION → $FINAL_VERSION"
  else
    msg_ok "Setup ImageMagick $FINAL_VERSION"
  fi
}

# ------------------------------------------------------------------------------
# Installs Temurin JDK via Adoptium APT repository.
#
# Description:
#   - Removes previous JDK if version mismatch
#   - Installs or upgrades to specified JAVA_VERSION
#
# Variables:
#   JAVA_VERSION   - Temurin JDK version to install (e.g. 17, 21)
# ------------------------------------------------------------------------------

function setup_java() {
  # Installs or updates Temurin JDK (default major version: 21) from the
  # Adoptium APT repository, removing mismatched older Temurin JDKs first.
  local JAVA_VERSION="${JAVA_VERSION:-21}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
  local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk"

  # Prepare repository (cleanup + validation)
  prepare_repository_setup "adoptium" || {
    msg_error "Failed to prepare Adoptium repository"
    return 1
  }

  # Add repo if needed
  if [[ ! -f /etc/apt/sources.list.d/adoptium.sources ]]; then
    local SUITE
    # Fall back to a known suite when this codename isn't published upstream.
    SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://packages.adoptium.net/artifactory/deb")
    setup_deb822_repo \
      "adoptium" \
      "https://packages.adoptium.net/artifactory/api/gpg/key/public" \
      "https://packages.adoptium.net/artifactory/deb" \
      "$SUITE" \
      "main"
  fi

  # Get currently installed version
  # NOTE(review): the 2>/dev/null here applies to grep, not dpkg, so dpkg
  # errors still reach stderr — confirm that is acceptable.
  local INSTALLED_VERSION=""
  if dpkg -l | grep -q "temurin-.*-jdk" 2>/dev/null; then
    INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "")
  fi

  # Validate INSTALLED_VERSION is not empty if JDK package found
  # (a package whose name doesn't parse gets purged and reinstalled fresh)
  local JDK_COUNT=0
  JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || true)
  if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then
    msg_warn "Found Temurin JDK but cannot determine version - attempting reinstall"
    # Try to get actual package name for purge
    local OLD_PACKAGE
    OLD_PACKAGE=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | head -n1 || echo "")
    if [[ -n "$OLD_PACKAGE" ]]; then
      msg_info "Removing existing package: $OLD_PACKAGE"
      $STD apt purge -y "$OLD_PACKAGE" || true
    fi
    INSTALLED_VERSION="" # Reset to trigger fresh install
  fi

  # Scenario 1: Already at correct version
  if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then
    msg_info "Update Temurin JDK $JAVA_VERSION"
    ensure_apt_working || return 1
    upgrade_packages_with_retry "$DESIRED_PACKAGE" || {
      msg_error "Failed to update Temurin JDK"
      return 1
    }
    cache_installed_version "temurin-jdk" "$JAVA_VERSION"
    msg_ok "Update Temurin JDK $JAVA_VERSION"
    return 0
  fi

  # Scenario 2: Different version - remove old and install new
  if [[ -n "$INSTALLED_VERSION" ]]; then
    msg_info "Upgrade Temurin JDK from $INSTALLED_VERSION to $JAVA_VERSION"
    $STD apt purge -y "temurin-${INSTALLED_VERSION}-jdk" || true
  else
    msg_info "Setup Temurin JDK $JAVA_VERSION"
  fi

  ensure_apt_working || return 1

  # Install with retry logic
  install_packages_with_retry "$DESIRED_PACKAGE" || {
    msg_error "Failed to install Temurin JDK $JAVA_VERSION"
    return 1
  }

  cache_installed_version "temurin-jdk" "$JAVA_VERSION"
  msg_ok "Setup Temurin JDK $JAVA_VERSION"
}

# ------------------------------------------------------------------------------
# Installs a local IP updater script using networkd-dispatcher.
#
# Description:
#   - Stores current IP in /run/local-ip.env
#   - Automatically runs on network changes
# ------------------------------------------------------------------------------

function setup_local_ip_helper() {
  # Installs a networkd-dispatcher hook that records the current primary IP
  # in /run/local-ip.env whenever an interface becomes routable.
  local BASE_DIR="/usr/local/community-scripts/ip-management"
  local SCRIPT_PATH="$BASE_DIR/update_local_ip.sh"
  local IP_FILE="/run/local-ip.env"
  local DISPATCHER_SCRIPT="/etc/networkd-dispatcher/routable.d/10-update-local-ip.sh"

  # Check if already set up
  # NOTE(review): this branch does not return — both scripts are rewritten
  # below either way, and a second msg_ok fires at the end; confirm intended.
  if [[ -f "$SCRIPT_PATH" && -f "$DISPATCHER_SCRIPT" ]]; then
    msg_info "Update Local IP Helper"
    cache_installed_version "local-ip-helper" "1.0"
    msg_ok "Update Local IP Helper"
  else
    msg_info "Setup Local IP Helper"
  fi

  mkdir -p "$BASE_DIR"

  # Install networkd-dispatcher if not present
  if ! dpkg -s networkd-dispatcher >/dev/null 2>&1; then
    ensure_dependencies networkd-dispatcher || {
      msg_error "Failed to install networkd-dispatcher"
      return 1
    }
  fi

  # Write update_local_ip.sh
  # (quoted 'EOF' delimiter: the heredoc body below is written out literally,
  # nothing is expanded by this shell)
  cat <<'EOF' >"$SCRIPT_PATH"
#!/bin/bash
set -euo pipefail

IP_FILE="/run/local-ip.env"
mkdir -p "$(dirname "$IP_FILE")"

get_current_ip() {
    local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
    local ip

    for target in "${targets[@]}"; do
        if [[ "$target" == "default" ]]; then
            ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
        else
            ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
        fi
        if [[ -n "$ip" ]]; then
            echo "$ip"
            return 0
        fi
    done

    return 1
}

current_ip="$(get_current_ip)"

if [[ -z "$current_ip" ]]; then
    echo "[ERROR] Could not detect local IP" >&2
    exit 1
fi

if [[ -f "$IP_FILE" ]]; then
    source "$IP_FILE"
    [[ "$LOCAL_IP" == "$current_ip" ]] && exit 0
fi

echo "LOCAL_IP=$current_ip" > "$IP_FILE"
echo "[INFO] LOCAL_IP updated to $current_ip"
EOF

  chmod +x "$SCRIPT_PATH"

  # Install dispatcher hook
  # (unquoted EOF delimiter: $SCRIPT_PATH expands now, baking the absolute
  # path of the updater script into the hook)
  mkdir -p "$(dirname "$DISPATCHER_SCRIPT")"
  cat <<EOF >"$DISPATCHER_SCRIPT"
#!/bin/bash
$SCRIPT_PATH
EOF

  chmod +x "$DISPATCHER_SCRIPT"
  systemctl enable -q --now networkd-dispatcher.service || {
    msg_warn "Failed to enable networkd-dispatcher service"
  }

  cache_installed_version "local-ip-helper" "1.0"
  msg_ok "Setup Local IP Helper"
}

# ------------------------------------------------------------------------------
# Installs or updates MariaDB from official repo.
#
# Description:
#   - Detects current MariaDB version and replaces it if necessary
#   - Preserves existing database data
#   - Dynamically determines latest GA version if "latest" is given
#
# Variables:
#   MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest)
# ------------------------------------------------------------------------------

setup_mariadb() {
  # Installs or updates MariaDB from the official mirror, resolving "latest"
  # to the newest GA release with multiple fallbacks (mariadb_repo_setup
  # script, then a hardcoded version).
  local MARIADB_VERSION="${MARIADB_VERSION:-latest}"

  # Resolve "latest" to actual version
  if [[ "$MARIADB_VERSION" == "latest" ]]; then
    if ! curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then
      msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback"
      # Try using official mariadb_repo_setup script as fallback
      if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
        msg_ok "MariaDB repository configured via mariadb_repo_setup"
        # Extract version from configured repo
        MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
      else
        msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
        MARIADB_VERSION="12.2"
      fi
    else
      # Parse the mirror's directory listing for the highest GA x.y.z version
      # (rc/ and rolling/ entries are excluded).
      MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null |
        grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
        grep -vE 'rc/|rolling/' |
        sed 's|/||' |
        sort -Vr |
        head -n1 || echo "")

      if [[ -z "$MARIADB_VERSION" ]]; then
        msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup"
        if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
          msg_ok "MariaDB repository configured via mariadb_repo_setup"
          MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
        else
          msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
          MARIADB_VERSION="12.2"
        fi
      fi
    fi
  fi

  # Get currently installed version
  local CURRENT_VERSION=""
  CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true

  # Scenario 1: Already installed at target version - just update packages
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then
    msg_info "Update MariaDB $MARIADB_VERSION"

    # Ensure APT is working
    ensure_apt_working || return 1

    # Check if repository needs to be refreshed
    # (compare the major.minor recorded in the repo file against the target)
    if [[ -f /etc/apt/sources.list.d/mariadb.sources ]]; then
      local REPO_VERSION=""
      REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || echo "")
      if [[ -n "$REPO_VERSION" && "$REPO_VERSION" != "${MARIADB_VERSION%.*}" ]]; then
        msg_warn "Repository version mismatch, updating..."
        manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \
          "https://mariadb.org/mariadb_release_signing_key.asc" || {
          msg_error "Failed to update MariaDB repository"
          return 1
        }
      fi
    fi

    # Perform upgrade with retry logic
    ensure_apt_working || return 1
    upgrade_packages_with_retry "mariadb-server" "mariadb-client" || {
      msg_error "Failed to upgrade MariaDB packages"
      return 1
    }
    cache_installed_version "mariadb" "$MARIADB_VERSION"
    msg_ok "Update MariaDB $MARIADB_VERSION"
    return 0
  fi

  # Scenario 2: Different version installed - clean upgrade
  # (data is preserved; only the packages/repo are swapped)
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MARIADB_VERSION" ]]; then
    msg_info "Upgrade MariaDB from $CURRENT_VERSION to $MARIADB_VERSION"
    remove_old_tool_version "mariadb"
  fi

  # Scenario 3: Fresh install or version change
  msg_info "Setup MariaDB $MARIADB_VERSION"

  # Prepare repository (cleanup + validation)
  prepare_repository_setup "mariadb" || {
    msg_error "Failed to prepare MariaDB repository"
    return 1
  }

  # Install required dependencies first
  # (only those that actually exist in the current APT universe)
  local mariadb_deps=()
  for dep in gawk rsync socat libdbi-perl pv; do
    if apt-cache search "^${dep}$" 2>/dev/null | grep -q .; then
      mariadb_deps+=("$dep")
    fi
  done

  if [[ ${#mariadb_deps[@]} -gt 0 ]]; then
    $STD apt install -y "${mariadb_deps[@]}" 2>/dev/null || true
  fi

  # Setup repository
  manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \
    "https://mariadb.org/mariadb_release_signing_key.asc" || {
    msg_error "Failed to setup MariaDB repository"
    return 1
  }

  # Set debconf selections for all potential versions
  # (suppresses the interactive "feedback" prompt during install)
  local MARIADB_MAJOR_MINOR
  MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
  if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
    echo "mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false" | debconf-set-selections
  fi

  # Install packages with retry logic
  export DEBIAN_FRONTEND=noninteractive
  if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then
    # Fallback: try without specific version
    # (drop the upstream repo and retry against the distro's own packages)
    msg_warn "Failed to install MariaDB packages from upstream repo, trying distro fallback..."
    cleanup_old_repo_files "mariadb"
    $STD apt update || {
      msg_warn "APT update also failed, continuing with cache"
    }
    install_packages_with_retry "mariadb-server" "mariadb-client" || {
      msg_error "Failed to install MariaDB packages (both upstream and distro)"
      return 1
    }
  fi

  cache_installed_version "mariadb" "$MARIADB_VERSION"
  msg_ok "Setup MariaDB $MARIADB_VERSION"
}

# ------------------------------------------------------------------------------
# Creates MariaDB database with user, charset and optional extra grants/modes
#
# Description:
#   - Generates password if empty
#   - Creates database with utf8mb4_unicode_ci
#   - Creates local user with password
#   - Grants full access to this DB
#   - Optional: apply extra GRANT statements (comma-separated)
#   - Optional: apply custom GLOBAL sql_mode
#   - Saves credentials to file
#   - Exports variables for use in calling script
#
# Usage:
#   MARIADB_DB_NAME="myapp_db" MARIADB_DB_USER="myapp_user" setup_mariadb_db
#   MARIADB_DB_NAME="domain_monitor" MARIADB_DB_USER="domainmonitor" setup_mariadb_db
#   MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp" MARIADB_DB_EXTRA_GRANTS="GRANT SELECT ON \`mysql\`.\`time_zone_name\`" setup_mariadb_db
#   MARIADB_DB_NAME="ghostfolio" MARIADB_DB_USER="ghostfolio" MARIADB_DB_SQL_MODE="" setup_mariadb_db
#
# Variables:
#   MARIADB_DB_NAME         - Database name (required)
#   MARIADB_DB_USER         - Database user (required)
#   MARIADB_DB_PASS         - User password (optional, auto-generated if empty)
#   MARIADB_DB_EXTRA_GRANTS - Comma-separated GRANT statements (optional)
#                             Example: "GRANT SELECT ON \`mysql\`.\`time_zone_name\`"
#   MARIADB_DB_SQL_MODE     - Optional global sql_mode override (e.g. "", "STRICT_TRANS_TABLES")
#   MARIADB_DB_CREDS_FILE   - Credentials file path (optional, default: ~/${APPLICATION}.creds)
#
# Exports:
#   MARIADB_DB_NAME, MARIADB_DB_USER, MARIADB_DB_PASS
# ------------------------------------------------------------------------------

function setup_mariadb_db() {
  # Validate required inputs before touching the server.
  if [[ -z "${MARIADB_DB_NAME:-}" || -z "${MARIADB_DB_USER:-}" ]]; then
    msg_error "MARIADB_DB_NAME and MARIADB_DB_USER must be set before calling setup_mariadb_db"
    return 1
  fi

  # Auto-generate a 13-char alphanumeric password when none was provided.
  if [[ -z "${MARIADB_DB_PASS:-}" ]]; then
    MARIADB_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
  fi

  msg_info "Setting up MariaDB Database"

  # Create database, user and full grant on that database. Identifiers are
  # backtick-quoted; DB name/user must not themselves contain backticks/quotes.
  $STD mariadb -u root -e "CREATE DATABASE \`$MARIADB_DB_NAME\` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
  $STD mariadb -u root -e "CREATE USER '$MARIADB_DB_USER'@'localhost' IDENTIFIED BY '$MARIADB_DB_PASS';"
  $STD mariadb -u root -e "GRANT ALL ON \`$MARIADB_DB_NAME\`.* TO '$MARIADB_DB_USER'@'localhost';"

  # Optional extra grants: comma-separated GRANT statements; the target user
  # clause is appended to each one.
  if [[ -n "${MARIADB_DB_EXTRA_GRANTS:-}" ]]; then
    IFS=',' read -ra G_LIST <<<"${MARIADB_DB_EXTRA_GRANTS:-}"
    for g in "${G_LIST[@]}"; do
      g=$(echo "$g" | xargs) # trim surrounding whitespace
      $STD mariadb -u root -e "$g TO '$MARIADB_DB_USER'@'localhost';"
    done
  fi

  # Optional sql_mode override. Use the "set (possibly empty)" test so the
  # documented MARIADB_DB_SQL_MODE="" usage (reset sql_mode, e.g. Ghostfolio)
  # is honored too — a plain -n check silently skipped the empty-string case.
  if [[ -n "${MARIADB_DB_SQL_MODE+x}" ]]; then
    $STD mariadb -u root -e "SET GLOBAL sql_mode='${MARIADB_DB_SQL_MODE}';"
  fi

  $STD mariadb -u root -e "FLUSH PRIVILEGES;"

  # Persist credentials (append, so reruns keep earlier entries) and restrict
  # the file to owner-only since it contains a plaintext password.
  local app_name="${APPLICATION,,}"
  local CREDS_FILE="${MARIADB_DB_CREDS_FILE:-${HOME}/${app_name}.creds}"
  {
    echo "MariaDB Credentials"
    echo "Database: $MARIADB_DB_NAME"
    echo "User: $MARIADB_DB_USER"
    echo "Password: $MARIADB_DB_PASS"
  } >>"$CREDS_FILE"
  chmod 600 "$CREDS_FILE" 2>/dev/null || true

  msg_ok "Set up MariaDB Database"

  # Expose resolved values to the calling install script.
  export MARIADB_DB_NAME
  export MARIADB_DB_USER
  export MARIADB_DB_PASS
}

# ------------------------------------------------------------------------------
# Installs or updates MongoDB to specified major version.
#
# Description:
#   - Preserves data across installations
#   - Adds official MongoDB repo
#
# Variables:
#   MONGO_VERSION  - MongoDB major version to install (e.g. 7.0, 8.0)
# ------------------------------------------------------------------------------

function setup_mongodb() {
  # Target major version; callers may pre-set MONGO_VERSION (default 8.0).
  local MONGO_VERSION="${MONGO_VERSION:-8.0}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(get_os_info id)
  DISTRO_CODENAME=$(get_os_info codename)

  # Check AVX support: MongoDB > 5.x ships AVX-compiled binaries, so bail out
  # early on non-AVX CPUs instead of failing later at service start.
  if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then
    local major="${MONGO_VERSION%%.*}"
    if ((major > 5)); then
      msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system."
      return 1
    fi
  fi

  # Only Debian and Ubuntu have official MongoDB APT repositories.
  case "$DISTRO_ID" in
  ubuntu)
    MONGO_BASE_URL="https://repo.mongodb.org/apt/ubuntu"
    ;;
  debian)
    MONGO_BASE_URL="https://repo.mongodb.org/apt/debian"
    ;;
  *)
    msg_error "Unsupported distribution: $DISTRO_ID"
    return 1
    ;;
  esac

  # Get currently installed version (empty string when MongoDB is absent)
  local INSTALLED_VERSION=""
  INSTALLED_VERSION=$(is_tool_installed "mongodb" 2>/dev/null) || true

  # Scenario 1: Already at target version - just update packages
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then
    msg_info "Update MongoDB $MONGO_VERSION"

    ensure_apt_working || return 1

    # Perform upgrade with retry logic
    upgrade_packages_with_retry "mongodb-org" || {
      msg_error "Failed to upgrade MongoDB"
      return 1
    }
    cache_installed_version "mongodb" "$MONGO_VERSION"
    msg_ok "Update MongoDB $MONGO_VERSION"
    return 0
  fi

  # Scenario 2: Different version installed - clean upgrade.
  # Only packages/repos are removed here; /var/lib/mongodb is left in place
  # (the header's "preserves data" claim rests on remove_old_tool_version —
  # NOTE(review): confirm that helper does not touch the data directory).
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$MONGO_VERSION" ]]; then
    msg_info "Upgrade MongoDB from $INSTALLED_VERSION to $MONGO_VERSION"
    remove_old_tool_version "mongodb"
  else
    msg_info "Setup MongoDB $MONGO_VERSION"
  fi

  cleanup_orphaned_sources

  # Prepare repository (cleanup + validation)
  prepare_repository_setup "mongodb" || {
    msg_error "Failed to prepare MongoDB repository"
    return 1
  }

  # Setup repository; MongoDB publishes a separate signing key per major
  # version, hence the version-specific .asc URL.
  manage_tool_repository "mongodb" "$MONGO_VERSION" "$MONGO_BASE_URL" \
    "https://www.mongodb.org/static/pgp/server-${MONGO_VERSION}.asc" || {
    msg_error "Failed to setup MongoDB repository"
    return 1
  }

  # Wait for repo to settle; a failed update here usually means upstream has
  # no suite for this distro/codename combination.
  $STD apt update || {
    msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?"
    return 1
  }

  # Install MongoDB with retry logic
  install_packages_with_retry "mongodb-org" || {
    msg_error "Failed to install MongoDB packages"
    return 1
  }

  if ! command -v mongod >/dev/null 2>&1; then
    msg_error "MongoDB binary not found after installation"
    return 1
  fi

  # Ensure the data directory exists and is owned by the service account.
  mkdir -p /var/lib/mongodb
  chown -R mongodb:mongodb /var/lib/mongodb

  $STD systemctl enable mongod || {
    msg_warn "Failed to enable mongod service"
  }
  safe_service_restart mongod

  # Verify MongoDB version (non-fatal: a mismatch only warns via the helper).
  # Re-declaring the local is harmless in bash; it reuses the variable above.
  local INSTALLED_VERSION
  INSTALLED_VERSION=$(mongod --version 2>/dev/null | grep -oP 'db version v\K[0-9]+\.[0-9]+' | head -n1 || echo "0.0")
  verify_tool_version "MongoDB" "$MONGO_VERSION" "$INSTALLED_VERSION" || true

  cache_installed_version "mongodb" "$MONGO_VERSION"
  msg_ok "Setup MongoDB $MONGO_VERSION"
}

# ------------------------------------------------------------------------------
# Installs or upgrades MySQL and configures APT repo.
#
# Description:
#   - Detects existing MySQL installation
#   - Purges conflicting packages before installation
#   - Supports clean upgrade
#   - Handles Debian Trixie libaio1t64 transition
#
# Variables:
#   MYSQL_VERSION  - MySQL version to install (e.g. 5.7, 8.0) (default: 8.0)
# ------------------------------------------------------------------------------

function setup_mysql() {
  # Target version (major.minor); callers may pre-set MYSQL_VERSION.
  local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
  local DISTRO_ID DISTRO_CODENAME
  # Read distro identity straight from os-release (no helper dependency here).
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Get currently installed version (empty string when MySQL is absent)
  local CURRENT_VERSION=""
  CURRENT_VERSION=$(is_tool_installed "mysql" 2>/dev/null) || true

  # Scenario 1: Already at target version - just update packages
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MYSQL_VERSION" ]]; then
    msg_info "Update MySQL $MYSQL_VERSION"

    ensure_apt_working || return 1

    # Perform upgrade with retry logic (non-fatal if fails)
    upgrade_packages_with_retry "mysql-server" "mysql-client" || true

    cache_installed_version "mysql" "$MYSQL_VERSION"
    msg_ok "Update MySQL $MYSQL_VERSION"
    return 0
  fi

  # Scenario 2: Different version installed - clean upgrade
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then
    msg_info "Upgrade MySQL from $CURRENT_VERSION to $MYSQL_VERSION"
    remove_old_tool_version "mysql"
  else
    msg_info "Setup MySQL $MYSQL_VERSION"
  fi

  # Prepare repository (cleanup + validation)
  prepare_repository_setup "mysql" || {
    msg_error "Failed to prepare MySQL repository"
    return 1
  }

  # Debian 13+ Fix: MySQL 8.0 incompatible with libaio1t64, use 8.4 LTS.
  # Oracle only publishes bookworm suites, so the repo below pins
  # "Suites: bookworm" even on trixie/forky/sid.
  if [[ "$DISTRO_ID" == "debian" && "$DISTRO_CODENAME" =~ ^(trixie|forky|sid)$ ]]; then
    msg_info "Debian ${DISTRO_CODENAME} detected → using MySQL 8.4 LTS (libaio1t64 compatible)"

    # NOTE(review): assumes /etc/apt/keyrings exists and that any stale
    # mysql.gpg was removed by prepare_repository_setup — gpg --dearmor may
    # refuse to overwrite an existing output file; confirm.
    if ! curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2023 | gpg --dearmor -o /etc/apt/keyrings/mysql.gpg 2>/dev/null; then
      msg_error "Failed to import MySQL GPG key"
      return 1
    fi

    # deb822-format source entry for the MySQL 8.4 LTS component.
    cat >/etc/apt/sources.list.d/mysql.sources <<EOF
Types: deb
URIs: https://repo.mysql.com/apt/debian/
Suites: bookworm
Components: mysql-8.4-lts
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/mysql.gpg
EOF

    $STD apt update || {
      msg_error "Failed to update APT for MySQL 8.4 LTS"
      return 1
    }

    # Install with retry logic
    if ! install_packages_with_retry "mysql-community-server" "mysql-community-client"; then
      # Last resort on newer Debian: drop the Oracle repo and fall back to
      # the distro's MariaDB so the app install still succeeds.
      msg_warn "MySQL 8.4 LTS installation failed – falling back to MariaDB"
      cleanup_old_repo_files "mysql"
      $STD apt update
      install_packages_with_retry "mariadb-server" "mariadb-client" || {
        msg_error "Failed to install database engine (MySQL/MariaDB fallback)"
        return 1
      }
      msg_ok "Setup Database Engine (MariaDB fallback on Debian ${DISTRO_CODENAME})"
      return 0
    fi

    # Cache the actually installed 8.4, not the requested $MYSQL_VERSION.
    cache_installed_version "mysql" "8.4"
    msg_ok "Setup MySQL 8.4 LTS (Debian ${DISTRO_CODENAME})"
    return 0
  fi

  # Standard setup for other distributions: pick a repo suite that exists
  # upstream (Oracle lags behind new Debian codenames).
  local SUITE
  if [[ "$DISTRO_ID" == "debian" ]]; then
    case "$DISTRO_CODENAME" in
    bookworm | bullseye) SUITE="$DISTRO_CODENAME" ;;
    *) SUITE="bookworm" ;;
    esac
  else
    SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://repo.mysql.com/apt/${DISTRO_ID}")
  fi

  # Setup repository
  manage_tool_repository "mysql" "$MYSQL_VERSION" "https://repo.mysql.com/apt/${DISTRO_ID}" \
    "https://repo.mysql.com/RPM-GPG-KEY-mysql-2023" || {
    msg_error "Failed to setup MySQL repository"
    return 1
  }

  ensure_apt_working || return 1

  # Try multiple package names with retry logic: Oracle repos name the
  # packages differently across versions/distros.
  export DEBIAN_FRONTEND=noninteractive
  local mysql_install_success=false

  if apt-cache search "^mysql-server$" 2>/dev/null | grep -q . &&
    install_packages_with_retry "mysql-server" "mysql-client"; then
    mysql_install_success=true
  elif apt-cache search "^mysql-community-server$" 2>/dev/null | grep -q . &&
    install_packages_with_retry "mysql-community-server" "mysql-community-client"; then
    mysql_install_success=true
  elif apt-cache search "^mysql$" 2>/dev/null | grep -q . &&
    install_packages_with_retry "mysql"; then
    mysql_install_success=true
  fi

  if [[ "$mysql_install_success" == false ]]; then
    msg_error "MySQL ${MYSQL_VERSION} package not available for suite ${SUITE}"
    return 1
  fi

  # Verify mysql command is accessible; hash -r clears bash's cached lookup
  # in case an old path is remembered.
  if ! command -v mysql >/dev/null 2>&1; then
    hash -r
    if ! command -v mysql >/dev/null 2>&1; then
      msg_error "MySQL installed but mysql command still not found"
      return 1
    fi
  fi

  cache_installed_version "mysql" "$MYSQL_VERSION"
  msg_ok "Setup MySQL $MYSQL_VERSION"
}

# ------------------------------------------------------------------------------
# Installs Node.js and optional global modules.
#
# Description:
#   - Installs specified Node.js version using NodeSource APT repo
#   - Optionally installs or updates global npm modules
#
# Variables:
#   NODE_VERSION   - Node.js version to install (default: 24 LTS)
#   NODE_MODULE    - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0")
# ------------------------------------------------------------------------------

function setup_nodejs() {
  # Target Node.js major version and optional comma-separated global modules.
  local NODE_VERSION="${NODE_VERSION:-24}"
  local NODE_MODULE="${NODE_MODULE:-}"

  # ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts
  cleanup_legacy_install "nodejs"

  # Get currently installed major version (empty string when Node.js is absent)
  local CURRENT_NODE_VERSION=""
  CURRENT_NODE_VERSION=$(is_tool_installed "nodejs" 2>/dev/null) || true

  # Ensure jq is available for JSON parsing
  if ! command -v jq &>/dev/null; then
    $STD apt update
    $STD apt install -y jq || {
      msg_error "Failed to install jq"
      return 1
    }
  fi

  # Scenario 1: Already installed at target version - just update packages/modules
  if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" == "$NODE_VERSION" ]]; then
    msg_info "Update Node.js $NODE_VERSION"

    ensure_apt_working || return 1

    # Just update npm to latest
    $STD npm install -g npm@latest 2>/dev/null || true

    cache_installed_version "nodejs" "$NODE_VERSION"
    msg_ok "Update Node.js $NODE_VERSION"
  else
    # Scenario 2: Different version installed - clean upgrade
    if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
      msg_info "Upgrade Node.js from $CURRENT_NODE_VERSION to $NODE_VERSION"
      remove_old_tool_version "nodejs"
    else
      msg_info "Setup Node.js $NODE_VERSION"
    fi

    # Remove ALL Debian nodejs packages BEFORE adding NodeSource repo,
    # otherwise mixed Debian/NodeSource installs produce dpkg conflicts.
    if dpkg -l 2>/dev/null | grep -qE "^ii.*(nodejs|libnode|node-cjs|node-acorn|node-balanced|node-brace|node-minimatch|node-undici|node-xtend|node-corepack)"; then
      msg_info "Removing Debian-packaged Node.js and dependencies"
      $STD apt purge -y nodejs nodejs-doc libnode* node-* 2>/dev/null || true
      $STD apt autoremove -y 2>/dev/null || true
      $STD apt clean 2>/dev/null || true
    fi

    # Remove any APT pinning (not needed)
    rm -f /etc/apt/preferences.d/nodesource 2>/dev/null || true

    # Prepare repository (cleanup + validation)
    prepare_repository_setup "nodesource" || {
      msg_error "Failed to prepare Node.js repository"
      return 1
    }

    # Setup NodeSource repository
    manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || {
      msg_error "Failed to setup Node.js repository"
      return 1
    }

    # Force APT cache refresh after repository setup
    $STD apt update

    ensure_dependencies curl ca-certificates gnupg

    install_packages_with_retry "nodejs" || {
      msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
      return 1
    }

    # Verify Node.js was installed correctly
    if ! command -v node >/dev/null 2>&1; then
      msg_error "Node.js binary not found after installation"
      return 1
    fi

    # Non-fatal version check (helper only warns on mismatch).
    local INSTALLED_NODE_VERSION
    INSTALLED_NODE_VERSION=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+' || echo "0")
    verify_tool_version "Node.js" "$NODE_VERSION" "$INSTALLED_NODE_VERSION" || true

    # Verify npm is available (should come with NodeSource nodejs)
    if ! command -v npm >/dev/null 2>&1; then
      msg_error "npm not found after Node.js installation - repository issue?"
      return 1
    fi

    # Update to latest npm (with version check to avoid incompatibility)
    local NPM_VERSION
    NPM_VERSION=$(npm -v 2>/dev/null || echo "0")
    if [[ "$NPM_VERSION" != "0" ]]; then
      $STD npm install -g npm@latest 2>/dev/null || {
        msg_warn "Failed to update npm to latest version (continuing with bundled npm $NPM_VERSION)"
      }
    fi

    cache_installed_version "nodejs" "$NODE_VERSION"
    msg_ok "Setup Node.js $NODE_VERSION"
  fi

  export NODE_OPTIONS="--max-old-space-size=4096"

  # Ensure valid working directory for npm (avoids uv_cwd error)
  if [[ ! -d /opt ]]; then
    mkdir -p /opt
  fi
  cd /opt || {
    msg_error "Failed to set safe working directory before npm install"
    return 1
  }

  # Install global Node modules
  if [[ -n "$NODE_MODULE" ]]; then
    IFS=',' read -ra MODULES <<<"$NODE_MODULE"
    local failed_modules=0
    for mod in "${MODULES[@]}"; do
      local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
      if [[ "$mod" == @* ]]; then
        # Scoped package (@scope/name). Only an '@' AFTER the scope prefix
        # separates a version: "@vue/cli@5.0.0" → name=@vue/cli ver=5.0.0.
        # A bare scoped name like "@vue/cli" has no version → latest.
        # (The previous pattern required a version, so bare scoped names fell
        # through to the unscoped branch, where ${mod%@*} stripped the whole
        # string via the leading '@' and produced an empty module name.)
        if [[ "${mod#@}" == *"@"* ]]; then
          MODULE_NAME="${mod%@*}"
          MODULE_REQ_VERSION="${mod##*@}"
        else
          MODULE_NAME="$mod"
          MODULE_REQ_VERSION="latest"
        fi
      elif [[ "$mod" == *"@"* ]]; then
        # Unscoped package with version, e.g. yarn@latest
        MODULE_NAME="${mod%@*}"
        MODULE_REQ_VERSION="${mod##*@}"
      else
        # No version specified
        MODULE_NAME="$mod"
        MODULE_REQ_VERSION="latest"
      fi

      # Check if the module is already installed.
      # NOTE(review): using $STD inside a pipeline/command substitution assumes
      # the wrapper passes stdout through — confirm against $STD's definition.
      if $STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep -q "$MODULE_NAME@"; then
        MODULE_INSTALLED_VERSION="$($STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')"
        if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
          msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
          if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then
            msg_warn "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
            # Plain assignment: ((x++)) returns status 1 on the 0→1 step,
            # which would abort the script under `set -e`.
            failed_modules=$((failed_modules + 1))
            continue
          fi
        elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
          msg_info "Updating $MODULE_NAME to latest version"
          if ! $STD npm install -g "${MODULE_NAME}@latest" 2>/dev/null; then
            msg_warn "Failed to update $MODULE_NAME to latest version"
            failed_modules=$((failed_modules + 1))
            continue
          fi
        fi
      else
        msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
        if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then
          msg_warn "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
          failed_modules=$((failed_modules + 1))
          continue
        fi
      fi
    done
    if [[ $failed_modules -eq 0 ]]; then
      msg_ok "Installed Node.js modules: $NODE_MODULE"
    else
      msg_warn "Installed Node.js modules with $failed_modules failure(s): $NODE_MODULE"
    fi
  fi
}

# ------------------------------------------------------------------------------
# Installs PHP with selected modules and configures Apache/FPM support.
#
# Description:
#   - Adds Sury PHP repo if needed
#   - Installs default and user-defined modules
#   - Patches php.ini for CLI, Apache, and FPM as needed
#
# Variables:
#   PHP_VERSION                - PHP version to install (default: 8.4)
#   PHP_MODULE                 - Additional comma-separated modules
#   PHP_APACHE                 - Set YES to enable PHP with Apache
#   PHP_FPM                    - Set YES to enable PHP-FPM
#   PHP_MEMORY_LIMIT           - (default: 512M)
#   PHP_UPLOAD_MAX_FILESIZE    - (default: 128M)
#   PHP_POST_MAX_SIZE          - (default: 128M)
#   PHP_MAX_EXECUTION_TIME     - (default: 300)
# ------------------------------------------------------------------------------

function setup_php() {
  # Target PHP version, optional extra modules, and Apache/FPM toggles.
  local PHP_VERSION="${PHP_VERSION:-8.4}"
  local PHP_MODULE="${PHP_MODULE:-}"
  local PHP_APACHE="${PHP_APACHE:-NO}"
  local PHP_FPM="${PHP_FPM:-NO}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Baseline module set every install gets; user modules are merged on top.
  local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip"
  local COMBINED_MODULES

  # php.ini values applied to every patched SAPI (CLI/FPM/Apache).
  local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}"
  local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}"
  local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}"
  local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}"

  # Merge default + user-defined modules
  if [[ -n "$PHP_MODULE" ]]; then
    COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}"
  else
    COMBINED_MODULES="${DEFAULT_MODULES}"
  fi

  # Deduplicate (keeps first-occurrence order)
  COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -)

  # Get current PHP-CLI version (empty string when PHP is absent)
  local CURRENT_PHP=""
  CURRENT_PHP=$(is_tool_installed "php" 2>/dev/null) || true

  # Remove conflicting PHP version before pinning
  if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
    msg_info "Removing conflicting PHP ${CURRENT_PHP} (need ${PHP_VERSION})"
    stop_all_services "php.*-fpm"
    # The glob is quoted for the shell; apt expands "php*" itself.
    $STD apt purge -y "php*" 2>/dev/null || true
    $STD apt autoremove -y 2>/dev/null || true
  fi

  # NOW create pinning for the desired version:
  # - priority 1001 forces the requested phpX.Y.* packages (even downgrades)
  # - priority -1 blocks every other Sury PHP version from being pulled in
  mkdir -p /etc/apt/preferences.d
  cat <<EOF >/etc/apt/preferences.d/php-pin
Package: php${PHP_VERSION}*
Pin: version ${PHP_VERSION}.*
Pin-Priority: 1001

Package: php[0-9].*
Pin: release o=packages.sury.org-php
Pin-Priority: -1
EOF

  # Setup repository (Sury provides multiple PHP versions for Debian)
  prepare_repository_setup "php" "deb.sury.org-php" || {
    msg_error "Failed to prepare PHP repository"
    return 1
  }

  manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
    msg_error "Failed to setup PHP repository"
    return 1
  }

  ensure_apt_working || return 1
  $STD apt update

  # Get available PHP version from repository; empty means the requested
  # version has no package in any configured source.
  local AVAILABLE_PHP_VERSION=""
  AVAILABLE_PHP_VERSION=$(apt-cache show "php${PHP_VERSION}" 2>/dev/null | grep -m1 "^Version:" | awk '{print $2}' | cut -d- -f1) || true

  if [[ -z "$AVAILABLE_PHP_VERSION" ]]; then
    msg_error "PHP ${PHP_VERSION} not found in configured repositories"
    return 1
  fi

  # Build module list - without version pinning (preferences.d handles it)
  local MODULE_LIST="php${PHP_VERSION}"

  IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
  for mod in "${MODULES[@]}"; do
    MODULE_LIST+=" php${PHP_VERSION}-${mod}"
  done

  if [[ "$PHP_FPM" == "YES" ]]; then
    MODULE_LIST+=" php${PHP_VERSION}-fpm"
  fi

  # install apache2 with PHP support if requested
  if [[ "$PHP_APACHE" == "YES" ]]; then
    if ! dpkg -l 2>/dev/null | grep -q "libapache2-mod-php${PHP_VERSION}"; then
      msg_info "Installing Apache with PHP ${PHP_VERSION} module"
      install_packages_with_retry "apache2" || {
        msg_error "Failed to install Apache"
        return 1
      }
      install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || {
        msg_warn "Failed to install libapache2-mod-php${PHP_VERSION}, continuing without Apache module"
      }
    fi
  fi

  # Install PHP packages (pinning via preferences.d ensures correct version).
  # $MODULE_LIST is intentionally unquoted: word-splitting turns the
  # space-separated string into individual package arguments.
  msg_info "Installing PHP ${PHP_VERSION} packages"
  if ! install_packages_with_retry $MODULE_LIST; then
    # Bulk install failed (often one nonexistent module package); fall back
    # to installing packages one by one so the rest still succeed.
    msg_warn "Failed to install PHP packages, attempting individual installation"

    # Install main package first (critical)
    install_packages_with_retry "php${PHP_VERSION}" || {
      msg_error "Failed to install php${PHP_VERSION}"
      return 1
    }

    # Try to install Apache module individually if requested
    if [[ "$PHP_APACHE" == "YES" ]]; then
      install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || {
        msg_warn "Could not install libapache2-mod-php${PHP_VERSION}"
      }
    fi

    # Try to install modules individually - skip those that don't exist
    for pkg in "${MODULES[@]}"; do
      if apt-cache search "^php${PHP_VERSION}-${pkg}\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-${pkg}"; then
        install_packages_with_retry "php${PHP_VERSION}-${pkg}" || {
          msg_warn "Could not install php${PHP_VERSION}-${pkg}"
        }
      fi
    done

    if [[ "$PHP_FPM" == "YES" ]]; then
      if apt-cache search "^php${PHP_VERSION}-fpm\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-fpm"; then
        install_packages_with_retry "php${PHP_VERSION}-fpm" || {
          msg_warn "Could not install php${PHP_VERSION}-fpm"
        }
      fi
    fi
  fi
  # Provisional cache entry; overwritten below with the verified version.
  cache_installed_version "php" "$PHP_VERSION"

  # Patch all relevant php.ini files (CLI always; FPM/Apache when enabled)
  local PHP_INI_PATHS=("/etc/php/${PHP_VERSION}/cli/php.ini")
  [[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini")
  [[ "$PHP_APACHE" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini")
  for ini in "${PHP_INI_PATHS[@]}"; do
    if [[ -f "$ini" ]]; then
      $STD sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini"
      $STD sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini"
      $STD sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini"
      $STD sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini"
    fi
  done

  # Patch Apache configuration if needed: disable every other phpX.Y module
  # (mod_php versions are mutually exclusive), then enable the target one.
  if [[ "$PHP_APACHE" == "YES" ]]; then
    for mod in $(ls /etc/apache2/mods-enabled/ 2>/dev/null | grep -E '^php[0-9]\.[0-9]\.conf$' | sed 's/\.conf//'); do
      if [[ "$mod" != "php${PHP_VERSION}" ]]; then
        $STD a2dismod "$mod" || true
      fi
    done
    # mod_php requires the non-threaded prefork MPM.
    $STD a2enmod mpm_prefork
    $STD a2enmod "php${PHP_VERSION}"
    safe_service_restart apache2 || true
  fi

  # Enable and restart PHP-FPM if requested
  if [[ "$PHP_FPM" == "YES" ]]; then
    if systemctl list-unit-files | grep -q "php${PHP_VERSION}-fpm.service"; then
      $STD systemctl enable php${PHP_VERSION}-fpm
      safe_service_restart php${PHP_VERSION}-fpm
    fi
  fi

  # Verify PHP installation - critical check
  if ! command -v php >/dev/null 2>&1; then
    msg_error "PHP installation verification failed - php command not found"
    return 1
  fi

  # major.minor of the php binary actually on PATH
  local INSTALLED_VERSION=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)

  if [[ "$INSTALLED_VERSION" != "$PHP_VERSION" ]]; then
    msg_error "PHP version mismatch: requested ${PHP_VERSION} but got ${INSTALLED_VERSION}"
    msg_error "This indicates a critical package installation issue"
    # Don't cache wrong version
    return 1
  fi

  cache_installed_version "php" "$INSTALLED_VERSION"
  msg_ok "Setup PHP ${INSTALLED_VERSION}"
}

# ------------------------------------------------------------------------------
# Installs or upgrades PostgreSQL and optional extensions/modules.
#
# Description:
#   - Detects existing PostgreSQL version
#   - Dumps all databases before upgrade
#   - Adds PGDG repo and installs specified version
#   - Installs optional PG_MODULES (e.g. postgis, contrib)
#   - Restores dumped data post-upgrade
#
# Variables:
#   PG_VERSION     - Major PostgreSQL version (e.g. 15, 16) (default: 16)
#   PG_MODULES     - Comma-separated list of extension packages (optional, e.g. "postgis,contrib")
# ------------------------------------------------------------------------------
function setup_postgresql() {
  # Target major version and optional extension packages (e.g. postgis).
  local PG_VERSION="${PG_VERSION:-16}"
  local PG_MODULES="${PG_MODULES:-}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Get currently installed major version via psql (empty when absent)
  local CURRENT_PG_VERSION=""
  if command -v psql >/dev/null; then
    CURRENT_PG_VERSION="$(psql -V 2>/dev/null | awk '{print $3}' | cut -d. -f1)"
  fi

  # Scenario 1: Already at correct version - just upgrade packages in place
  if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then
    msg_info "Update PostgreSQL $PG_VERSION"
    ensure_apt_working || return 1

    # Perform upgrade with retry logic (non-fatal if fails)
    upgrade_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null || true
    cache_installed_version "postgresql" "$PG_VERSION"
    msg_ok "Update PostgreSQL $PG_VERSION"

    # Still install modules if specified (best-effort)
    if [[ -n "$PG_MODULES" ]]; then
      IFS=',' read -ra MODULES <<<"$PG_MODULES"
      for module in "${MODULES[@]}"; do
        $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true
      done
    fi
    return 0
  fi

  # Scenario 2: Different version - backup, remove old, install new.
  # NOTE(review): `$STD cmd > file` assumes $STD forwards the command's stdout
  # to the outer redirect — if the wrapper discards stdout, the backup file
  # would be empty; confirm against the $STD definition.
  if [[ -n "$CURRENT_PG_VERSION" ]]; then
    msg_info "Upgrade PostgreSQL from $CURRENT_PG_VERSION to $PG_VERSION"
    msg_info "Creating backup of PostgreSQL $CURRENT_PG_VERSION databases..."
    $STD runuser -u postgres -- pg_dumpall >/var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql || {
      msg_error "Failed to backup PostgreSQL databases"
      return 1
    }
    $STD systemctl stop postgresql || true
    $STD apt purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" 2>/dev/null || true
  else
    msg_info "Setup PostgreSQL $PG_VERSION"
  fi

  # Scenario 3: Fresh install or after removal - setup repo and install
  prepare_repository_setup "pgdg" "postgresql" || {
    msg_error "Failed to prepare PostgreSQL repository"
    return 1
  }

  # Pick a PGDG suite that actually exists upstream; newer Debian codenames
  # fall back to bookworm-pgdg until PGDG publishes them.
  local SUITE
  case "$DISTRO_CODENAME" in
  trixie | forky | sid)

    if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then
      SUITE="trixie-pgdg"

    else
      SUITE="bookworm-pgdg"
    fi

    ;;
  *)
    SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt")
    SUITE="${SUITE}-pgdg"
    ;;
  esac

  setup_deb822_repo \
    "pgdg" \
    "https://www.postgresql.org/media/keys/ACCC4CF8.asc" \
    "https://apt.postgresql.org/pub/repos/apt" \
    "$SUITE" \
    "main"

  if ! $STD apt update; then
    msg_error "APT update failed for PostgreSQL repository"
    return 1
  fi

  # Install ssl-cert dependency if available (provides snakeoil certs)
  if apt-cache search "^ssl-cert$" 2>/dev/null | grep -q .; then
    $STD apt install -y ssl-cert 2>/dev/null || true
  fi

  # Try multiple PostgreSQL package patterns with retry logic:
  # PGDG, some derivatives, and distro repos name the server differently.
  local pg_install_success=false

  if apt-cache search "^postgresql-${PG_VERSION}$" 2>/dev/null | grep -q . &&
    install_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"; then
    pg_install_success=true
  fi

  if [[ "$pg_install_success" == false ]] &&
    apt-cache search "^postgresql-server-${PG_VERSION}$" 2>/dev/null | grep -q . &&
    $STD apt install -y "postgresql-server-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null; then
    pg_install_success=true
  fi

  if [[ "$pg_install_success" == false ]] &&
    apt-cache search "^postgresql$" 2>/dev/null | grep -q . &&
    $STD apt install -y postgresql postgresql-client 2>/dev/null; then
    pg_install_success=true
  fi

  if [[ "$pg_install_success" == false ]]; then
    msg_error "PostgreSQL package not available for suite ${SUITE}"
    return 1
  fi

  if ! command -v psql >/dev/null 2>&1; then
    msg_error "PostgreSQL installed but psql command not found"
    return 1
  fi

  # Restore database backup if we upgraded from a previous version.
  # NOTE(review): the restore path re-evaluates $(date +%F), so it only finds
  # the backup if restore happens the same day it was taken — confirm.
  if [[ -n "$CURRENT_PG_VERSION" ]]; then
    msg_info "Restoring PostgreSQL databases from backup..."
    $STD runuser -u postgres -- psql </var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql 2>/dev/null || {
      msg_warn "Failed to restore database backup - this may be expected for major version upgrades"
    }
  fi

  $STD systemctl enable --now postgresql 2>/dev/null || true

  # Add PostgreSQL binaries to PATH.
  # NOTE(review): the single `>` rewrites /etc/environment wholesale (any
  # other variables in that file are lost) — confirm this is intended.
  if ! grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then
    echo 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/'"${PG_VERSION}"'/bin"' >/etc/environment
  fi

  cache_installed_version "postgresql" "$PG_VERSION"
  msg_ok "Setup PostgreSQL $PG_VERSION"

  # Install optional modules (best-effort; failures are ignored)
  if [[ -n "$PG_MODULES" ]]; then
    IFS=',' read -ra MODULES <<<"$PG_MODULES"
    for module in "${MODULES[@]}"; do
      $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true
    done
  fi
}

# ------------------------------------------------------------------------------
# Creates PostgreSQL database with user and optional extensions
#
# Description:
#   - Creates PostgreSQL role with login and password
#   - Creates database with UTF8 encoding and template0
#   - Installs optional extensions (postgis, pgvector, etc.)
#   - Configures ALTER ROLE settings for Django/Rails compatibility
#   - Saves credentials to file
#   - Exports variables for use in calling script
#
# Usage:
#   PG_DB_NAME="myapp_db" PG_DB_USER="myapp_user" setup_postgresql_db
#   PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_EXTENSIONS="pgvector" setup_postgresql_db
#   PG_DB_NAME="ghostfolio" PG_DB_USER="ghostfolio" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db
#   PG_DB_NAME="adventurelog" PG_DB_USER="adventurelog" PG_DB_EXTENSIONS="postgis" setup_postgresql_db
#
# Variables:
#   PG_DB_NAME             - Database name (required)
#   PG_DB_USER             - Database user (required)
#   PG_DB_PASS             - Database password (optional, auto-generated if empty)
#   PG_DB_EXTENSIONS       - Comma-separated list of extensions (optional, e.g. "postgis,pgvector")
#   PG_DB_GRANT_SUPERUSER  - Grant SUPERUSER privilege (optional, "true" to enable, security risk!)
#   PG_DB_SCHEMA_PERMS     - Grant schema-level permissions (optional, "true" to enable)
#   PG_DB_SKIP_ALTER_ROLE  - Skip ALTER ROLE settings (optional, "true" to skip)
#   PG_DB_CREDS_FILE       - Credentials file path (optional, default: ~/${APPLICATION}.creds)
#
# Exports:
#   PG_DB_NAME, PG_DB_USER, PG_DB_PASS - For use in calling script
# ------------------------------------------------------------------------------

function setup_postgresql_db() {
  # Create a PostgreSQL role and database, optionally install extensions,
  # apply Django/Rails-friendly role settings, grant extra privileges, and
  # append the credentials to a creds file.
  #
  # Required: PG_DB_NAME, PG_DB_USER
  # Optional: PG_DB_PASS (auto-generated when empty), PG_DB_EXTENSIONS,
  #           PG_DB_SKIP_ALTER_ROLE, PG_DB_SCHEMA_PERMS,
  #           PG_DB_GRANT_SUPERUSER, PG_DB_CREDS_FILE
  # Exports:  PG_DB_NAME, PG_DB_USER, PG_DB_PASS
  # Returns:  1 when required variables are missing, 0 otherwise.

  # Validation
  if [[ -z "${PG_DB_NAME:-}" || -z "${PG_DB_USER:-}" ]]; then
    msg_error "PG_DB_NAME and PG_DB_USER must be set before calling setup_postgresql_db"
    return 1
  fi

  # Generate password if not provided (13 random alphanumeric characters)
  if [[ -z "${PG_DB_PASS:-}" ]]; then
    PG_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
  fi

  msg_info "Setting up PostgreSQL Database"
  # NOTE(review): identifiers are interpolated unquoted into SQL - callers
  # must pass trusted names (no quoting/escaping is performed here).
  $STD sudo -u postgres psql -c "CREATE ROLE $PG_DB_USER WITH LOGIN PASSWORD '$PG_DB_PASS';"
  $STD sudo -u postgres psql -c "CREATE DATABASE $PG_DB_NAME WITH OWNER $PG_DB_USER ENCODING 'UTF8' TEMPLATE template0;"

  # Install extensions (comma-separated)
  if [[ -n "${PG_DB_EXTENSIONS:-}" ]]; then
    IFS=',' read -ra EXT_LIST <<<"${PG_DB_EXTENSIONS:-}"
    for ext in "${EXT_LIST[@]}"; do
      ext=$(echo "$ext" | xargs) # Trim whitespace
      $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "CREATE EXTENSION IF NOT EXISTS $ext;"
    done
  fi

  # ALTER ROLE settings for Django/Rails compatibility (unless skipped)
  if [[ "${PG_DB_SKIP_ALTER_ROLE:-}" != "true" ]]; then
    $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET client_encoding TO 'utf8';"
    $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET default_transaction_isolation TO 'read committed';"
    $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET timezone TO 'UTC';"
  fi

  # Schema permissions (if requested)
  if [[ "${PG_DB_SCHEMA_PERMS:-}" == "true" ]]; then
    $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME TO $PG_DB_USER;"
    $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER CREATEDB;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT ALL ON SCHEMA public TO $PG_DB_USER;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT CREATE ON SCHEMA public TO $PG_DB_USER;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO $PG_DB_USER;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO $PG_DB_USER;"
  fi

  # Superuser grant (if requested - WARNING!)
  if [[ "${PG_DB_GRANT_SUPERUSER:-}" == "true" ]]; then
    msg_warn "Granting SUPERUSER privilege (security risk!)"
    $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME to $PG_DB_USER;"
    $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER WITH SUPERUSER;"
  fi

  # Save credentials
  # FIX: guard against unset APPLICATION (original "${APPLICATION,,}" aborts
  # under `set -u` and otherwise silently produces a hidden "~/.creds" file);
  # fall back to the database name.
  local app_name="${APPLICATION:-$PG_DB_NAME}"
  app_name="${app_name,,}"
  local CREDS_FILE="${PG_DB_CREDS_FILE:-${HOME}/${app_name}.creds}"
  {
    echo "PostgreSQL Credentials"
    echo "Database: $PG_DB_NAME"
    echo "User: $PG_DB_USER"
    echo "Password: $PG_DB_PASS"
  } >>"$CREDS_FILE"
  # The creds file holds a plaintext password - restrict it to the owner.
  chmod 600 "$CREDS_FILE" 2>/dev/null || true

  msg_ok "Set up PostgreSQL Database"

  # Export for use in calling script
  export PG_DB_NAME
  export PG_DB_USER
  export PG_DB_PASS
}

# ------------------------------------------------------------------------------
# Installs rbenv and ruby-build, installs Ruby and optionally Rails.
#
# Description:
#   - Downloads rbenv and ruby-build from GitHub
#   - Compiles and installs target Ruby version
#   - Optionally installs Rails via gem
#
# Variables:
#   RUBY_VERSION         - Ruby version to install (default: 3.4.4)
#   RUBY_INSTALL_RAILS   - true/false to install Rails (default: true)
# ------------------------------------------------------------------------------

function setup_ruby() {
  # Install (or upgrade) Ruby via rbenv + ruby-build and optionally Rails.
  #
  # Variables:
  #   RUBY_VERSION        - target Ruby version (default 3.4.4)
  #   RUBY_INSTALL_RAILS  - "true" to `gem install rails` afterwards
  #
  # Relies on helpers defined elsewhere in this file: msg_info/msg_ok/
  # msg_error/msg_warn, ensure_apt_working, cache_installed_version, and
  # $STD (command output wrapper).
  local RUBY_VERSION="${RUBY_VERSION:-3.4.4}"
  local RUBY_INSTALL_RAILS="${RUBY_INSTALL_RAILS:-true}"
  local RBENV_DIR="$HOME/.rbenv"
  local RBENV_BIN="$RBENV_DIR/bin/rbenv"
  local PROFILE_FILE="$HOME/.profile"
  local TMP_DIR=$(mktemp -d)

  # Get currently installed Ruby version
  # (`rbenv global` prints the active version name; empty if rbenv absent)
  local CURRENT_RUBY_VERSION=""
  if [[ -x "$RBENV_BIN" ]]; then
    CURRENT_RUBY_VERSION=$("$RBENV_BIN" global 2>/dev/null || echo "")
  fi

  # Scenario 1: Already at correct Ruby version - nothing to build
  if [[ "$CURRENT_RUBY_VERSION" == "$RUBY_VERSION" ]]; then
    msg_info "Update Ruby $RUBY_VERSION"
    cache_installed_version "ruby" "$RUBY_VERSION"
    msg_ok "Update Ruby $RUBY_VERSION"
    return 0
  fi

  # Scenario 2: Different version - reinstall
  if [[ -n "$CURRENT_RUBY_VERSION" ]]; then
    msg_info "Upgrade Ruby from $CURRENT_RUBY_VERSION to $RUBY_VERSION"
  else
    msg_info "Setup Ruby $RUBY_VERSION"
  fi

  ensure_apt_working || return 1

  # Install build dependencies with fallbacks.
  # Entries containing "|" list alternative package names (first one found
  # in the apt cache wins), e.g. libreadline-dev on current releases vs
  # libreadline6-dev on older ones.
  local ruby_deps=()
  local dep_variations=(
    "jq"
    "autoconf"
    "patch"
    "build-essential"
    "libssl-dev"
    "libyaml-dev"
    "libreadline-dev|libreadline6-dev"
    "zlib1g-dev"
    "libgmp-dev"
    "libncurses-dev|libncurses5-dev"
    "libffi-dev"
    "libgdbm-dev"
    "libdb-dev"
    "uuid-dev"
  )

  for dep_pattern in "${dep_variations[@]}"; do
    if [[ "$dep_pattern" == *"|"* ]]; then
      IFS='|' read -ra variations <<<"$dep_pattern"
      for var in "${variations[@]}"; do
        # Anchored regex so apt-cache search only matches the exact name.
        # NOTE(review): apt-cache search also scans descriptions; the anchors
        # make a description-only match unlikely but not impossible - confirm.
        if apt-cache search "^${var}$" 2>/dev/null | grep -q .; then
          ruby_deps+=("$var")
          break
        fi
      done
    else
      if apt-cache search "^${dep_pattern}$" 2>/dev/null | grep -q .; then
        ruby_deps+=("$dep_pattern")
      fi
    fi
  done

  if [[ ${#ruby_deps[@]} -gt 0 ]]; then
    # Best-effort: individual missing packages do not abort the setup.
    $STD apt install -y "${ruby_deps[@]}" 2>/dev/null || true
  else
    msg_error "No Ruby build dependencies available"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # Download and build rbenv if needed
  if [[ ! -x "$RBENV_BIN" ]]; then
    local RBENV_RELEASE
    local rbenv_json
    rbenv_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/rbenv/releases/latest 2>/dev/null || echo "")

    if [[ -z "$rbenv_json" ]]; then
      msg_error "Failed to fetch latest rbenv version from GitHub"
      rm -rf "$TMP_DIR"
      return 1
    fi

    # tag_name looks like "v1.3.0"; strip the leading "v" for URLs/paths
    RBENV_RELEASE=$(echo "$rbenv_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")

    if [[ -z "$RBENV_RELEASE" ]]; then
      msg_error "Could not parse rbenv version from GitHub response"
      rm -rf "$TMP_DIR"
      return 1
    fi

    curl -fsSL "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" -o "$TMP_DIR/rbenv.tar.gz" || {
      msg_error "Failed to download rbenv"
      rm -rf "$TMP_DIR"
      return 1
    }

    tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR" || {
      msg_error "Failed to extract rbenv"
      rm -rf "$TMP_DIR"
      return 1
    }

    mkdir -p "$RBENV_DIR"
    cp -r "$TMP_DIR/rbenv-${RBENV_RELEASE}/." "$RBENV_DIR/"
    # Compile rbenv's optional C extension (speeds up rbenv itself)
    (cd "$RBENV_DIR" && src/configure && $STD make -C src) || {
      msg_error "Failed to build rbenv"
      rm -rf "$TMP_DIR"
      return 1
    }

    # Setup profile so future login shells pick up rbenv (idempotent)
    if ! grep -q 'rbenv init' "$PROFILE_FILE" 2>/dev/null; then
      echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>"$PROFILE_FILE"
      echo 'eval "$(rbenv init -)"' >>"$PROFILE_FILE"
    fi
  fi

  # Install ruby-build plugin (provides the `rbenv install` subcommand)
  if [[ ! -d "$RBENV_DIR/plugins/ruby-build" ]]; then
    local RUBY_BUILD_RELEASE
    local ruby_build_json
    ruby_build_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/ruby-build/releases/latest 2>/dev/null || echo "")

    if [[ -z "$ruby_build_json" ]]; then
      msg_error "Failed to fetch latest ruby-build version from GitHub"
      rm -rf "$TMP_DIR"
      return 1
    fi

    RUBY_BUILD_RELEASE=$(echo "$ruby_build_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")

    if [[ -z "$RUBY_BUILD_RELEASE" ]]; then
      msg_error "Could not parse ruby-build version from GitHub response"
      rm -rf "$TMP_DIR"
      return 1
    fi

    curl -fsSL "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" -o "$TMP_DIR/ruby-build.tar.gz" || {
      msg_error "Failed to download ruby-build"
      rm -rf "$TMP_DIR"
      return 1
    }

    tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR" || {
      msg_error "Failed to extract ruby-build"
      rm -rf "$TMP_DIR"
      return 1
    }

    mkdir -p "$RBENV_DIR/plugins/ruby-build"
    cp -r "$TMP_DIR/ruby-build-${RUBY_BUILD_RELEASE}/." "$RBENV_DIR/plugins/ruby-build/"
  fi

  # Setup PATH and install Ruby version
  export PATH="$RBENV_DIR/bin:$PATH"
  eval "$("$RBENV_BIN" init - bash)" 2>/dev/null || true

  # Compile the requested Ruby only when not already present
  if ! "$RBENV_BIN" versions --bare 2>/dev/null | grep -qx "$RUBY_VERSION"; then
    $STD "$RBENV_BIN" install "$RUBY_VERSION" || {
      msg_error "Failed to install Ruby $RUBY_VERSION"
      rm -rf "$TMP_DIR"
      return 1
    }
  fi

  "$RBENV_BIN" global "$RUBY_VERSION" || {
    msg_error "Failed to set Ruby $RUBY_VERSION as global version"
    rm -rf "$TMP_DIR"
    return 1
  }

  # Forget cached command paths so the new ruby/gem shims are found
  hash -r

  # Install Rails if requested (failure is non-fatal: Ruby itself succeeded)
  if [[ "$RUBY_INSTALL_RAILS" == "true" ]]; then
    $STD gem install rails || {
      msg_warn "Failed to install Rails - Ruby installation successful"
    }
  fi

  rm -rf "$TMP_DIR"
  cache_installed_version "ruby" "$RUBY_VERSION"
  msg_ok "Setup Ruby $RUBY_VERSION"
}

# ------------------------------------------------------------------------------
# Installs or upgrades ClickHouse database server.
#
# Description:
#   - Adds ClickHouse official repository
#   - Installs specified version
#   - Configures systemd service
#   - Supports Debian/Ubuntu with fallback mechanism
#
# Variables:
#   CLICKHOUSE_VERSION  - ClickHouse version to install (default: latest)
# ------------------------------------------------------------------------------

function setup_clickhouse() {
  # Install or upgrade ClickHouse server + client from the official APT repo.
  # CLICKHOUSE_VERSION may be "latest" (resolved below) or a pinned version.
  #
  # Relies on helpers defined elsewhere in this file: msg_*, ensure_apt_working,
  # upgrade_packages_with_retry, stop_all_services, remove_old_tool_version,
  # ensure_dependencies, prepare_repository_setup, setup_deb822_repo,
  # install_packages_with_retry, safe_service_restart, cache_installed_version.
  local CLICKHOUSE_VERSION="${CLICKHOUSE_VERSION:-latest}"
  local DISTRO_ID DISTRO_CODENAME
  # NOTE(review): DISTRO_ID/DISTRO_CODENAME are not referenced again in this
  # function body; bash dynamic scoping makes them visible to the helper
  # functions called below - confirm whether a callee consumes them before
  # removing.
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Resolve "latest" version by scraping the tgz listing for 4-component
  # version strings (e.g. 24.3.2.23) and taking the highest via version sort
  if [[ "$CLICKHOUSE_VERSION" == "latest" ]]; then
    CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://packages.clickhouse.com/tgz/stable/ 2>/dev/null |
      grep -oP 'clickhouse-common-static-\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' |
      sort -V | tail -n1 || echo "")

    # Fallback to GitHub API if package server failed
    if [[ -z "$CLICKHOUSE_VERSION" ]]; then
      CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://api.github.com/repos/ClickHouse/ClickHouse/releases/latest 2>/dev/null |
        grep -oP '"tag_name":\s*"v\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1 || echo "")
    fi

    [[ -z "$CLICKHOUSE_VERSION" ]] && {
      msg_error "Could not determine latest ClickHouse version from any source"
      return 1
    }
  fi

  # Get currently installed version
  local CURRENT_VERSION=""
  if command -v clickhouse-server >/dev/null 2>&1; then
    CURRENT_VERSION=$(clickhouse-server --version 2>/dev/null | grep -oP 'version \K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1)
  fi

  # Scenario 1: Already at target version - just update packages
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$CLICKHOUSE_VERSION" ]]; then
    msg_info "Update ClickHouse $CLICKHOUSE_VERSION"
    ensure_apt_working || return 1

    # Perform upgrade with retry logic (non-fatal if fails)
    upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || true
    cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
    msg_ok "Update ClickHouse $CLICKHOUSE_VERSION"
    return 0
  fi

  # Scenario 2: Different version - clean upgrade (stop + remove old install)
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$CLICKHOUSE_VERSION" ]]; then
    msg_info "Upgrade ClickHouse from $CURRENT_VERSION to $CLICKHOUSE_VERSION"
    stop_all_services "clickhouse-server"
    remove_old_tool_version "clickhouse"
  else
    msg_info "Setup ClickHouse $CLICKHOUSE_VERSION"
  fi

  ensure_dependencies apt-transport-https ca-certificates dirmngr gnupg

  # Prepare repository (cleanup + validation)
  prepare_repository_setup "clickhouse" || {
    msg_error "Failed to prepare ClickHouse repository"
    return 1
  }

  # Setup repository (ClickHouse uses 'stable' suite)
  # NOTE(review): the signing key is fetched from the rpm repodata URL; this
  # mirrors ClickHouse's published apt instructions but verify it stays valid.
  setup_deb822_repo \
    "clickhouse" \
    "https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key" \
    "https://packages.clickhouse.com/deb" \
    "stable" \
    "main"

  # Install packages with retry logic
  export DEBIAN_FRONTEND=noninteractive
  $STD apt update || {
    msg_error "APT update failed for ClickHouse repository"
    return 1
  }

  install_packages_with_retry "clickhouse-server" "clickhouse-client" || {
    msg_error "Failed to install ClickHouse packages"
    return 1
  }

  # Verify installation
  if ! command -v clickhouse-server >/dev/null 2>&1; then
    msg_error "ClickHouse installation completed but clickhouse-server command not found"
    return 1
  fi

  # Setup data directory (only chown when the service account exists)
  mkdir -p /var/lib/clickhouse
  if id clickhouse >/dev/null 2>&1; then
    chown -R clickhouse:clickhouse /var/lib/clickhouse
  fi

  # Enable and start service (both steps are best-effort)
  $STD systemctl enable clickhouse-server || {
    msg_warn "Failed to enable clickhouse-server service"
  }
  safe_service_restart clickhouse-server || true

  cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
  msg_ok "Setup ClickHouse $CLICKHOUSE_VERSION"
}

# ------------------------------------------------------------------------------
# Installs Rust toolchain and optional global crates via cargo.
#
# Description:
#   - Installs rustup (if missing)
#   - Installs or updates desired Rust toolchain (stable, nightly, or versioned)
#   - Installs or updates specified global crates using `cargo install`
#
# Notes:
#   - Skips crate install if exact version is already present
#   - Updates crate if newer version or different version is requested
#
# Variables:
#   RUST_TOOLCHAIN  - Rust toolchain to install (default: stable)
#   RUST_CRATES     - Comma-separated list of crates (e.g. "cargo-edit,wasm-pack@0.12.1")
# ------------------------------------------------------------------------------

function setup_rust() {
  # Install or update the Rust toolchain via rustup, then install/upgrade
  # any crates listed in RUST_CRATES ("name" or "name@version", comma-sep).
  #
  # Relies on helpers defined elsewhere in this file: msg_info/msg_ok/
  # msg_error, cache_installed_version, and $STD (command output wrapper).
  local RUST_TOOLCHAIN="${RUST_TOOLCHAIN:-stable}"
  local RUST_CRATES="${RUST_CRATES:-}"
  local CARGO_BIN="${HOME}/.cargo/bin"

  # Get currently installed version (empty when rustc is absent)
  local CURRENT_VERSION=""
  if command -v rustc &>/dev/null; then
    CURRENT_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
  fi

  # Scenario 1: Rustup not installed - fresh install via the official script
  if ! command -v rustup &>/dev/null; then
    msg_info "Setup Rust ($RUST_TOOLCHAIN)"
    curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN" || {
      msg_error "Failed to install Rust"
      return 1
    }
    export PATH="$CARGO_BIN:$PATH"
    echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile"

    # Verify installation
    if ! command -v rustc >/dev/null 2>&1; then
      msg_error "Rust binary not found after installation"
      return 1
    fi

    # NOTE(review): `local v=$(cmd)` masks the command's exit status; the
    # emptiness check below compensates for that here.
    local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
    if [[ -z "$RUST_VERSION" ]]; then
      msg_error "Failed to determine Rust version"
      return 1
    fi

    cache_installed_version "rust" "$RUST_VERSION"
    msg_ok "Setup Rust $RUST_VERSION"
  else
    # Scenario 2: Rustup already installed - update/maintain
    msg_info "Update Rust ($RUST_TOOLCHAIN)"

    # Ensure default toolchain is set
    $STD rustup default "$RUST_TOOLCHAIN" 2>/dev/null || {
      # If default fails, install the toolchain first
      $STD rustup install "$RUST_TOOLCHAIN" || {
        msg_error "Failed to install Rust toolchain $RUST_TOOLCHAIN"
        return 1
      }
      $STD rustup default "$RUST_TOOLCHAIN" || {
        msg_error "Failed to set default Rust toolchain"
        return 1
      }
    }

    # Update to latest patch version (</dev/null avoids interactive prompts)
    $STD rustup update "$RUST_TOOLCHAIN" </dev/null || true

    # Ensure PATH is updated for current shell session
    export PATH="$CARGO_BIN:$PATH"

    local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
    if [[ -z "$RUST_VERSION" ]]; then
      msg_error "Failed to determine Rust version after update"
      return 1
    fi

    cache_installed_version "rust" "$RUST_VERSION"
    msg_ok "Update Rust $RUST_VERSION"
  fi

  # Install global crates
  if [[ -n "$RUST_CRATES" ]]; then
    msg_info "Processing Rust crates: $RUST_CRATES"
    IFS=',' read -ra CRATES <<<"$RUST_CRATES"
    for crate in "${CRATES[@]}"; do
      crate=$(echo "$crate" | xargs) # trim whitespace
      [[ -z "$crate" ]] && continue  # skip empty entries

      local NAME VER INSTALLED_VER CRATE_LIST
      # Split "name@version" into NAME/VER; plain "name" means latest
      if [[ "$crate" == *"@"* ]]; then
        NAME="${crate%@*}"
        VER="${crate##*@}"
      else
        NAME="$crate"
        VER=""
      fi

      # Refresh the installed-crate list each iteration (a previous
      # iteration's install may have changed it)
      CRATE_LIST=$(cargo install --list 2>/dev/null || echo "")

      # Check if already installed
      # (`cargo install --list` lines look like "name v1.2.3:")
      if echo "$CRATE_LIST" | grep -q "^${NAME} "; then
        INSTALLED_VER=$(echo "$CRATE_LIST" | grep "^${NAME} " | head -1 | awk '{print $2}' | tr -d 'v:')

        if [[ -n "$VER" && "$VER" != "$INSTALLED_VER" ]]; then
          msg_info "Upgrading $NAME from v$INSTALLED_VER to v$VER"
          $STD cargo install "$NAME" --version "$VER" --force || {
            msg_error "Failed to install $NAME@$VER"
            return 1
          }
          msg_ok "Upgraded $NAME to v$VER"
        elif [[ -z "$VER" ]]; then
          msg_info "Upgrading $NAME to latest"
          $STD cargo install "$NAME" --force || {
            msg_error "Failed to upgrade $NAME"
            return 1
          }
          local NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' | tr -d 'v:')
          msg_ok "Upgraded $NAME to v$NEW_VER"
        else
          msg_ok "$NAME v$INSTALLED_VER already installed"
        fi
      else
        msg_info "Installing $NAME${VER:+@$VER}"
        if [[ -n "$VER" ]]; then
          $STD cargo install "$NAME" --version "$VER" || {
            msg_error "Failed to install $NAME@$VER"
            return 1
          }
          msg_ok "Installed $NAME v$VER"
        else
          $STD cargo install "$NAME" || {
            msg_error "Failed to install $NAME"
            return 1
          }
          local NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' | tr -d 'v:')
          msg_ok "Installed $NAME v$NEW_VER"
        fi
      fi
    done
    msg_ok "Processed Rust crates"
  fi
}

# ------------------------------------------------------------------------------
# Installs or upgrades uv (Python package manager) from GitHub releases.
#   - Downloads platform-specific tarball (no install.sh!)
#   - Extracts uv binary
#   - Places it in /usr/local/bin
#   - Optionally installs a specific Python version via uv
# ------------------------------------------------------------------------------

function setup_uv() {
  # Install or upgrade uv by downloading the platform tarball from GitHub
  # releases and placing the binary in /usr/local/bin. Optionally installs
  # a uvx wrapper (USE_UVX=YES) and a Python version (PYTHON_VERSION).
  #
  # Relies on helpers defined elsewhere in this file: msg_*, get_cached_version,
  # ensure_dependencies, ensure_usr_local_bin_persist, cache_installed_version,
  # _install_uvx_wrapper, and $STD (command output wrapper).
  local UV_BIN="/usr/local/bin/uv"
  local UVX_BIN="/usr/local/bin/uvx"
  local TMP_DIR=$(mktemp -d)
  local CACHED_VERSION

  # trap for TMP Cleanup
  # NOTE(review): this sets a shell-wide EXIT trap ($TMP_DIR expands now),
  # replacing any EXIT trap a caller may have installed - confirm acceptable.
  trap "rm -rf '$TMP_DIR'" EXIT

  # NOTE(review): CACHED_VERSION is captured but never read in this function.
  CACHED_VERSION=$(get_cached_version "uv")

  # Architecture Detection (uv ships musl builds for Alpine, glibc otherwise)
  local ARCH=$(uname -m)
  local OS_TYPE=""
  local UV_TAR=""

  if grep -qi "alpine" /etc/os-release; then
    OS_TYPE="musl"
  else
    OS_TYPE="gnu"
  fi

  case "$ARCH" in
  x86_64)
    UV_TAR="uv-x86_64-unknown-linux-${OS_TYPE}.tar.gz"
    ;;
  aarch64)
    UV_TAR="uv-aarch64-unknown-linux-${OS_TYPE}.tar.gz"
    ;;
  i686)
    UV_TAR="uv-i686-unknown-linux-${OS_TYPE}.tar.gz"
    ;;
  *)
    msg_error "Unsupported architecture: $ARCH (supported: x86_64, aarch64, i686)"
    return 1
    ;;
  esac

  ensure_dependencies jq

  # Fetch latest version from the GitHub API
  local releases_json
  releases_json=$(curl -fsSL --max-time 15 \
    "https://api.github.com/repos/astral-sh/uv/releases/latest" 2>/dev/null || echo "")

  if [[ -z "$releases_json" ]]; then
    msg_error "Could not fetch latest uv version from GitHub API"
    return 1
  fi

  local LATEST_VERSION
  # strip a leading "v" defensively; uv tags are typically bare versions
  LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//')

  if [[ -z "$LATEST_VERSION" ]]; then
    msg_error "Could not parse uv version from GitHub API response"
    return 1
  fi

  # Get currently installed version (`uv --version` prints "uv X.Y.Z")
  local INSTALLED_VERSION=""
  if [[ -x "$UV_BIN" ]]; then
    INSTALLED_VERSION=$("$UV_BIN" --version 2>/dev/null | awk '{print $2}')
  fi

  # Scenario 1: Already at latest version - return quietly
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
    cache_installed_version "uv" "$LATEST_VERSION"

    # Check if uvx is needed and missing
    if [[ "${USE_UVX:-NO}" == "YES" ]] && [[ ! -x "$UVX_BIN" ]]; then
      msg_info "Installing uvx wrapper"
      _install_uvx_wrapper || return 1
      msg_ok "uvx wrapper installed"
    fi

    return 0
  fi

  # Scenario 2: New install or upgrade
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then
    msg_info "Upgrade uv from $INSTALLED_VERSION to $LATEST_VERSION"
  else
    msg_info "Setup uv $LATEST_VERSION"
  fi

  local UV_URL="https://github.com/astral-sh/uv/releases/download/${LATEST_VERSION}/${UV_TAR}"

  $STD curl -fsSL "$UV_URL" -o "$TMP_DIR/uv.tar.gz" || {
    msg_error "Failed to download uv from $UV_URL"
    return 1
  }

  # Extract
  $STD tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR" || {
    msg_error "Failed to extract uv"
    return 1
  }

  # Find and install uv binary (tarball extracts to uv-VERSION-ARCH/ directory)
  local UV_BINARY=$(find "$TMP_DIR" -name "uv" -type f -executable | head -n1)
  if [[ ! -f "$UV_BINARY" ]]; then
    msg_error "Could not find uv binary in extracted tarball"
    return 1
  fi

  $STD install -m 755 "$UV_BINARY" "$UV_BIN" || {
    msg_error "Failed to install uv binary"
    return 1
  }

  ensure_usr_local_bin_persist
  export PATH="/usr/local/bin:$PATH"

  # Optional: Install uvx wrapper
  if [[ "${USE_UVX:-NO}" == "YES" ]]; then
    msg_info "Installing uvx wrapper"
    _install_uvx_wrapper || {
      msg_error "Failed to install uvx wrapper"
      return 1
    }
    msg_ok "uvx wrapper installed"
  fi

  # Optional: Generate shell completions (best-effort)
  $STD uv generate-shell-completion bash >/etc/bash_completion.d/uv 2>/dev/null || true
  if [[ -d /usr/share/zsh/site-functions ]]; then
    $STD uv generate-shell-completion zsh >/usr/share/zsh/site-functions/_uv 2>/dev/null || true
  fi

  # Optional: Install specific Python version if requested
  if [[ -n "${PYTHON_VERSION:-}" ]]; then
    msg_info "Installing Python $PYTHON_VERSION via uv"
    $STD uv python install "$PYTHON_VERSION" || {
      msg_error "Failed to install Python $PYTHON_VERSION"
      return 1
    }
    msg_ok "Python $PYTHON_VERSION installed"
  fi

  cache_installed_version "uv" "$LATEST_VERSION"
  msg_ok "Setup uv $LATEST_VERSION"
}

# Helper function to install uvx wrapper
_install_uvx_wrapper() {
  # Write a tiny executable shim at /usr/local/bin/uvx that delegates to
  # `uv tool run`, then mark it executable. Always returns 0.
  local wrapper="/usr/local/bin/uvx"

  {
    printf '%s\n' '#!/bin/bash'
    printf '%s\n' '# uvx - Run Python applications from PyPI as command-line tools'
    printf '%s\n' '# Wrapper for: uv tool run'
    printf '%s\n' 'exec /usr/local/bin/uv tool run "$@"'
  } >"$wrapper"

  chmod +x "$wrapper"
  return 0
}

# ------------------------------------------------------------------------------
# Installs or updates yq (mikefarah/yq - Go version).
#
# Description:
#   - Checks if yq is installed and from correct source
#   - Compares with latest release on GitHub
#   - Updates if outdated or wrong implementation
# ------------------------------------------------------------------------------

function setup_yq() {
  # Install or update yq (mikefarah/yq - the Go implementation).
  #
  # - Removes any non-mikefarah `yq` found on PATH (e.g. the Python wrapper,
  #   which has an incompatible CLI)
  # - Resolves the latest release via the GitHub API
  # - Skips the download when the installed version is already current
  #
  # Relies on helpers defined elsewhere in this file: msg_error/msg_info/
  # msg_ok, ensure_dependencies, ensure_usr_local_bin_persist,
  # cache_installed_version.
  local TMP_DIR=$(mktemp -d)
  local BINARY_PATH="/usr/local/bin/yq"
  local GITHUB_REPO="mikefarah/yq"

  # FIX: the download previously hard-coded the amd64 asset, installing a
  # binary that cannot run on arm64/armhf/i386 hosts. Map the machine
  # architecture to the matching release asset name instead.
  local ARCH YQ_ASSET
  ARCH=$(uname -m)
  case "$ARCH" in
  x86_64) YQ_ASSET="yq_linux_amd64" ;;
  aarch64 | arm64) YQ_ASSET="yq_linux_arm64" ;;
  armv7l | armv6l) YQ_ASSET="yq_linux_arm" ;;
  i386 | i686) YQ_ASSET="yq_linux_386" ;;
  *)
    msg_error "Unsupported architecture for yq: $ARCH"
    rm -rf "$TMP_DIR"
    return 1
    ;;
  esac

  ensure_dependencies jq
  ensure_usr_local_bin_persist

  # Remove non-mikefarah implementations
  if command -v yq &>/dev/null; then
    if ! yq --version 2>&1 | grep -q 'mikefarah'; then
      rm -f "$(command -v yq)"
    fi
  fi

  local LATEST_VERSION
  local releases_json
  releases_json=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" 2>/dev/null || echo "")

  if [[ -z "$releases_json" ]]; then
    msg_error "Could not fetch latest yq version from GitHub API"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # tag_name looks like "v4.44.1"; strip the leading "v"
  LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")

  if [[ -z "$LATEST_VERSION" ]]; then
    msg_error "Could not parse yq version from GitHub API response"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # Get currently installed version (only counts if it is mikefarah's build)
  local INSTALLED_VERSION=""
  if command -v yq &>/dev/null && yq --version 2>&1 | grep -q 'mikefarah'; then
    INSTALLED_VERSION=$(yq --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//')
  fi

  # Scenario 1: Already at latest version
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
    cache_installed_version "yq" "$LATEST_VERSION"
    rm -rf "$TMP_DIR"
    return 0
  fi

  # Scenario 2: New install or upgrade
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then
    msg_info "Upgrade yq from $INSTALLED_VERSION to $LATEST_VERSION"
  else
    msg_info "Setup yq $LATEST_VERSION"
  fi

  curl -fsSL "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/${YQ_ASSET}" -o "$TMP_DIR/yq" || {
    msg_error "Failed to download yq"
    rm -rf "$TMP_DIR"
    return 1
  }

  chmod +x "$TMP_DIR/yq"
  mv "$TMP_DIR/yq" "$BINARY_PATH" || {
    msg_error "Failed to install yq"
    rm -rf "$TMP_DIR"
    return 1
  }

  rm -rf "$TMP_DIR"
  hash -r

  local FINAL_VERSION
  FINAL_VERSION=$("$BINARY_PATH" --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//')
  cache_installed_version "yq" "$FINAL_VERSION"
  msg_ok "Setup yq $FINAL_VERSION"
}

# ------------------------------------------------------------------------------
# Docker Engine Installation and Management (All-In-One)
#
# Description:
#   - Detects and migrates old Docker installations
#   - Installs/Updates Docker Engine via official repository
#   - Optional: Installs/Updates Portainer CE
#   - Updates running containers interactively
#   - Cleans up legacy repository files
#
# Usage:
#   setup_docker
#   DOCKER_PORTAINER="true" setup_docker
#   DOCKER_LOG_DRIVER="json-file" setup_docker
#
# Variables:
#   DOCKER_PORTAINER       - Install Portainer CE (optional, "true" to enable)
#   DOCKER_LOG_DRIVER      - Log driver (optional, default: "journald")
#   DOCKER_SKIP_UPDATES    - Skip container update check (optional, "true" to skip)
#
# Features:
#   - Migrates from get.docker.com to repository-based installation
#   - Updates Docker Engine if newer version available
#   - Interactive container update with multi-select
#   - Portainer installation and update support
# ------------------------------------------------------------------------------
function setup_docker() {
  # State flags driving the install-vs-upgrade branches below.
  local docker_installed=false
  local portainer_installed=false

  # Check if Docker is already installed; extract a bare "X.Y.Z" from
  # `docker --version` for the later candidate-version comparison.
  if command -v docker &>/dev/null; then
    docker_installed=true
    DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
    msg_info "Docker $DOCKER_CURRENT_VERSION detected"
  fi

  # Check if Portainer is running (exact name match via anchored grep;
  # stderr suppressed so a missing/stopped daemon does not spam output).
  if docker ps --format '{{.Names}}' 2>/dev/null | grep -q '^portainer$'; then
    portainer_installed=true
    msg_info "Portainer container detected"
  fi

  # Cleanup old repository configurations: remove the legacy one-line
  # .list file and its key so the deb822 setup below is authoritative.
  if [ -f /etc/apt/sources.list.d/docker.list ]; then
    msg_info "Migrating from old Docker repository format"
    rm -f /etc/apt/sources.list.d/docker.list
    rm -f /etc/apt/keyrings/docker.asc
  fi

  # Setup/Update Docker repository.
  # NOTE(review): setup_deb822_repo and get_os_info are defined elsewhere in
  # this file; argument order appears to be (name, gpg-url, repo-url, suite,
  # component, arch) — confirm against their definitions.
  msg_info "Setting up Docker Repository"
  setup_deb822_repo \
    "docker" \
    "https://download.docker.com/linux/$(get_os_info id)/gpg" \
    "https://download.docker.com/linux/$(get_os_info id)" \
    "$(get_os_info codename)" \
    "stable" \
    "$(dpkg --print-architecture)"

  # Install or upgrade Docker
  if [ "$docker_installed" = true ]; then
    msg_info "Checking for Docker updates"
    # Candidate looks like "5:24.0.7-1~debian…": first cut strips the epoch,
    # second strips the Debian revision, yielding "24.0.7" comparable with
    # DOCKER_CURRENT_VERSION above.
    DOCKER_LATEST_VERSION=$(apt-cache policy docker-ce | grep Candidate | awk '{print $2}' | cut -d':' -f2 | cut -d'-' -f1)

    if [ "$DOCKER_CURRENT_VERSION" != "$DOCKER_LATEST_VERSION" ]; then
      msg_info "Updating Docker $DOCKER_CURRENT_VERSION → $DOCKER_LATEST_VERSION"
      # $STD is presumably an output-suppression/logging wrapper from
      # build.func — not visible in this chunk.
      $STD apt install -y --only-upgrade \
        docker-ce \
        docker-ce-cli \
        containerd.io \
        docker-buildx-plugin \
        docker-compose-plugin
      msg_ok "Updated Docker to $DOCKER_LATEST_VERSION"
    else
      msg_ok "Docker is up-to-date ($DOCKER_CURRENT_VERSION)"
    fi
  else
    msg_info "Installing Docker"
    $STD apt install -y \
      docker-ce \
      docker-ce-cli \
      containerd.io \
      docker-buildx-plugin \
      docker-compose-plugin

    # Re-read the version now that the binary exists.
    DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
    msg_ok "Installed Docker $DOCKER_CURRENT_VERSION"
  fi

  # Configure daemon.json — written only when absent, so an existing
  # operator-managed configuration is never overwritten.
  local log_driver="${DOCKER_LOG_DRIVER:-journald}"
  mkdir -p /etc/docker
  if [ ! -f /etc/docker/daemon.json ]; then
    cat <<EOF >/etc/docker/daemon.json
{
  "log-driver": "$log_driver"
}
EOF
  fi

  # Enable and start Docker
  systemctl enable -q --now docker

  # Portainer Management (opt-in via DOCKER_PORTAINER="true")
  if [[ "${DOCKER_PORTAINER:-}" == "true" ]]; then
    if [ "$portainer_installed" = true ]; then
      msg_info "Checking for Portainer updates"
      # Tag of the running image, e.g. "2.19.4" (or "latest" if tagged so).
      PORTAINER_CURRENT=$(docker inspect portainer --format='{{.Config.Image}}' 2>/dev/null | cut -d':' -f2)
      # First semver tag from the Docker Hub tags API; the trailing quote the
      # pattern matches is stripped by tr. Assumes the API returns newest
      # tags first — TODO confirm ordering is guaranteed.
      PORTAINER_LATEST=$(curl -fsSL https://registry.hub.docker.com/v2/repositories/portainer/portainer-ce/tags?page_size=100 | grep -oP '"name":"\K[0-9]+\.[0-9]+\.[0-9]+"' | head -1 | tr -d '"')

      if [ "$PORTAINER_CURRENT" != "$PORTAINER_LATEST" ]; then
        read -r -p "${TAB3}Update Portainer $PORTAINER_CURRENT → $PORTAINER_LATEST? <y/N> " prompt
        if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
          msg_info "Updating Portainer"
          # Recreate the container; persistent state lives in the
          # portainer_data volume, which survives `docker rm`.
          docker stop portainer
          docker rm portainer
          docker pull portainer/portainer-ce:latest
          docker run -d \
            -p 9000:9000 \
            -p 9443:9443 \
            --name=portainer \
            --restart=always \
            -v /var/run/docker.sock:/var/run/docker.sock \
            -v portainer_data:/data \
            portainer/portainer-ce:latest
          msg_ok "Updated Portainer to $PORTAINER_LATEST"
        fi
      else
        msg_ok "Portainer is up-to-date ($PORTAINER_CURRENT)"
      fi
    else
      msg_info "Installing Portainer"
      docker volume create portainer_data
      docker run -d \
        -p 9000:9000 \
        -p 9443:9443 \
        --name=portainer \
        --restart=always \
        -v /var/run/docker.sock:/var/run/docker.sock \
        -v portainer_data:/data \
        portainer/portainer-ce:latest

      LOCAL_IP=$(hostname -I | awk '{print $1}')
      msg_ok "Installed Portainer (http://${LOCAL_IP}:9000)"
    fi
  fi

  # Interactive Container Update Check — skippable via DOCKER_SKIP_UPDATES;
  # only runs when Docker existed before this call (fresh installs have no
  # pre-existing containers to check).
  if [[ "${DOCKER_SKIP_UPDATES:-}" != "true" ]] && [ "$docker_installed" = true ]; then
    msg_info "Checking for container updates"

    # Get list of running containers with update status
    local containers_with_updates=()
    local container_info=()
    local index=1

    # Process substitution (not a pipe) keeps this loop in the current
    # shell, so the arrays filled here remain visible after `done`.
    while IFS= read -r container; do
      local name=$(echo "$container" | awk '{print $1}')
      local image=$(echo "$container" | awk '{print $2}')
      # Short (12-char) id of the image the container is currently running.
      local current_digest=$(docker inspect "$name" --format='{{.Image}}' 2>/dev/null | cut -d':' -f2 | cut -c1-12)

      # Pull latest image digest; a changed id after the pull means the
      # registry has a newer image for this tag.
      docker pull "$image" >/dev/null 2>&1
      local latest_digest=$(docker inspect "$image" --format='{{.Id}}' 2>/dev/null | cut -d':' -f2 | cut -c1-12)

      if [ "$current_digest" != "$latest_digest" ]; then
        containers_with_updates+=("$name")
        container_info+=("${index}) ${name} (${image})")
        ((index++))
      fi
    done < <(docker ps --format '{{.Names}} {{.Image}}')

    if [ ${#containers_with_updates[@]} -gt 0 ]; then
      echo ""
      echo "${TAB3}Container updates available:"
      for info in "${container_info[@]}"; do
        echo "${TAB3}  $info"
      done
      echo ""
      read -r -p "${TAB3}Select containers to update (e.g., 1,3,5 or 'all' or 'none'): " selection

      if [[ ${selection,,} == "all" ]]; then
        for container in "${containers_with_updates[@]}"; do
          msg_info "Updating container: $container"
          # CAUTION: containers are only stopped and removed — the original
          # run command is unknown here, so recreation with the updated
          # image is left to the operator.
          docker stop "$container"
          docker rm "$container"
          # Note: This requires the original docker run command - best to recreate via compose
          msg_ok "Stopped and removed $container (please recreate with updated image)"
        done
      elif [[ ${selection,,} != "none" ]]; then
        # Parse a comma-separated list of menu numbers; non-numeric or
        # out-of-range entries are silently ignored.
        IFS=',' read -ra SELECTED <<<"$selection"
        for num in "${SELECTED[@]}"; do
          num=$(echo "$num" | xargs) # trim whitespace
          if [[ "$num" =~ ^[0-9]+$ ]] && [ "$num" -ge 1 ] && [ "$num" -le "${#containers_with_updates[@]}" ]; then
            container="${containers_with_updates[$((num - 1))]}"
            msg_info "Updating container: $container"
            docker stop "$container"
            docker rm "$container"
            msg_ok "Stopped and removed $container (please recreate with updated image)"
          fi
        done
      fi
    else
      msg_ok "All containers are up-to-date"
    fi
  fi

  msg_ok "Docker setup completed"
}
