#!/bin/bash

# Copyright 2025 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install KServe Knative Mode and all related dependencies using helm.
#
# AUTO-GENERATED from: kserve-knative-mode-full-install.definition
# DO NOT EDIT MANUALLY
#
# To regenerate:
#   ./scripts/generate-install-script.py kserve-knative-mode-full-install.definition
#
# Usage: kserve-knative-mode-full-install.sh [--reinstall|--uninstall]

# Fail fast: abort on command errors (errexit), on unset variables
# (nounset), and on failures in any stage of a pipeline (pipefail).
set -o errexit
set -o nounset
set -o pipefail

#================================================
# Helper Functions (from common.sh)
#================================================

# Utility Functions
# ============================================================================

# Walk upward from a starting directory until a .git directory is found.
# $1 - starting directory (default: current directory)
# $2 - "true" to fall back to $PWD instead of exiting when nothing is found
# Prints the repository root on stdout.
find_repo_root() {
    local dir="${1:-$(pwd)}"
    local fallback="${2:-false}"

    until [[ "$dir" == "/" ]]; do
        if [[ -d "${dir}/.git" ]]; then
            echo "$dir"
            return 0
        fi
        dir="$(dirname "$dir")"
    done

    # No .git anywhere up the tree
    if [[ "$fallback" == "true" ]]; then
        log_warning "Could not find git repository root, using current directory: $PWD"
        echo "$PWD"
        return 0
    fi

    log_error "Could not find git repository root"
    exit 1
}

# Create a directory (and any missing parents) unless it already exists.
ensure_dir() {
    local target="${1}"
    [[ -d "${target}" ]] || mkdir -p "${target}"
}

# Print the normalized OS name (linux/darwin); exit 1 on anything else.
detect_os() {
    case "$(uname -s)" in
        Linux*)  echo "linux" ;;
        Darwin*) echo "darwin" ;;
        *)       log_error "Unsupported OS detected: $(uname -s)" ; exit 1 ;;
    esac
}

# Print the normalized CPU architecture (amd64/arm64); exit 1 otherwise.
detect_arch() {
    case "$(uname -m)" in
        x86_64)        echo "amd64" ;;
        aarch64|arm64) echo "arm64" ;;
        *)             log_error "Unsupported architecture detected: $(uname -m)" ; exit 1 ;;
    esac
}

# Delete BIN_DIR, but only when this script created it (temp-dir case).
cleanup_bin_dir() {
    [[ "${BIN_DIR_CREATED_BY_SCRIPT:-false}" == "true" ]] || return 0
    [[ -d "${BIN_DIR:-}" ]] || return 0
    log_info "Cleaning up BIN_DIR: ${BIN_DIR}"
    rm -rf "${BIN_DIR}"
}

# Master cleanup handler run from the EXIT trap; add further cleanup_*
# calls here as needed.
cleanup() {
    cleanup_bin_dir
}

# Run cleanup handlers on every exit path
trap cleanup EXIT

# ANSI color codes; disabled when NO_COLOR is set or stdout is not a TTY
if [[ -n "${NO_COLOR:-}" ]] || [[ ! -t 1 ]]; then
    BLUE='' GREEN='' RED='' YELLOW='' RESET=''
else
    BLUE='\033[94m'
    GREEN='\033[92m'
    RED='\033[91m'
    YELLOW='\033[93m'
    RESET='\033[0m'
fi

# Informational message to stderr (%b interprets the color escapes,
# matching the original 'echo -e' behavior).
log_info() {
    printf '%b\n' "${BLUE}[INFO]${RESET} $*" >&2
}

# Error message to stderr.
log_error() {
    printf '%b\n' "${RED}[ERROR]${RESET} $*" >&2
}

# Success message to stderr.
log_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${RESET} $*" >&2
}

# Warning message to stderr.
log_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${RESET} $*" >&2
}


# ============================================================================
# Infrastructure Installation Helper Functions
# ============================================================================

# Identify the cluster flavor this kubectl context points at.
# Prints one of: openshift, kind, minikube, kubernetes.
detect_platform() {
    # OpenShift uniquely exposes the clusterversion resource
    if kubectl get clusterversion &>/dev/null; then
        echo "openshift"
        return 0
    fi

    # Kind nodes carry "kind" in the hostname label of the first node
    local hostname_label
    hostname_label=$(kubectl get nodes -o jsonpath='{.items[0].metadata.labels.kubernetes\.io/hostname}' 2>/dev/null || echo "")
    case "$hostname_label" in
        *kind*) echo "kind"; return 0 ;;
    esac

    # Minikube is recognizable from the kubectl context name
    local ctx
    ctx=$(kubectl config current-context 2>/dev/null || echo "")
    case "$ctx" in
        *minikube*) echo "minikube"; return 0 ;;
    esac

    # Default to standard Kubernetes
    echo "kubernetes"
    return 0
}

# Wait until at least one non-terminating pod matching the selector exists.
# Usage: wait_for_pods_created <namespace> <label-selector> [timeout_seconds]
# Returns 0 once a pod is found, 1 on timeout.
wait_for_pods_created() {
    local namespace="$1"
    local label_selector="$2"
    local timeout="${3:-60}"
    local elapsed=0
    local pod_count

    log_info "Waiting for pods with label '$label_selector' in namespace '$namespace' to be created..."

    while true; do
        # Exclude terminating pods by filtering out Terminating status.
        # Declaration is separated from the assignment so failures are not
        # silently masked by 'local'; '|| true' keeps the expected empty
        # result (grep exits 1 under pipefail) from tripping errexit.
        pod_count=$(kubectl get pods -n "$namespace" -l "$label_selector" \
            --no-headers 2>/dev/null | grep -v "Terminating" | wc -l) || true

        if [ "$pod_count" -gt 0 ]; then
            log_info "Found $pod_count pod(s) with label '$label_selector'"
            return 0
        fi

        if [ "$elapsed" -ge "$timeout" ]; then
            log_error "Timeout waiting for pods with label '$label_selector' to be created"
            return 1
        fi

        sleep 2
        elapsed=$((elapsed + 2))
    done
}

# Wait until every non-terminating pod matching the selector is Ready.
# Usage: wait_for_pods_ready <namespace> <label-selector> [timeout]
# Returns 1 when no pods match or any pod fails to become Ready.
wait_for_pods_ready() {
    local namespace="$1"
    local label_selector="$2"
    local timeout="${3:-180s}"
    local pods pod

    log_info "Waiting for pods with label '$label_selector' in namespace '$namespace' to be ready..."

    # Declaration split from assignment so errexit is not silently masked
    # by 'local'; '|| true' keeps the expected empty-result case (grep
    # exits 1 under pipefail) non-fatal - it is handled just below.
    pods=$(kubectl get pods -n "$namespace" -l "$label_selector" \
        --no-headers 2>/dev/null | grep -v "Terminating" | awk '{print $1}') || true

    if [ -z "$pods" ]; then
        log_error "No pods found with label '$label_selector' in namespace '$namespace'"
        return 1
    fi

    for pod in $pods; do
        kubectl wait --for=condition=Ready pod/"$pod" -n "$namespace" --timeout="$timeout" || return 1
    done
}

# Wait for pods to appear, then for them to become Ready.
# Usage: wait_for_pods <namespace> <label-selector> [timeout]
wait_for_pods() {
    local ns="$1"
    local selector="$2"
    local timeout="${3:-180s}"

    # Strip a trailing "s" to get a plain number for the creation phase
    local total_seconds="${timeout%s}"
    local creation_timeout

    # Cap the creation wait at 60s; for shorter totals use a third of it
    if [ "$total_seconds" -gt 60 ]; then
        creation_timeout=60
    else
        creation_timeout=$((total_seconds / 3))
    fi

    # Phase 1: pods exist; Phase 2: pods Ready
    wait_for_pods_created "$ns" "$selector" "$creation_timeout" || return 1
    wait_for_pods_ready "$ns" "$selector" "$timeout" || return 1

    log_success "Pods with label '$selector' in namespace '$ns' are ready!"
}

# Wait for a Deployment to report the Available condition.
# Usage: wait_for_deployment <namespace> <deployment-name> [timeout]
# Note: uses kubectl wait --for=condition=Available on deployment status.
wait_for_deployment() {
    local namespace="$1"
    local deployment_name="$2"
    local timeout="${3:-180s}"

    log_info "Waiting for deployment '$deployment_name' in namespace '$namespace' to be available..."

    # Run kubectl inside the 'if' condition: with 'set -o errexit' the old
    # pattern (run, then test $?) aborted the whole script on failure, so
    # the error-logging branch was unreachable dead code.
    if kubectl wait --timeout="$timeout" -n "$namespace" deployment/"$deployment_name" --for=condition=Available; then
        log_success "Deployment '$deployment_name' in namespace '$namespace' is available!"
    else
        log_error "Deployment '$deployment_name' in namespace '$namespace' failed to become available within $timeout"
        return 1
    fi
}

# Wait for a CRD to reach the Established condition, retrying while its
# .status.conditions may still be uninitialized (race after creation).
# Usage: wait_for_crd <crd-name> [timeout]
wait_for_crd() {
    local crd_name="$1"
    local timeout="${2:-60s}"
    local attempt

    log_info "Waiting for CRD '$crd_name' to be established..."

    # Small delay to allow the CRD status to be initialized
    sleep 2

    # Three quiet attempts (errors suppressed), pausing between them
    for attempt in 1 2 3; do
        if kubectl wait --for=condition=Established --timeout="$timeout" crd/"$crd_name" 2>/dev/null; then
            return 0
        fi
        if [ "$attempt" -lt 3 ]; then
            log_info "Retry $attempt/3: Waiting for CRD status to be initialized..."
            sleep 3
        fi
    done

    # Final attempt with error output visible
    kubectl wait --for=condition=Established --timeout="$timeout" crd/"$crd_name"
}

# Wait for every listed CRD to be established.
# Usage: wait_for_crds <timeout> <crd1> <crd2> ...
wait_for_crds() {
    local timeout="$1"
    local crd
    shift

    for crd in "$@"; do
        wait_for_crd "$crd" "$timeout" || return 1
    done

    log_success "All CRDs are established!"
}

# Update one or more fields inside the KServe inferenceservice-config
# ConfigMap. Each argument is "<data-key>.<json.path>=<value>": <data-key>
# selects a JSON-string entry under .data (e.g. "ingress"), and
# <json.path> is a jq path inside that JSON document.
# Globals: KSERVE_NAMESPACE (read) - namespace holding the ConfigMap.
# Usage: update_isvc_config "ingress.enableGatewayApi=true" "deploy.defaultDeploymentMode=Standard"
# Example:
#   update_isvc_config "ingress.enableGatewayApi=true"
#   update_isvc_config "ingress.enableGatewayApi=true" "ingress.className=\"envoy\""
update_isvc_config() {
    if [ $# -eq 0 ]; then
        log_error "No update parameters provided"
        return 1
    fi

    log_info "Updating inferenceservice-config..."

    # Snapshot the live ConfigMap as JSON into a temp file
    local temp=$(mktemp)
    kubectl get configmap inferenceservice-config -n "${KSERVE_NAMESPACE}" -o json > "$temp"

    # Group updates by data_key to avoid multiple updates to the same key
    declare -A data_key_updates

    for arg in "$@"; do
        # "a.b.c=v" -> key="a.b.c", value="v"
        local key="${arg%%=*}"
        local value="${arg#*=}"

        # Split "ingress.enableGatewayApi" -> data_key="ingress", json_path="enableGatewayApi"
        local data_key="${key%%.*}"
        local json_path="${key#*.}"

        # Append to the list of updates for this data_key
        # ('|' separates the queued path=value pairs for one data_key)
        if [ -z "${data_key_updates[$data_key]:-}" ]; then
            data_key_updates[$data_key]="$json_path=$value"
        else
            data_key_updates[$data_key]="${data_key_updates[$data_key]}|$json_path=$value"
        fi
    done

    # Process each data_key once with all its updates
    for data_key in "${!data_key_updates[@]}"; do
        # Get current JSON from data field (stored as string)
        local current=$(jq -r ".data.\"$data_key\"" "$temp")

        # Skip if the key doesn't exist or is null
        if [ "$current" = "null" ] || [ -z "$current" ]; then
            log_info "  ⊘ Skipping all updates for '$data_key' (not found in ConfigMap)"
            continue
        fi

        # Apply all updates for this data_key
        local updated="$current"
        IFS='|' read -ra updates <<< "${data_key_updates[$data_key]}"
        for update in "${updates[@]}"; do
            local json_path="${update%%=*}"
            local value="${update#*=}"

            # Smart quote handling for string values
            # If value doesn't start with " and is not a number/boolean, add double quotes
            if [[ ! $value =~ ^\" ]] && [[ ! $value =~ ^[0-9]+(\.[0-9]+)?$ ]] && [[ ! $value =~ ^(true|false|null)$ ]]; then
                value="\"$value\""
            fi

            # Check if the nested path exists, create if missing
            local parent_path="${json_path%.*}"
            if [ "$parent_path" != "$json_path" ]; then
                # There's a parent path, check if it exists
                if ! echo "$updated" | jq -e ".$parent_path" >/dev/null 2>&1; then
                    log_info "  + Creating nested path '$parent_path' in $data_key"
                    # Create all intermediate paths as empty objects
                    # NOTE(review): creates only the immediate parent, not
                    # every intermediate level - confirm paths deeper than
                    # one missing level are not needed.
                    updated=$(echo "$updated" | jq ".$parent_path = {}")
                fi
            fi

            # Update the nested field
            updated=$(echo "$updated" | jq ".$json_path = $value")
            log_info "  ✓ $data_key.$json_path = $value"
        done

        # Put it back as JSON string (preserve pretty-print format like original ConfigMap)
        # NOTE(review): pretty_json is not declared 'local' and leaks into
        # the caller's scope - confirm this is intentional.
        pretty_json=$(echo "$updated" | jq '.')
        jq --arg updated "$pretty_json" ".data.\"$data_key\" = \$updated" "$temp" > "$temp.new" && mv "$temp.new" "$temp"
    done

    kubectl apply -f "$temp"
    rm -f "$temp"

    log_success "ConfigMap updated successfully"
}

# Create a namespace unless it already exists (idempotent).
# Usage: create_or_skip_namespace <namespace>
create_or_skip_namespace() {
    local ns="$1"

    if ! kubectl get namespace "$ns" &>/dev/null; then
        log_info "Creating namespace '$ns'..."
        kubectl create namespace "$ns"
        log_success "Namespace '$ns' created"
    else
        log_info "Namespace '$ns' already exists"
    fi
}

# Verify that all listed CLI tools are on PATH; exit 1 naming any missing.
# Usage: check_cli_exist <tool1> [tool2] [tool3] ...
check_cli_exist() {
    local tool
    local missing=()

    for tool in "$@"; do
        command_exists "$tool" || missing+=("$tool")
    done

    if [ ${#missing[@]} -eq 0 ]; then
        return 0
    fi

    log_error "Required CLI tool(s) not found: ${missing[*]}"
    log_error "Please install missing tool(s) first."
    exit 1
}

# True when the named command is resolvable on PATH.
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# True when $1 >= $2 under version ordering (delegates to sort -V).
# Usage: version_gte "v3.17.3" "v3.16.0"
# Example: version_gte "$current_version" "$required_version" && echo "OK"
version_gte() {
    local highest
    highest="$(printf '%s\n' "$1" "$2" | sort -V | tail -1)"
    [ "$1" = "$highest" ]
}

# ============================================================================

# Resolve and export a variable using priority order:
#   1. runtime env > 2. component env > 3. global env > 4. component default
# Usage: set_env_with_priority VAR_NAME COMPONENT_ENV_VALUE GLOBAL_ENV_VALUE DEFAULT_VALUE
set_env_with_priority() {
    local var_name="$1"
    local component_value="$2"
    local global_value="$3"
    local default_value="$4"

    # Indirect expansion instead of eval: immune to code injection through
    # var_name and tolerant of unset variables under 'set -o nounset'.
    local current_value="${!var_name-}"

    # A value matching none of default/component/global must have been set
    # at runtime by the caller - highest priority, keep it untouched.
    if [ -n "$current_value" ] && [ "$current_value" != "$default_value" ] &&
       [ "$current_value" != "$component_value" ] && [ "$current_value" != "$global_value" ]; then
        return
    fi

    # Apply priority: component env > global env > default
    if [ -n "$component_value" ]; then
        export "$var_name=$component_value"
    elif [ -n "$global_value" ]; then
        export "$var_name=$global_value"
    fi
    # If both are empty, the variable keeps its default value
}

#================================================
# Determine repository root using find_repo_root
#================================================

# SCRIPT_DIR: absolute directory containing this script; BASH_SOURCE may be
# unset when the script is piped into bash, hence the '.' fallback.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-.}")" && pwd)"
# Second argument "true": fall back to $PWD instead of exiting when no
# .git directory is found above SCRIPT_DIR.
REPO_ROOT="$(find_repo_root "${SCRIPT_DIR}" "true")"
export REPO_ROOT

# Set up BIN_DIR - use the repo's bin/ if it exists, otherwise create a
# temp directory. The temp case sets BIN_DIR_CREATED_BY_SCRIPT so the
# EXIT trap (cleanup_bin_dir) removes it; previously the flag was never
# set, so the temp directory leaked on every run.
if [[ -d "${REPO_ROOT}/bin" ]]; then
    export BIN_DIR="${REPO_ROOT}/bin"
else
    export BIN_DIR="$(mktemp -d)"
    export BIN_DIR_CREATED_BY_SCRIPT=true
    log_info "Using temp BIN_DIR: ${BIN_DIR}"
fi

export PATH="${BIN_DIR}:${PATH}"

# Install-mode flags; CLI switches override the environment defaults.
# --uninstall takes precedence when both switches are present.
UNINSTALL="${UNINSTALL:-false}"
REINSTALL="${REINSTALL:-false}"

case "$*" in
    *--uninstall*) UNINSTALL=true ;;
    *--reinstall*) REINSTALL=true ;;
esac

export REINSTALL
export UNINSTALL

# RELEASE mode (from definition file)
RELEASE="true"
export RELEASE

#================================================
# Version Dependencies (from kserve-deps.env)
#================================================
# Pinned versions; regenerated from the definition file, do not edit here.

# Developer / build tooling
GOLANGCI_LINT_VERSION=v1.64.8
CONTROLLER_TOOLS_VERSION=v0.19.0
ENVTEST_VERSION=latest
YQ_VERSION=v4.28.1
HELM_VERSION=v3.16.3
KUSTOMIZE_VERSION=v5.5.0
HELM_DOCS_VERSION=v1.12.0
BLACK_FMT_VERSION=24.3
FLAKE8_LINT_VERSION=7.1
POETRY_VERSION=1.8.3
UV_VERSION=0.7.8
# Cluster component versions
CERT_MANAGER_VERSION=v1.17.0
ENVOY_GATEWAY_VERSION=v1.5.0
ENVOY_AI_GATEWAY_VERSION=v0.3.0
KNATIVE_OPERATOR_VERSION=v1.16.0
KNATIVE_SERVING_VERSION=1.15.2
KEDA_OTEL_ADDON_VERSION=v0.0.6
KSERVE_VERSION=v0.16.0
ISTIO_VERSION=1.27.1
KEDA_VERSION=2.16.1
OPENTELEMETRY_OPERATOR_VERSION=0.113.0
LWS_VERSION=v0.6.2
GATEWAY_API_VERSION=v1.2.1
GIE_VERSION=v0.3.0

#================================================
# Global Variables (from global-vars.env)
#================================================
# These provide default namespace values that can be overridden
# by environment variables or GLOBAL_ENV settings below

# Target namespaces per component (environment overrides win)
KEDA_NAMESPACE="${KEDA_NAMESPACE:-keda}"
KSERVE_NAMESPACE="${KSERVE_NAMESPACE:-kserve}"
OTEL_NAMESPACE="${OTEL_NAMESPACE:-opentelemetry-operator}"
OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-knative-operator}"
SERVING_NAMESPACE="${SERVING_NAMESPACE:-knative-serving}"
ISTIO_NAMESPACE="${ISTIO_NAMESPACE:-istio-system}"
GATEWAY_NAMESPACE="${GATEWAY_NAMESPACE:-kserve}"
# Install-behavior toggles
DEPLOYMENT_MODE="${DEPLOYMENT_MODE:-Knative}"
GATEWAY_NETWORK_LAYER="${GATEWAY_NETWORK_LAYER:-false}"
LLMISVC="${LLMISVC:-false}"
EMBED_MANIFESTS="${EMBED_MANIFESTS:-false}"
KSERVE_CUSTOM_ISVC_CONFIGS="${KSERVE_CUSTOM_ISVC_CONFIGS:-}"

#================================================
# Component-Specific Variables
#================================================

# Helm release names and chart directory names used below
ADDON_RELEASE_NAME="keda-otel-scaler"
OTEL_RELEASE_NAME="my-opentelemetry-operator"
KSERVE_CRD_RELEASE_NAME="kserve-crd"
KSERVE_RELEASE_NAME="kserve"
CRD_DIR_NAME="kserve-crd"
CORE_DIR_NAME="kserve-resources"
TARGET_DEPLOYMENT_NAMES=(
"kserve-controller-manager"
)
# Chart source: remote by default; USE_LOCAL_CHARTS=true uses CHARTS_DIR
USE_LOCAL_CHARTS="${USE_LOCAL_CHARTS:-false}"
CHARTS_DIR="${REPO_ROOT}/charts"
SET_KSERVE_VERSION="${SET_KSERVE_VERSION:-}"

#================================================
# Component Functions
#================================================

# ----------------------------------------
# CLI/Component: helm
# ----------------------------------------



# Download and install the Helm CLI into BIN_DIR, skipping the download
# when an equal-or-newer helm is already on PATH.
install_helm() {
    local os arch
    # Split declaration from assignment so a detect_* 'exit 1' (unsupported
    # platform) is not masked by 'local' under errexit.
    os=$(detect_os)
    arch=$(detect_arch)
    local archive_name="helm-${HELM_VERSION}-${os}-${arch}.tar.gz"
    local download_url="https://get.helm.sh/${archive_name}"

    log_info "Installing Helm ${HELM_VERSION} for ${os}/${arch}..."

    # Skip or upgrade depending on the currently installed version
    if command -v helm &>/dev/null; then
        local current_version
        current_version=$(helm version --template='{{.Version}}' 2>/dev/null) || true
        if [[ -n "$current_version" ]] && version_gte "$current_version" "$HELM_VERSION"; then
            log_info "Helm ${current_version} is already installed (>= ${HELM_VERSION})"
            return 0
        fi
        [[ -n "$current_version" ]] && log_info "Upgrading Helm from ${current_version} to ${HELM_VERSION}..."
    fi

    local temp_dir
    temp_dir=$(mktemp -d)
    local temp_file="${temp_dir}/${archive_name}"

    if command -v wget &>/dev/null; then
        wget -q "${download_url}" -O "${temp_file}"
    elif command -v curl &>/dev/null; then
        curl -sL "${download_url}" -o "${temp_file}"
    else
        log_error "Neither wget nor curl is available"
        rm -rf "${temp_dir}"
        exit 1
    fi

    tar -xzf "${temp_file}" -C "${temp_dir}"

    # Helm archives place the binary under <os>-<arch>/helm
    local binary_path="${temp_dir}/${os}-${arch}/helm"

    if [[ ! -f "${binary_path}" ]]; then
        log_error "helm binary not found in archive"
        rm -rf "${temp_dir}"
        exit 1
    fi

    chmod +x "${binary_path}"

    # Fall back to sudo when BIN_DIR is not writable by the current user
    if [[ -w "${BIN_DIR}" ]]; then
        mv "${binary_path}" "${BIN_DIR}/helm"
    else
        sudo mv "${binary_path}" "${BIN_DIR}/helm"
    fi

    rm -rf "${temp_dir}"

    log_success "Successfully installed Helm ${HELM_VERSION} to ${BIN_DIR}/helm"
    helm version
}

# ----------------------------------------
# CLI/Component: kustomize
# ----------------------------------------



# Download and install Kustomize into BIN_DIR, skipping the download when
# an equal-or-newer kustomize is already on PATH.
install_kustomize() {
    local os arch
    # Split declaration from assignment so a detect_* 'exit 1' (unsupported
    # platform) is not masked by 'local' under errexit.
    os=$(detect_os)
    arch=$(detect_arch)
    local archive_name="kustomize_${KUSTOMIZE_VERSION}_${os}_${arch}.tar.gz"
    local download_url="https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/${archive_name}"

    log_info "Installing Kustomize ${KUSTOMIZE_VERSION} for ${os}/${arch}..."

    # Skip or upgrade depending on the currently installed version;
    # '|| true' keeps a no-match grep from tripping errexit.
    if command -v kustomize &>/dev/null; then
        local current_version
        current_version=$(kustomize version --short 2>/dev/null | grep -oP 'v[0-9.]+') || true
        if [[ -n "$current_version" ]] && version_gte "$current_version" "$KUSTOMIZE_VERSION"; then
            log_info "Kustomize ${current_version} is already installed (>= ${KUSTOMIZE_VERSION})"
            return 0
        fi
        [[ -n "$current_version" ]] && log_info "Upgrading Kustomize from ${current_version} to ${KUSTOMIZE_VERSION}..."
    fi

    local temp_dir
    temp_dir=$(mktemp -d)
    local temp_file="${temp_dir}/${archive_name}"

    if command -v wget &>/dev/null; then
        wget -q "${download_url}" -O "${temp_file}"
    elif command -v curl &>/dev/null; then
        curl -sL "${download_url}" -o "${temp_file}"
    else
        log_error "Neither wget nor curl is available"
        rm -rf "${temp_dir}"
        exit 1
    fi

    tar -xzf "${temp_file}" -C "${temp_dir}"

    # Kustomize archives contain the binary at the archive root
    local binary_path="${temp_dir}/kustomize"

    if [[ ! -f "${binary_path}" ]]; then
        log_error "kustomize binary not found in archive"
        rm -rf "${temp_dir}"
        exit 1
    fi

    chmod +x "${binary_path}"

    # Fall back to sudo when BIN_DIR is not writable by the current user
    if [[ -w "${BIN_DIR}" ]]; then
        mv "${binary_path}" "${BIN_DIR}/kustomize"
    else
        sudo mv "${binary_path}" "${BIN_DIR}/kustomize"
    fi

    rm -rf "${temp_dir}"

    log_success "Successfully installed Kustomize ${KUSTOMIZE_VERSION} to ${BIN_DIR}/kustomize"
    kustomize version
}

# ----------------------------------------
# CLI/Component: yq
# ----------------------------------------



# Download and install yq (a single static binary) into BIN_DIR, skipping
# the download when an equal-or-newer yq is already on PATH.
install_yq() {
    local os arch
    # Split declaration from assignment so a detect_* 'exit 1' (unsupported
    # platform) is not masked by 'local' under errexit.
    os=$(detect_os)
    arch=$(detect_arch)
    local binary_name="yq_${os}_${arch}"
    local download_url="https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${binary_name}"

    log_info "Installing yq ${YQ_VERSION} for ${os}/${arch}..."

    if command -v yq &>/dev/null; then
        local current_version
        current_version=$(yq --version 2>&1 | grep -oP 'version \K[v0-9.]+') || true
        # Normalize version format (add 'v' prefix if missing)
        [[ -n "$current_version" && "$current_version" != v* ]] && current_version="v${current_version}"
        if [[ -n "$current_version" ]] && version_gte "$current_version" "$YQ_VERSION"; then
            log_info "yq ${current_version} is already installed (>= ${YQ_VERSION})"
            return 0
        fi
        [[ -n "$current_version" ]] && log_info "Upgrading yq from ${current_version} to ${YQ_VERSION}..."
    fi

    local temp_file
    temp_file=$(mktemp)

    if command -v wget &>/dev/null; then
        wget -q "${download_url}" -O "${temp_file}"
    elif command -v curl &>/dev/null; then
        curl -sL "${download_url}" -o "${temp_file}"
    else
        # Was log_info - this is a fatal error and must log at ERROR level
        log_error "Neither wget nor curl is available"
        rm -f "${temp_file}"
        exit 1
    fi

    chmod +x "${temp_file}"

    # Fall back to sudo when BIN_DIR is not writable by the current user
    if [[ -w "${BIN_DIR}" ]]; then
        mv "${temp_file}" "${BIN_DIR}/yq"
    else
        sudo mv "${temp_file}" "${BIN_DIR}/yq"
    fi

    log_success "Successfully installed yq ${YQ_VERSION} to ${BIN_DIR}/yq"
    yq --version
}

# ----------------------------------------
# CLI/Component: cert-manager
# ----------------------------------------

# Best-effort removal of cert-manager: every step ignores errors so the
# uninstall proceeds even when the release or namespace is already gone.
uninstall_cert_manager() {
    log_info "Uninstalling cert-manager..."
    # Remove the Helm release, then force-delete leftovers and the namespace
    helm uninstall cert-manager -n cert-manager 2>/dev/null || true
    kubectl delete all --all -n cert-manager --force --grace-period=0 2>/dev/null || true
    kubectl delete namespace cert-manager --wait=true --timeout=60s --force --grace-period=0 2>/dev/null || true
    log_success "cert-manager uninstalled"
}

# Install cert-manager via Helm (CRDs included) and wait for its pods.
# Honors --reinstall: an existing release is removed and reinstalled.
install_cert_manager() {
    if helm list -n cert-manager 2>/dev/null | grep -q "cert-manager"; then
        if [ "$REINSTALL" != false ]; then
            log_info "Reinstalling cert-manager..."
            uninstall_cert_manager
        else
            log_info "cert-manager is already installed. Use --reinstall to reinstall."
            return 0
        fi
    fi

    log_info "Adding cert-manager Helm repository..."
    helm repo add jetstack https://charts.jetstack.io --force-update

    log_info "Installing cert-manager ${CERT_MANAGER_VERSION}..."
    helm install \
        cert-manager jetstack/cert-manager \
        --namespace cert-manager \
        --create-namespace \
        --version "${CERT_MANAGER_VERSION}" \
        --set crds.enabled=true \
        --wait

    log_success "Successfully installed cert-manager ${CERT_MANAGER_VERSION} via Helm"

    wait_for_pods "cert-manager" "app in (cert-manager,webhook,cainjector)" "180s"

    log_success "cert-manager is ready!"
}

# ----------------------------------------
# CLI/Component: istio
# ----------------------------------------

# Best-effort removal of Istio: releases are removed in reverse install
# order (gateway, istiod, base); every step ignores errors.
uninstall_istio() {
    log_info "Uninstalling Istio..."
    helm uninstall istio-ingressgateway -n "${ISTIO_NAMESPACE}" 2>/dev/null || true
    helm uninstall istiod -n "${ISTIO_NAMESPACE}" 2>/dev/null || true
    helm uninstall istio-base -n "${ISTIO_NAMESPACE}" 2>/dev/null || true
    kubectl delete all --all -n "${ISTIO_NAMESPACE}" --force --grace-period=0 2>/dev/null || true
    kubectl delete namespace "${ISTIO_NAMESPACE}" --wait=true --timeout=60s --force --grace-period=0 2>/dev/null || true
    log_success "Istio uninstalled"
}

# Install Istio (base CRDs, istiod, ingress gateway) via Helm and wait for
# the control-plane and gateway pods. Honors --reinstall.
install_istio() {
    if helm list -n "${ISTIO_NAMESPACE}" 2>/dev/null | grep -q "istio-base"; then
        if [ "$REINSTALL" != false ]; then
            log_info "Reinstalling Istio..."
            uninstall_istio
        else
            log_info "Istio is already installed. Use --reinstall to reinstall."
            return 0
        fi
    fi

    log_info "Adding Istio Helm repository..."
    helm repo add istio https://istio-release.storage.googleapis.com/charts --force-update

    log_info "Installing istio-base ${ISTIO_VERSION}..."
    helm install istio-base istio/base \
        --namespace "${ISTIO_NAMESPACE}" \
        --create-namespace \
        --version "${ISTIO_VERSION}" \
        --set defaultRevision=default \
        --wait \
        ${ISTIO_BASE_EXTRA_ARGS:-}

    log_info "Installing istiod ${ISTIO_VERSION}..."
    helm install istiod istio/istiod \
        --namespace "${ISTIO_NAMESPACE}" \
        --version "${ISTIO_VERSION}" \
        --set proxy.autoInject=disabled \
        --set-string pilot.podAnnotations."cluster-autoscaler\.kubernetes\.io/safe-to-evict"=true \
        --wait \
        ${ISTIOD_EXTRA_ARGS:-}

    log_info "Installing istio-ingressgateway ${ISTIO_VERSION}..."
    helm install istio-ingressgateway istio/gateway \
        --namespace "${ISTIO_NAMESPACE}" \
        --version "${ISTIO_VERSION}" \
        --set-string podAnnotations."cluster-autoscaler\.kubernetes\.io/safe-to-evict"=true \
        ${ISTIO_GATEWAY_EXTRA_ARGS:-}

    log_success "Successfully installed Istio ${ISTIO_VERSION} via Helm"

    # The gateway install above does not --wait, so wait on pods explicitly
    wait_for_pods "${ISTIO_NAMESPACE}" "app=istiod" "600s"
    wait_for_pods "${ISTIO_NAMESPACE}" "app=istio-ingressgateway" "600s"

    log_success "Istio is ready!"
}

# ----------------------------------------
# CLI/Component: istio-ingress-class
# ----------------------------------------

# Best-effort deletion of the cluster-scoped 'istio' IngressClass.
uninstall_istio_ingress_class() {
    log_info "Deleting Istio IngressClass 'istio'..."
    kubectl delete ingressclass "istio" --ignore-not-found=true --force --grace-period=0 2>/dev/null || true
    log_success "Istio IngressClass 'istio' deleted"
}

# Create the 'istio' IngressClass pointing at the Istio ingress controller.
# Honors --reinstall: an existing IngressClass is deleted and recreated.
install_istio_ingress_class() {
    if kubectl get ingressclass "istio" &>/dev/null; then
        if [ "$REINSTALL" != false ]; then
            log_info "Recreating Istio IngressClass 'istio'..."
            uninstall_istio_ingress_class
        else
            log_info "Istio IngressClass 'istio' already exists. Use --reinstall to recreate."
            return 0
        fi
    fi

    log_info "Creating Istio IngressClass 'istio'..."
    kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: istio
spec:
  controller: istio.io/ingress-controller
EOF

    log_success "Istio IngressClass 'istio' created successfully!"
}

# ----------------------------------------
# CLI/Component: keda
# ----------------------------------------

# Best-effort removal of KEDA: the OTel-scaler add-on release is removed
# first (it lives in the same namespace), then KEDA itself; every step
# ignores errors.
uninstall_keda() {
    log_info "Uninstalling KEDA..."

    helm uninstall keda-otel-scaler -n "${KEDA_NAMESPACE}" 2>/dev/null || true
    helm uninstall keda -n "${KEDA_NAMESPACE}" 2>/dev/null || true
    kubectl delete all --all -n "${KEDA_NAMESPACE}" --force --grace-period=0 2>/dev/null || true
    kubectl delete namespace "${KEDA_NAMESPACE}" --wait=true --timeout=60s --force --grace-period=0 2>/dev/null || true

    log_success "KEDA uninstalled"
}

# Install KEDA via Helm into KEDA_NAMESPACE and wait for its operator.
# Honors --reinstall.
install_keda() {
    if helm list -n "${KEDA_NAMESPACE}" 2>/dev/null | grep -q "keda"; then
        if [ "$REINSTALL" != false ]; then
            log_info "Reinstalling KEDA..."
            uninstall_keda
        else
            log_info "KEDA is already installed. Use --reinstall to reinstall."
            return 0
        fi
    fi

    log_info "Adding KEDA Helm repository..."
    helm repo add kedacore https://kedacore.github.io/charts --force-update

    log_info "Installing KEDA ${KEDA_VERSION}..."
    helm install keda kedacore/keda \
        --namespace "${KEDA_NAMESPACE}" \
        --create-namespace \
        --version "${KEDA_VERSION}" \
        --wait \
        ${KEDA_EXTRA_ARGS:-}

    log_success "Successfully installed KEDA ${KEDA_VERSION} via Helm"

    wait_for_pods "${KEDA_NAMESPACE}" "app.kubernetes.io/name=keda-operator" "300s"

    log_success "KEDA is ready!"
}

# ----------------------------------------
# CLI/Component: keda-otel-addon
# ----------------------------------------

# Best-effort removal of the KEDA OTel add-on Helm release (namespace is
# left in place - it belongs to KEDA).
uninstall_keda_otel_addon() {
    log_info "Uninstalling KEDA OTel add-on..."
    helm uninstall "${ADDON_RELEASE_NAME}" -n "${KEDA_NAMESPACE}" 2>/dev/null || true
    log_success "KEDA OTel add-on uninstalled"
}

# Install the KEDA OTel add-on (OCI chart) into the KEDA namespace and wait
# for its pods. Requires KEDA to be installed first; honors --reinstall.
install_keda_otel_addon() {
    # The add-on deploys into KEDA's namespace, so KEDA must exist already
    if ! kubectl get namespace "${KEDA_NAMESPACE}" &>/dev/null; then
        log_error "KEDA namespace '${KEDA_NAMESPACE}' does not exist. Please install KEDA first."
        exit 1
    fi

    if helm list -n "${KEDA_NAMESPACE}" 2>/dev/null | grep -q "${ADDON_RELEASE_NAME}"; then
        if [ "$REINSTALL" != false ]; then
            log_info "Reinstalling KEDA OTel add-on..."
            uninstall_keda_otel_addon
        else
            log_info "KEDA OTel add-on is already installed. Use --reinstall to reinstall."
            return 0
        fi
    fi

    log_info "Installing KEDA OTel add-on ${KEDA_OTEL_ADDON_VERSION} from kedify/otel-add-on..."
    helm upgrade -i "${ADDON_RELEASE_NAME}" \
        oci://ghcr.io/kedify/charts/otel-add-on \
        --namespace "${KEDA_NAMESPACE}" \
        --version="${KEDA_OTEL_ADDON_VERSION}" \
        --wait \
        ${KEDA_OTEL_ADDON_EXTRA_ARGS:-}

    log_success "Successfully installed KEDA OTel add-on ${KEDA_OTEL_ADDON_VERSION} via Helm"

    wait_for_pods "${KEDA_NAMESPACE}" "app.kubernetes.io/instance=${ADDON_RELEASE_NAME}" "300s"

    log_success "KEDA OTel add-on is ready!"
}

# ----------------------------------------
# CLI/Component: opentelemetry
# ----------------------------------------

# Best-effort removal of the OpenTelemetry Operator release, leftover
# resources, and its namespace; every step ignores errors.
uninstall_opentelemetry() {
    log_info "Uninstalling OpenTelemetry Operator..."
    helm uninstall "${OTEL_RELEASE_NAME}" -n "${OTEL_NAMESPACE}" 2>/dev/null || true
    kubectl delete all --all -n "${OTEL_NAMESPACE}" --force --grace-period=0 2>/dev/null || true
    kubectl delete namespace "${OTEL_NAMESPACE}" --wait=true --timeout=60s --force --grace-period=0 2>/dev/null || true
    log_success "OpenTelemetry Operator uninstalled"
}

# Install the OpenTelemetry Operator via Helm (chart-default version) and
# wait for its pods. Honors --reinstall.
install_opentelemetry() {
    if helm list -n "${OTEL_NAMESPACE}" 2>/dev/null | grep -q "${OTEL_RELEASE_NAME}"; then
        if [ "$REINSTALL" != false ]; then
            log_info "Reinstalling OpenTelemetry Operator..."
            uninstall_opentelemetry
        else
            log_info "OpenTelemetry Operator is already installed. Use --reinstall to reinstall."
            return 0
        fi
    fi

    log_info "Adding OpenTelemetry Helm repository..."
    helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts --force-update

    log_info "Installing OpenTelemetry Operator..."
    helm install "${OTEL_RELEASE_NAME}" open-telemetry/opentelemetry-operator \
        --namespace "${OTEL_NAMESPACE}" \
        --create-namespace \
        --wait \
        ${OTEL_OPERATOR_EXTRA_ARGS:-}

    log_success "Successfully installed OpenTelemetry Operator via Helm"

    wait_for_pods "${OTEL_NAMESPACE}" "app.kubernetes.io/instance=${OTEL_RELEASE_NAME}" "300s"

    log_success "OpenTelemetry Operator is ready!"
}

# ----------------------------------------
# CLI/Component: kserve
# ----------------------------------------

# Tear down KServe in one of two modes:
#   - EMBED_MANIFESTS=true: delegate to the generated manifest uninstaller.
#   - otherwise (development/Helm mode): remove the core + CRD Helm releases.
# Removal steps are best-effort ('|| true') so a partial install can still
# be cleaned up; the namespace is force-deleted at the end either way.
uninstall_kserve() {
    log_info "Uninstalling KServe..."

    # EMBED_MANIFESTS: use embedded manifests
    if [ "$EMBED_MANIFESTS" = "true" ]; then
        if type uninstall_kserve_manifest &>/dev/null; then
            uninstall_kserve_manifest
        else
            log_error "EMBED_MANIFESTS enabled but uninstall_kserve_manifest function not found"
            log_error "This script should be called from a generated installation script"
            exit 1
        fi
    else
        # Development/Helm mode
        helm uninstall "${KSERVE_RELEASE_NAME}" -n "${KSERVE_NAMESPACE}" 2>/dev/null || true
        # Fixed: this previously passed the namespace twice ('-n' and
        # '--namespace'); a single flag is sufficient.
        helm uninstall "${KSERVE_CRD_RELEASE_NAME}" -n "${KSERVE_NAMESPACE}" 2>/dev/null || true
    fi

    kubectl delete all --all -n "${KSERVE_NAMESPACE}" --force --grace-period=0 2>/dev/null || true
    kubectl delete namespace "${KSERVE_NAMESPACE}" --wait=true --timeout=60s --force --grace-period=0 2>/dev/null || true
    log_success "KServe uninstalled"
}

# Install KServe (CRDs + core resources), apply inferenceservice-config
# updates, and wait for the controller deployment(s) to become ready.
#
# The install source is chosen by script globals:
#   EMBED_MANIFESTS=true  -> embedded manifests from the generated script
#   USE_LOCAL_CHARTS=true -> local charts under ${CHARTS_DIR} (development)
#   otherwise             -> OCI charts from ghcr.io/kserve
# Honors REINSTALL for an existing Helm release.
install_kserve() {
    # 'helm list -q' prints bare release names; exact-match with 'grep -Fqx'
    # so a release that merely contains ${KSERVE_RELEASE_NAME} as a substring
    # is not mistaken for an existing install.
    if helm list -q -n "${KSERVE_NAMESPACE}" 2>/dev/null | grep -Fqx -- "${KSERVE_RELEASE_NAME}"; then
        if [ "$REINSTALL" = false ]; then
            log_info "KServe is already installed. Use --reinstall to reinstall."
            return 0
        else
            log_info "Reinstalling KServe..."
            uninstall_kserve
        fi
    fi

    # EMBED_MANIFESTS: use embedded manifests from generated script
    if [ "$EMBED_MANIFESTS" = "true" ]; then
        log_info "Installing KServe using embedded manifests ..."

        # Manifest functions are emitted into the generated script; fail fast
        # if this helper is run outside that context.
        if type install_kserve_manifest &>/dev/null; then
            install_kserve_manifest
        else
            log_error "EMBED_MANIFESTS enabled but install_kserve_manifest function not found"
            log_error "This script should be called from a generated installation script"
            exit 1
        fi
    elif [ "${USE_LOCAL_CHARTS}" = true ]; then
        # Install KServe using local charts (for development)
        log_info "Installing KServe using local charts..."
        log_info "📍 Using local charts from ${CHARTS_DIR}/"

        # Update default version in values.yaml.
        # NOTE(review): the leading '*' in this BRE is a literal asterisk, so
        # this replaces the literal placeholder '*defaultVersion*' — verify
        # that is really what values.yaml contains. 'sed -i' without a backup
        # suffix is GNU-sed-only (fails on BSD/macOS sed).
        log_info "Updating default version in values.yaml to ${KSERVE_VERSION}"
        sed -i -e "s/*defaultVersion*/${KSERVE_VERSION}/g" "${CHARTS_DIR}/${CORE_DIR_NAME}/values.yaml"

        # Install KServe CRDs from local chart.
        # The *_EXTRA_ARGS expansions below are deliberately unquoted so that
        # multiple extra helm flags word-split into separate arguments.
        log_info "Installing KServe CRDs..."
        helm install "${KSERVE_CRD_RELEASE_NAME}" "${CHARTS_DIR}/${CRD_DIR_NAME}" \
            --namespace "${KSERVE_NAMESPACE}" \
            --create-namespace \
            --wait \
            ${KSERVE_CRD_EXTRA_ARGS:-}

        # Install KServe resources from local chart
        log_info "Installing KServe resources..."
        helm install "${KSERVE_RELEASE_NAME}" "${CHARTS_DIR}/${CORE_DIR_NAME}" \
            --namespace "${KSERVE_NAMESPACE}" \
            --create-namespace \
            --wait \
            ${KSERVE_EXTRA_ARGS:-}

        log_success "Successfully installed KServe using local charts"
    else
        # Install KServe from OCI registry
        log_info "Installing KServe ${KSERVE_VERSION} from OCI registry..."

        # Install KServe CRDs
        log_info "Installing KServe CRDs..."
        helm install "${KSERVE_CRD_RELEASE_NAME}" \
            "oci://ghcr.io/kserve/charts/${CRD_DIR_NAME}" \
            --version "${KSERVE_VERSION}" \
            --namespace "${KSERVE_NAMESPACE}" \
            --create-namespace \
            --wait \
            ${KSERVE_CRD_EXTRA_ARGS:-}

        # Install KServe resources
        log_info "Installing KServe resources..."
        if ! helm install "${KSERVE_RELEASE_NAME}" \
            "oci://ghcr.io/kserve/charts/${KSERVE_RELEASE_NAME}" \
            --version "${KSERVE_VERSION}" \
            --namespace "${KSERVE_NAMESPACE}" \
            --create-namespace \
            --wait \
            ${KSERVE_EXTRA_ARGS:-}; then

            # If installation fails, retry as a 'helm upgrade' once the
            # controller deployment(s) are Ready (the first attempt may have
            # left a partially-created release behind).
            log_info "Install failed, attempting upgrade instead..."

            for deploy in "${TARGET_DEPLOYMENT_NAMES[@]}"; do
                wait_for_deployment "${KSERVE_NAMESPACE}" "${deploy}" "120s"
            done
            if ! helm upgrade "${KSERVE_RELEASE_NAME}" \
                "oci://ghcr.io/kserve/charts/${KSERVE_RELEASE_NAME}" \
                --version "${KSERVE_VERSION}" \
                --namespace "${KSERVE_NAMESPACE}" \
                --wait \
                ${KSERVE_EXTRA_ARGS:-}; then

                log_error "Failed to install/upgrade KServe ${KSERVE_VERSION}"
                exit 1
            fi
        fi

        log_success "Successfully installed KServe ${KSERVE_VERSION}"
    fi

    # Build list of config updates to apply to the inferenceservice config
    local config_updates=()

    # Update deployment mode if needed
    if [ "${DEPLOYMENT_MODE}" = "Standard" ] || [ "${DEPLOYMENT_MODE}" = "RawDeployment" ]; then
        log_info "Adding deployment mode update: ${DEPLOYMENT_MODE}"
        config_updates+=("deploy.defaultDeploymentMode=\"${DEPLOYMENT_MODE}\"")
    fi

    # Enable Gateway API if needed
    if [ "${GATEWAY_NETWORK_LAYER}" != "false" ]; then
        log_info "Adding Gateway API updates: enableGatewayApi=true, className=${GATEWAY_NETWORK_LAYER}"
        config_updates+=("ingress.ingressGateway.enableGatewayApi=true")
        config_updates+=("ingress.ingressGateway.className=\"${GATEWAY_NETWORK_LAYER}\"")
    fi

    # Apply all config updates at once if there are any
    if [ ${#config_updates[@]} -gt 0 ]; then
        log_info "Applying ${#config_updates[@]} configuration update(s):"
        for update in "${config_updates[@]}"; do
            log_info "  - ${update}"
        done
        update_isvc_config "${config_updates[@]}"
        # Restart the controller(s) so the new config is picked up. Fixed:
        # this previously restarted the hard-coded 'kserve-controller-manager'
        # only, which fails (and aborts under errexit) in LLMISVC mode where
        # TARGET_DEPLOYMENT_NAMES points at the llmisvc controller.
        for deploy in "${TARGET_DEPLOYMENT_NAMES[@]}"; do
            kubectl rollout restart deployment "${deploy}" -n "${KSERVE_NAMESPACE}"
        done
    else
        if [ "${LLMISVC}" = "true" ]; then
            log_info "No configuration updates needed for LLMISVC (GATEWAY_NETWORK_LAYER=${GATEWAY_NETWORK_LAYER})"
        else
            log_info "No configuration updates needed (DEPLOYMENT_MODE=${DEPLOYMENT_MODE}, GATEWAY_NETWORK_LAYER=${GATEWAY_NETWORK_LAYER})"
        fi
    fi

    for deploy in "${TARGET_DEPLOYMENT_NAMES[@]}"; do
        wait_for_deployment "${KSERVE_NAMESPACE}" "${deploy}" "300s"
    done
    log_success "KServe is ready!"
}



#================================================
# Main Installation Logic
#================================================

# Orchestrates the full install/uninstall. Reads script-global flags set by
# the argument parser elsewhere in this generated script (UNINSTALL,
# LLMISVC, SET_KSERVE_VERSION, ...).
main() {
    # Uninstall path: components are removed in reverse install order so
    # dependents (KServe) go before their dependencies (cert-manager).
    if [ "$UNINSTALL" = true ]; then
        echo "=========================================="
        echo "Uninstalling components..."
        echo "=========================================="
        uninstall_kserve
        uninstall_opentelemetry
        uninstall_keda_otel_addon
        uninstall_keda
        uninstall_istio_ingress_class
        uninstall_istio
        uninstall_cert_manager
        
        
        
        echo "=========================================="
        echo "✅ Uninstallation completed!"
        echo "=========================================="
        exit 0
    fi

    echo "=========================================="
    echo "Install KServe Knative Mode and all related dependencies using helm."
    echo "=========================================="



    # Install path: tooling first, then infrastructure dependencies in
    # dependency order, then KServe itself last.
    install_helm
    install_kustomize
    install_yq
    install_cert_manager
    install_istio
    install_istio_ingress_class
    install_keda
    install_keda_otel_addon
    install_opentelemetry
    # The parentheses run the KServe install in a subshell so the LLMISVC /
    # version overrides below do not leak into the rest of the script.
    (
        # Set Helm release names and target pod labels based on LLMISVC
        if [ "${LLMISVC}" = "true" ]; then
            log_info "LLMISVC is enabled"
            CRD_DIR_NAME="kserve-llmisvc-crd"
            CORE_DIR_NAME="kserve-llmisvc-resources"
            KSERVE_CRD_RELEASE_NAME="kserve-llmisvc-crd"
            KSERVE_RELEASE_NAME="kserve-llmisvc-resources"
            TARGET_DEPLOYMENT_NAMES=("kserve-llmisvc-controller-manager")
        fi
        
        # Optional CLI override of the chart version to install.
        if [ "${SET_KSERVE_VERSION}" != "" ]; then
            log_info "Setting KServe version to ${SET_KSERVE_VERSION}"
            KSERVE_VERSION="${SET_KSERVE_VERSION}"
        fi
        install_kserve
    )

    echo "=========================================="
    echo "✅ Installation completed successfully!"
    echo "=========================================="
}



# Entry point: forward all CLI arguments (e.g. --reinstall/--uninstall) to main.
main "$@"
