#!/bin/bash

# EKS Self-Managed Node Group Configuration Collector
# This script collects configuration details from self-managed node groups
# (plus cluster, add-on, ASG, EC2, IAM and Kubernetes metadata) into a
# timestamped output directory.

# Exit on the first unhandled command failure.
# NOTE(review): `pipefail` is not set, so a failure in a non-final pipeline
# stage (e.g. the kubectl|grep|awk pipelines below) is silently ignored —
# confirm that is intentional before enabling it, since some pipelines here
# legitimately produce no matches.
set -e

# ANSI color codes used by the print_* helpers below
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Default values (overridden by the command-line flags parsed below)
CLUSTER_NAME=""    # -c/--cluster (required)
REGION=""          # -r/--region (required)
OUTPUT_DIR="./eks-nodegroup-config-$(date +%Y%m%d-%H%M%S)"  # -o/--output
PROFILE=""         # -p/--profile, optional AWS CLI profile
DETAILED_NODES="false"  # --detailed-nodes: enables slow `kubectl describe nodes`
# Print an informational message with a blue [INFO] prefix to stdout.
print_info() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}

# Print a success message with a green [SUCCESS] prefix to stdout.
print_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}

# Print a warning message with a yellow [WARNING] prefix to stdout.
print_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}

# Print an error message with a red [ERROR] prefix to stdout.
print_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}

# Function to show usage
# Prints the help text to stdout; exiting is left to the caller. The here-doc
# delimiter is unquoted, so $0 expands to the invoked script path — every
# other line of the help text is literal output.
usage() {
    cat << EOF
Usage: $0 -c CLUSTER_NAME -r REGION [OPTIONS]

Required:
  -c, --cluster     EKS cluster name
  -r, --region      AWS region

Optional:
  -o, --output      Output directory (default: ./eks-nodegroup-config-TIMESTAMP)
  -p, --profile     AWS profile name
  --detailed-nodes  Include detailed node descriptions (slow for large clusters)
  -h, --help        Show this help message

Examples:
  $0 -c my-cluster -r us-east-1
  $0 -c my-cluster -r us-east-1 -p production -o /tmp/config-backup
  $0 -c my-cluster -r us-east-1 --detailed-nodes  # Include detailed node info
EOF
}

# Parse command line arguments.
# Fix: previously a value-taking flag supplied without its value made
# "shift 2" fail, and under `set -e` the script died silently with no
# diagnostic; each such flag now verifies its argument first.

# Fail with a clear message when a value-taking option has no value.
#   $1 - the option as typed on the command line
#   $2 - the candidate value (may be empty or missing)
require_arg() {
    if [[ -z "${2:-}" ]]; then
        print_error "Option $1 requires an argument"
        usage
        exit 1
    fi
}

while [[ $# -gt 0 ]]; do
    case $1 in
        -c|--cluster)
            require_arg "$1" "${2:-}"
            CLUSTER_NAME="$2"
            shift 2
            ;;
        -r|--region)
            require_arg "$1" "${2:-}"
            REGION="$2"
            shift 2
            ;;
        -o|--output)
            require_arg "$1" "${2:-}"
            OUTPUT_DIR="$2"
            shift 2
            ;;
        -p|--profile)
            require_arg "$1" "${2:-}"
            PROFILE="$2"
            shift 2
            ;;
        --detailed-nodes)
            DETAILED_NODES="true"
            shift
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        *)
            print_error "Unknown option: $1"
            usage
            exit 1
            ;;
    esac
done

# Abort early unless both required flags were supplied.
if [[ -z "$CLUSTER_NAME" ]] || [[ -z "$REGION" ]]; then
    print_error "Cluster name and region are required"
    usage
    exit 1
fi

# Base AWS CLI options: region always, profile only when one was requested.
AWS_CLI_OPTS="--region $REGION${PROFILE:+ --profile $PROFILE}"

# Create the collection directory up front so every collector can write into it.
mkdir -p "$OUTPUT_DIR"
print_info "Output directory: $OUTPUT_DIR"

# Run one AWS CLI command with uniform logging and error handling.
#   $1 - AWS CLI sub-command string; it is eval'd together with
#        $AWS_CLI_OPTS, so embedded \$(...) in the string is expanded at
#        call time (several collectors rely on this deferred expansion)
#   $2 - file that receives the command's stdout
#   $3 - human-readable description used in log messages
# On failure a placeholder error line is written to the output file and the
# function still returns 0, so one failed call never aborts the run.
run_aws_cmd() {
    local aws_args="$1"
    local dest="$2"
    local what="$3"

    print_info "Collecting $what..."
    if ! eval "aws $aws_args $AWS_CLI_OPTS" > "$dest" 2>/dev/null; then
        print_warning "✗ Failed to collect $what"
        echo "Error: Failed to collect $what" > "$dest"
        return 0
    fi
    print_success "✓ $what saved to $(basename "$dest")"
}

# Function to collect cluster information
# Saves the EKS cluster description and the list of installed add-ons, then
# describes each add-on and its configuration schema into $OUTPUT_DIR/addons/.
# Depends on run_aws_cmd, $CLUSTER_NAME and $OUTPUT_DIR; leaves the global
# `addons` variable holding the newline-separated add-on names.
collect_cluster_info() {
    print_info "=== Collecting EKS Cluster Information ==="
    
    # Cluster details
    run_aws_cmd "eks describe-cluster --name $CLUSTER_NAME" \
        "$OUTPUT_DIR/cluster-details.json" \
        "cluster details"
    
    # Cluster add-ons
    run_aws_cmd "eks list-addons --cluster-name $CLUSTER_NAME" \
        "$OUTPUT_DIR/cluster-addons.json" \
        "cluster add-ons list"
    
    # Get detailed add-on information
    if [[ -f "$OUTPUT_DIR/cluster-addons.json" ]]; then
        # Newline-separated add-on names; empty when the file is an error stub
        addons=$(jq -r '.addons[]?' "$OUTPUT_DIR/cluster-addons.json" 2>/dev/null || echo "")
        if [[ -n "$addons" ]]; then
            mkdir -p "$OUTPUT_DIR/addons"
            while IFS= read -r addon; do
                if [[ -n "$addon" ]]; then
                    run_aws_cmd "eks describe-addon --cluster-name $CLUSTER_NAME --addon-name $addon" \
                        "$OUTPUT_DIR/addons/$addon.json" \
                        "add-on: $addon"
                    
                    # Get add-on configuration if it exists
                    # NOTE: the \$(jq ...) below is escaped on purpose — it is
                    # expanded later, inside run_aws_cmd's eval, after the
                    # describe-addon output file above has been written.
                    run_aws_cmd "eks describe-addon-configuration --addon-name $addon --addon-version \$(jq -r '.addon.addonVersion' \"$OUTPUT_DIR/addons/$addon.json\" 2>/dev/null || echo 'latest')" \
                        "$OUTPUT_DIR/addons/$addon-configuration-schema.json" \
                        "add-on configuration schema: $addon"
                fi
            done <<< "$addons"
        fi
    fi
}

# Function to collect add-on configurations
# For every add-on found by collect_cluster_info, saves its configuration
# schema, its currently-set configuration values (or a stub when defaults
# are in use), and the versions available for the cluster's Kubernetes
# version, all under $OUTPUT_DIR/addon-configs/. Must run after
# collect_cluster_info, which produces the files read here.
collect_addon_configurations() {
    print_info "=== Collecting Add-on Configurations ==="
    
    if [[ -f "$OUTPUT_DIR/cluster-addons.json" ]]; then
        addons=$(jq -r '.addons[]?' "$OUTPUT_DIR/cluster-addons.json" 2>/dev/null || echo "")
        if [[ -n "$addons" ]]; then
            mkdir -p "$OUTPUT_DIR/addon-configs"
            
            while IFS= read -r addon; do
                # Requires the per-add-on describe output from collect_cluster_info
                if [[ -n "$addon" && -f "$OUTPUT_DIR/addons/$addon.json" ]]; then
                    print_info "Collecting configuration for add-on: $addon"
                    
                    # Get the addon version for configuration schema
                    local addon_version=$(jq -r '.addon.addonVersion // "latest"' "$OUTPUT_DIR/addons/$addon.json" 2>/dev/null)
                    
                    # Get add-on configuration schema
                    run_aws_cmd "eks describe-addon-configuration --addon-name $addon --addon-version $addon_version" \
                        "$OUTPUT_DIR/addon-configs/$addon-config-schema.json" \
                        "configuration schema for $addon v$addon_version"
                    
                    # Get current add-on configuration values (if any custom config exists)
                    local config_status=$(jq -r '.addon.configurationValues // empty' "$OUTPUT_DIR/addons/$addon.json" 2>/dev/null)
                    if [[ -n "$config_status" && "$config_status" != "null" ]]; then
                        echo "$config_status" > "$OUTPUT_DIR/addon-configs/$addon-current-config.json"
                        print_success "✓ Current configuration for $addon saved to addon-configs/$addon-current-config.json"
                    else
                        echo "{\"note\": \"No custom configuration values set for this add-on\"}" > "$OUTPUT_DIR/addon-configs/$addon-current-config.json"
                        print_info "ℹ No custom configuration for $addon (using defaults)"
                    fi
                    
                    # Get add-on versions available
                    # NOTE: the \$(jq ...) is escaped on purpose — it is expanded
                    # later, inside run_aws_cmd's eval, reading the cluster
                    # version at execution time (falling back to '1.28').
                    run_aws_cmd "eks describe-addon-versions --addon-name $addon --kubernetes-version \$(jq -r '.cluster.version' \"$OUTPUT_DIR/cluster-details.json\" 2>/dev/null || echo '1.28')" \
                        "$OUTPUT_DIR/addon-configs/$addon-available-versions.json" \
                        "available versions for $addon"
                    
                    # Extract and save configuration recommendations if available
                    if [[ -f "$OUTPUT_DIR/addon-configs/$addon-config-schema.json" ]]; then
                        local config_schema=$(jq -r '.configurationSchema // empty' "$OUTPUT_DIR/addon-configs/$addon-config-schema.json" 2>/dev/null)
                        if [[ -n "$config_schema" && "$config_schema" != "null" ]]; then
                            # Pretty-print the schema; fall through to a warning if it is not valid JSON
                            echo "$config_schema" | jq '.' > "$OUTPUT_DIR/addon-configs/$addon-schema-details.json" 2>/dev/null && \
                                print_success "✓ Configuration schema details for $addon saved" || \
                                print_warning "✗ Failed to parse configuration schema for $addon"
                        fi
                    fi
                fi
            done <<< "$addons"
        else
            print_info "No add-ons found to collect configurations for"
        fi
    else
        print_warning "Add-ons list not found, skipping configuration collection"
    fi
}

# Function to collect add-on Kubernetes resources
# Saves the kube-system workloads/config belonging to the core EKS add-ons
# (VPC-CNI, CoreDNS, kube-proxy, EBS/EFS/S3 CSI drivers) as YAML under
# $OUTPUT_DIR/addon-k8s-resources/. The twelve near-identical
# check-then-dump stanzas are factored into save_kube_system_resource;
# messages and files produced are unchanged.

# Save one kube-system resource as YAML, if it exists in the cluster.
#   $1 - resource type (daemonset/deployment/configmap/service)
#   $2 - resource name
#   $3 - output file name (written under $OUTPUT_DIR/addon-k8s-resources)
#   $4 - human-readable label used in the success message
save_kube_system_resource() {
    local kind="$1" name="$2" outfile="$3" label="$4"
    if kubectl get "$kind" "$name" -n kube-system &>/dev/null; then
        kubectl get "$kind" "$name" -n kube-system -o yaml \
            > "$OUTPUT_DIR/addon-k8s-resources/$outfile" 2>/dev/null
        print_success "✓ $label saved"
    fi
}

collect_addon_kubernetes_resources() {
    print_info "=== Collecting Add-on Kubernetes Resources ==="

    # Check if kubectl is available
    if ! command -v kubectl &> /dev/null; then
        print_warning "kubectl not found - skipping add-on Kubernetes resources"
        return
    fi

    mkdir -p "$OUTPUT_DIR/addon-k8s-resources"

    print_info "Collecting VPC-CNI Kubernetes resources..."
    save_kube_system_resource daemonset aws-node vpc-cni-daemonset.yaml "VPC-CNI DaemonSet"
    save_kube_system_resource configmap amazon-vpc-cni vpc-cni-configmap.yaml "VPC-CNI ConfigMap"

    print_info "Collecting CoreDNS Kubernetes resources..."
    save_kube_system_resource deployment coredns coredns-deployment.yaml "CoreDNS Deployment"
    save_kube_system_resource configmap coredns coredns-configmap.yaml "CoreDNS ConfigMap"
    save_kube_system_resource service kube-dns coredns-service.yaml "CoreDNS Service"

    print_info "Collecting Kube-proxy Kubernetes resources..."
    save_kube_system_resource daemonset kube-proxy kube-proxy-daemonset.yaml "Kube-proxy DaemonSet"
    save_kube_system_resource configmap kube-proxy-config kube-proxy-configmap.yaml "Kube-proxy ConfigMap"

    print_info "Collecting EBS CSI Driver Kubernetes resources..."
    save_kube_system_resource deployment ebs-csi-controller ebs-csi-controller-deployment.yaml "EBS CSI Controller Deployment"
    save_kube_system_resource daemonset ebs-csi-node ebs-csi-node-daemonset.yaml "EBS CSI Node DaemonSet"

    print_info "Collecting EFS CSI Driver Kubernetes resources..."
    save_kube_system_resource deployment efs-csi-controller efs-csi-controller-deployment.yaml "EFS CSI Controller Deployment"
    save_kube_system_resource daemonset efs-csi-node efs-csi-node-daemonset.yaml "EFS CSI Node DaemonSet"

    print_info "Collecting Mountpoint S3 CSI Driver Kubernetes resources..."
    save_kube_system_resource daemonset s3-csi-node s3-csi-node-daemonset.yaml "S3 CSI Node DaemonSet"

    print_success "Add-on Kubernetes resources collection completed"
}

# Function to collect CRDs (Custom Resource Definitions)
# Dumps every CRD in one file, the storage/snapshot CRDs one file each, and
# any AWS-specific CRDs (ALB controller etc.) into $OUTPUT_DIR/crds/.
collect_crds() {
    print_info "=== Collecting Custom Resource Definitions ==="

    # kubectl is required for everything below
    if ! command -v kubectl &> /dev/null; then
        print_warning "kubectl not found - skipping CRD collection"
        return
    fi

    mkdir -p "$OUTPUT_DIR/crds"

    # One combined dump of every CRD in the cluster
    if kubectl get crd -o yaml > "$OUTPUT_DIR/crds/all-crds.yaml" 2>/dev/null; then
        print_success "✓ All CRDs saved to crds/all-crds.yaml"
    fi

    # Storage / snapshot CRDs, saved one file each
    local storage_crds=(
        "csidrivers.storage.k8s.io"
        "csinodes.storage.k8s.io"
        "volumesnapshotclasses.snapshot.storage.k8s.io"
        "volumesnapshotcontents.snapshot.storage.k8s.io"
        "volumesnapshots.snapshot.storage.k8s.io"
    )

    print_info "Collecting CSI-related CRDs..."
    local crd_name
    for crd_name in "${storage_crds[@]}"; do
        kubectl get crd "$crd_name" &>/dev/null || continue
        kubectl get crd "$crd_name" -o yaml > "$OUTPUT_DIR/crds/$crd_name.yaml" 2>/dev/null
        print_success "✓ CRD $crd_name saved"
    done

    # AWS-specific CRDs (ALB Controller, etc.) — anything whose name line
    # mentions aws or elbv2
    print_info "Collecting AWS-specific CRDs..."
    kubectl get crd 2>/dev/null | awk '/aws|elbv2/ {print $1}' | while read -r crd_name; do
        [[ -n "$crd_name" ]] || continue
        kubectl get crd "$crd_name" -o yaml > "$OUTPUT_DIR/crds/$crd_name.yaml" 2>/dev/null
        print_success "✓ AWS CRD $crd_name saved"
    done

    print_success "CRD collection completed"
}

# Function to collect Custom Resources
# Saves the custom resources installed by the CSI drivers and the AWS Load
# Balancer Controller into $OUTPUT_DIR/custom-resources/. The six identical
# check-then-dump stanzas are factored into save_custom_resource; the files
# and messages produced are unchanged.

# Save one custom-resource type as YAML, if the API serves it.
#   $1 - resource type
#   $2 - "all" to query with --all-namespaces, "" for no namespace flag
#   $3 - output file name (written under $OUTPUT_DIR/custom-resources)
#   $4 - label used in the success message
save_custom_resource() {
    local kind="$1" ns_mode="$2" outfile="$3" label="$4"
    local -a scope=()
    if [[ "$ns_mode" == "all" ]]; then
        scope=(--all-namespaces)
    fi

    if kubectl get "$kind" "${scope[@]}" &>/dev/null; then
        kubectl get "$kind" "${scope[@]}" -o yaml \
            > "$OUTPUT_DIR/custom-resources/$outfile" 2>/dev/null
        print_success "✓ $label saved"
    fi
}

collect_custom_resources() {
    print_info "=== Collecting Custom Resources ==="

    # Check if kubectl is available
    if ! command -v kubectl &> /dev/null; then
        print_warning "kubectl not found - skipping custom resources"
        return
    fi

    mkdir -p "$OUTPUT_DIR/custom-resources"

    save_custom_resource csidrivers            ""  csidrivers.yaml            "CSI Drivers"
    save_custom_resource csinodes              ""  csinodes.yaml              "CSI Nodes"
    save_custom_resource volumesnapshotclasses ""  volumesnapshotclasses.yaml "Volume Snapshot Classes"
    save_custom_resource volumesnapshots       all volumesnapshots.yaml       "Volume Snapshots"
    save_custom_resource targetgroupbindings   all targetgroupbindings.yaml   "Target Group Bindings"
    save_custom_resource ingressclassparams    all ingressclassparams.yaml    "Ingress Class Parameters"

    print_success "Custom resources collection completed"
}

# Function to collect managed node group information
# Lists the cluster's managed node groups, then describes each one into
# $OUTPUT_DIR/managed-nodegroups/<name>.json.
collect_managed_nodegroups() {
    print_info "=== Collecting Managed Node Groups ==="

    run_aws_cmd "eks list-nodegroups --cluster-name $CLUSTER_NAME" \
        "$OUTPUT_DIR/managed-nodegroups-list.json" \
        "managed node groups list"

    # Nothing to iterate over if the listing itself never materialized
    [[ -f "$OUTPUT_DIR/managed-nodegroups-list.json" ]] || return 0

    nodegroups=$(jq -r '.nodegroups[]?' "$OUTPUT_DIR/managed-nodegroups-list.json" 2>/dev/null || echo "")
    if [[ -z "$nodegroups" ]]; then
        print_info "No managed node groups found for cluster: $CLUSTER_NAME"
        return 0
    fi

    mkdir -p "$OUTPUT_DIR/managed-nodegroups"
    local ng
    while IFS= read -r ng; do
        [[ -n "$ng" ]] || continue
        print_info "Found managed node group: $ng"
        run_aws_cmd "eks describe-nodegroup --cluster-name $CLUSTER_NAME --nodegroup-name $ng" \
            "$OUTPUT_DIR/managed-nodegroups/$ng.json" \
            "managed node group: $ng"
    done <<< "$nodegroups"
}

# Function to collect Auto Scaling Group information
# Lists every ASG in the region, keeps the ones tagged as belonging to this
# cluster, and saves each one's details, scaling policies, and (when present)
# its launch template plus the exact template version in use, under
# $OUTPUT_DIR/asgs/.
collect_asg_info() {
    print_info "=== Collecting Auto Scaling Group Information ==="
    
    # Get all ASGs
    run_aws_cmd "autoscaling describe-auto-scaling-groups" \
        "$OUTPUT_DIR/all-asgs.json" \
        "all Auto Scaling Groups"
    
    # Filter ASGs related to the EKS cluster
    if [[ -f "$OUTPUT_DIR/all-asgs.json" ]]; then
        mkdir -p "$OUTPUT_DIR/asgs"
        
        # Extract ASG names that contain the cluster name
        # Keeps ASGs carrying a kubernetes.io/cluster/<name> or eks:cluster-name
        # tag whose value is the cluster name or "owned".
        # NOTE(review): the inner .Tags[]? iteration can emit the same ASG name
        # once per matching tag; duplicates only cause the same files to be
        # re-collected/overwritten, which is harmless but wasteful.
        asg_names=$(jq -r ".AutoScalingGroups[] | select(.Tags[]? | select(.Key==\"kubernetes.io/cluster/$CLUSTER_NAME\" or .Key==\"eks:cluster-name\") | .Value==\"$CLUSTER_NAME\" or .Value==\"owned\") | .AutoScalingGroupName" "$OUTPUT_DIR/all-asgs.json" 2>/dev/null || echo "")
        
        if [[ -n "$asg_names" ]]; then
            while IFS= read -r asg_name; do
                if [[ -n "$asg_name" ]]; then
                    print_info "Found EKS-related ASG: $asg_name"
                    
                    # Get detailed ASG information
                    run_aws_cmd "autoscaling describe-auto-scaling-groups --auto-scaling-group-names $asg_name" \
                        "$OUTPUT_DIR/asgs/$asg_name.json" \
                        "ASG details: $asg_name"
                    
                    # Get scaling policies
                    run_aws_cmd "autoscaling describe-policies --auto-scaling-group-name $asg_name" \
                        "$OUTPUT_DIR/asgs/$asg_name-policies.json" \
                        "ASG policies: $asg_name"
                    
                    # Get launch template information
                    if [[ -f "$OUTPUT_DIR/asgs/$asg_name.json" ]]; then
                        lt_id=$(jq -r '.AutoScalingGroups[0].LaunchTemplate.LaunchTemplateId?' "$OUTPUT_DIR/asgs/$asg_name.json" 2>/dev/null)
                        lt_version=$(jq -r '.AutoScalingGroups[0].LaunchTemplate.Version?' "$OUTPUT_DIR/asgs/$asg_name.json" 2>/dev/null)
                        
                        # NOTE(review): lt_version is not null-checked like lt_id;
                        # assumes Version is always present when the template ID
                        # is — confirm, and note ASGs using MixedInstancesPolicy
                        # expose their template elsewhere, so both come back null
                        # and the branch is skipped for them.
                        if [[ -n "$lt_id" && "$lt_id" != "null" ]]; then
                            run_aws_cmd "ec2 describe-launch-templates --launch-template-ids $lt_id" \
                                "$OUTPUT_DIR/asgs/$asg_name-launch-template.json" \
                                "Launch template: $lt_id"
                            
                            run_aws_cmd "ec2 describe-launch-template-versions --launch-template-id $lt_id --versions $lt_version" \
                                "$OUTPUT_DIR/asgs/$asg_name-launch-template-version.json" \
                                "Launch template version: $lt_id:$lt_version"
                        fi
                    fi
                fi
            done <<< "$asg_names"
        else
            print_warning "No Auto Scaling Groups found for cluster: $CLUSTER_NAME"
        fi
    fi
}

# Function to collect EC2 instances information
# Describes every EC2 instance tagged as owned by the cluster, then saves
# each security group referenced by those instances into
# $OUTPUT_DIR/security-groups/.
collect_ec2_info() {
    print_info "=== Collecting EC2 Instance Information ==="

    run_aws_cmd "ec2 describe-instances --filters Name=tag:kubernetes.io/cluster/$CLUSTER_NAME,Values=owned Name=instance-state-name,Values=running,pending,stopping,stopped" \
        "$OUTPUT_DIR/cluster-instances.json" \
        "cluster EC2 instances"

    [[ -f "$OUTPUT_DIR/cluster-instances.json" ]] || return 0

    # Unique set of security-group IDs attached to the instances
    sg_ids=$(jq -r '.Reservations[].Instances[].SecurityGroups[].GroupId' "$OUTPUT_DIR/cluster-instances.json" 2>/dev/null | sort -u)
    [[ -n "$sg_ids" ]] || return 0

    mkdir -p "$OUTPUT_DIR/security-groups"
    local sg_id
    while IFS= read -r sg_id; do
        [[ -n "$sg_id" ]] || continue
        run_aws_cmd "ec2 describe-security-groups --group-ids $sg_id" \
            "$OUTPUT_DIR/security-groups/$sg_id.json" \
            "security group: $sg_id"
    done <<< "$sg_ids"
}

# Function to collect IAM roles and policies
# Derives the node instance profiles from the EC2 describe output written by
# collect_ec2_info, then saves each profile, its roles, and every role's
# attached and inline policy lists under $OUTPUT_DIR/iam/. Must run after
# collect_ec2_info, which produces cluster-instances.json.
collect_iam_info() {
    print_info "=== Collecting IAM Information ==="
    
    mkdir -p "$OUTPUT_DIR/iam"
    
    # Get node instance profiles from EC2 instances
    if [[ -f "$OUTPUT_DIR/cluster-instances.json" ]]; then
        # Unique, non-null instance-profile ARNs across all reservations
        instance_profiles=$(jq -r '.Reservations[].Instances[].IamInstanceProfile.Arn?' "$OUTPUT_DIR/cluster-instances.json" 2>/dev/null | grep -v null | sort -u)
        
        while IFS= read -r profile_arn; do
            if [[ -n "$profile_arn" ]]; then
                # basename of "arn:...:instance-profile/NAME" yields NAME
                profile_name=$(basename "$profile_arn")
                
                run_aws_cmd "iam get-instance-profile --instance-profile-name $profile_name" \
                    "$OUTPUT_DIR/iam/instance-profile-$profile_name.json" \
                    "instance profile: $profile_name"
                
                # Get associated roles
                if [[ -f "$OUTPUT_DIR/iam/instance-profile-$profile_name.json" ]]; then
                    roles=$(jq -r '.InstanceProfile.Roles[].RoleName' "$OUTPUT_DIR/iam/instance-profile-$profile_name.json" 2>/dev/null)
                    # One describe + two policy listings per role
                    while IFS= read -r role_name; do
                        if [[ -n "$role_name" ]]; then
                            run_aws_cmd "iam get-role --role-name $role_name" \
                                "$OUTPUT_DIR/iam/role-$role_name.json" \
                                "IAM role: $role_name"
                            
                            run_aws_cmd "iam list-attached-role-policies --role-name $role_name" \
                                "$OUTPUT_DIR/iam/role-$role_name-attached-policies.json" \
                                "attached policies for role: $role_name"
                            
                            run_aws_cmd "iam list-role-policies --role-name $role_name" \
                                "$OUTPUT_DIR/iam/role-$role_name-inline-policies.json" \
                                "inline policies for role: $role_name"
                        fi
                    done <<< "$roles"
                fi
            fi
        done <<< "$instance_profiles"
    fi
}

# Function to collect Kubernetes node information
# Refreshes kubeconfig for the target cluster (mutates the caller's
# kubeconfig file), then saves node inventory, service accounts, kube-system
# deployments, and network policies under $OUTPUT_DIR/kubernetes/.
# `kubectl describe nodes` output is only collected when --detailed-nodes
# was passed, since it is slow on large clusters.
collect_k8s_info() {
    print_info "=== Collecting Kubernetes Node Information ==="
    
    # Check if kubectl is available and configured
    if command -v kubectl &> /dev/null; then
        # Update kubeconfig
        # ${PROFILE:+--profile $PROFILE} expands to nothing when no profile was
        # given; intentionally unquoted so it word-splits into two arguments.
        print_info "Updating kubeconfig for cluster: $CLUSTER_NAME"
        if aws eks update-kubeconfig --region "$REGION" --name "$CLUSTER_NAME" ${PROFILE:+--profile $PROFILE} &>/dev/null; then
            mkdir -p "$OUTPUT_DIR/kubernetes"
            
            # Get nodes (also serves as the cluster-access check)
            if kubectl get nodes &>/dev/null; then
                if kubectl get nodes -o yaml > "$OUTPUT_DIR/kubernetes/nodes.yaml" 2>/dev/null; then
                    print_success "✓ Kubernetes nodes saved to kubernetes/nodes.yaml"
                else
                    print_warning "✗ Failed to collect Kubernetes nodes"
                fi
                
                if kubectl get nodes -o wide > "$OUTPUT_DIR/kubernetes/nodes-wide.txt" 2>/dev/null; then
                    print_success "✓ Kubernetes nodes (wide) saved to kubernetes/nodes-wide.txt"
                else
                    print_warning "✗ Failed to collect Kubernetes nodes (wide)"
                fi
                
                # Get node details (optional for performance)
                if [[ "$DETAILED_NODES" == "true" ]]; then
                    print_info "Collecting detailed node descriptions (this may take a while for large clusters)..."
                    if kubectl describe nodes > "$OUTPUT_DIR/kubernetes/nodes-describe.txt" 2>/dev/null; then
                        print_success "✓ Kubernetes node descriptions saved to kubernetes/nodes-describe.txt"
                    else
                        print_warning "✗ Failed to collect Kubernetes node descriptions"
                    fi
                else
                    # Skip node descriptions for performance in large clusters;
                    # leave a breadcrumb file explaining how to collect manually
                    print_info "ℹ Skipping detailed node descriptions for better performance (use --detailed-nodes to enable)"
                    echo "# Node descriptions skipped for performance reasons" > "$OUTPUT_DIR/kubernetes/nodes-describe-skipped.txt"
                    echo "# To collect node descriptions manually, run:" >> "$OUTPUT_DIR/kubernetes/nodes-describe-skipped.txt"
                    echo "# kubectl describe nodes > nodes-describe.txt" >> "$OUTPUT_DIR/kubernetes/nodes-describe-skipped.txt"
                fi
                
                # Get service accounts with IRSA roles
                # (collect_irsa_iam_info later mines this file for role ARNs)
                if kubectl get serviceaccounts --all-namespaces -o json > "$OUTPUT_DIR/kubernetes/serviceaccounts.json" 2>/dev/null; then
                    print_success "✓ Service accounts saved to kubernetes/serviceaccounts.json"
                else
                    print_warning "✗ Failed to collect service accounts"
                fi
                
                # Get system deployments (focus on kube-system for performance)
                if kubectl get deployments -n kube-system -o json > "$OUTPUT_DIR/kubernetes/kube-system-deployments.json" 2>/dev/null; then
                    print_success "✓ Kube-system deployments saved to kubernetes/kube-system-deployments.json"
                else
                    print_warning "✗ Failed to collect kube-system deployments"
                fi
                
                # Create note about all deployments
                echo "# All deployments collection skipped for performance" > "$OUTPUT_DIR/kubernetes/all-deployments-skipped.txt"
                echo "# To collect all deployments manually, run:" >> "$OUTPUT_DIR/kubernetes/all-deployments-skipped.txt"
                echo "# kubectl get deployments --all-namespaces -o json > all-deployments.json" >> "$OUTPUT_DIR/kubernetes/all-deployments-skipped.txt"
                
                # Get network policies
                if kubectl get networkpolicies --all-namespaces -o json > "$OUTPUT_DIR/kubernetes/networkpolicies.json" 2>/dev/null; then
                    print_success "✓ Network policies saved to kubernetes/networkpolicies.json"
                else
                    print_warning "✗ Failed to collect network policies (may not exist)"
                fi
            else
                print_warning "Cannot access Kubernetes cluster - check your permissions"
            fi
        else
            print_warning "Failed to update kubeconfig - skipping Kubernetes information"
        fi
    else
        print_warning "kubectl not found - skipping Kubernetes information"
    fi
}

# Function to collect IAM information for IRSA roles
# Gathers IAM roles referenced via IRSA (IAM Roles for Service Accounts):
#   - roles annotated on service accounts (eks.amazonaws.com/role-arn),
#     read from the file collect_k8s_info saves
#   - roles attached to EKS add-ons (serviceAccountRoleArn), read from the
#     files collect_cluster_info saves
# and saves each role plus its attached/inline policy lists under
# $OUTPUT_DIR/iam/. Roles already captured by collect_iam_info are skipped.
# Fixes vs. the previous version: de-duplication no longer relies on an
# unquoted `arr=( $(...) )` (which word-split and glob-expanded role names);
# command substitutions are split from `local` so failures are not masked;
# the iam/ directory is created here too, so the function also works when
# collect_iam_info did not run first.
collect_irsa_iam_info() {
    print_info "=== Collecting IRSA IAM Information ==="

    local irsa_roles=()
    local role_arn role_name

    # IRSA roles annotated on service accounts
    if [[ -f "$OUTPUT_DIR/kubernetes/serviceaccounts.json" ]]; then
        local sa_roles
        sa_roles=$(jq -r '.items[] | select(.metadata.annotations["eks.amazonaws.com/role-arn"]) | .metadata.annotations["eks.amazonaws.com/role-arn"]' "$OUTPUT_DIR/kubernetes/serviceaccounts.json" 2>/dev/null)
        while IFS= read -r role_arn; do
            [[ -n "$role_arn" ]] || continue
            # basename of "arn:...:role/NAME" yields NAME
            role_name=$(basename "$role_arn" 2>/dev/null)
            [[ -n "$role_name" ]] || continue
            irsa_roles+=("$role_name")
        done <<< "$sa_roles"
    fi

    # IRSA roles attached to EKS add-ons
    if [[ -d "$OUTPUT_DIR/addons" ]]; then
        local addon_file addon_role_arn
        for addon_file in "$OUTPUT_DIR/addons"/*.json; do
            [[ -f "$addon_file" ]] || continue
            addon_role_arn=$(jq -r '.addon.serviceAccountRoleArn // empty' "$addon_file" 2>/dev/null)
            [[ -n "$addon_role_arn" && "$addon_role_arn" != "null" ]] || continue
            role_name=$(basename "$addon_role_arn" 2>/dev/null)
            [[ -n "$role_name" ]] || continue
            irsa_roles+=("$role_name")
        done
    fi

    # De-duplicate without word-splitting or glob-expanding the names
    local -a unique_roles=()
    if ((${#irsa_roles[@]} > 0)); then
        mapfile -t unique_roles < <(printf '%s\n' "${irsa_roles[@]}" | sort -u)
    fi

    if ((${#unique_roles[@]} > 0)); then
        # Ensure the destination exists even when collect_iam_info was skipped
        mkdir -p "$OUTPUT_DIR/iam"
        print_info "Found ${#unique_roles[@]} IRSA roles to collect"

        for role_name in "${unique_roles[@]}"; do
            # Skip empty entries and roles already collected elsewhere
            if [[ -n "$role_name" && ! -f "$OUTPUT_DIR/iam/role-$role_name.json" ]]; then
                print_info "Collecting IRSA role: $role_name"

                run_aws_cmd "iam get-role --role-name $role_name" \
                    "$OUTPUT_DIR/iam/role-$role_name.json" \
                    "IRSA IAM role: $role_name"

                run_aws_cmd "iam list-attached-role-policies --role-name $role_name" \
                    "$OUTPUT_DIR/iam/role-$role_name-attached-policies.json" \
                    "attached policies for IRSA role: $role_name"

                run_aws_cmd "iam list-role-policies --role-name $role_name" \
                    "$OUTPUT_DIR/iam/role-$role_name-inline-policies.json" \
                    "inline policies for IRSA role: $role_name"
            fi
        done
    else
        print_info "No IRSA roles found to collect"
    fi
}

# Function to extract cluster information for summary
#
# Appends the "EKS Cluster Configuration" markdown section (version, IAM
# role, networking, endpoint access, and the cluster role's policies) to the
# given summary file, then delegates to extract_security_group_rules.
# Reads JSON previously collected under $OUTPUT_DIR and silently does
# nothing when cluster-details.json is absent.
#
# Arguments:
#   $1 - path of the markdown summary file to append to
extract_cluster_summary() {
    local summary_file="$1"
    local details="$OUTPUT_DIR/cluster-details.json"

    # Nothing to summarize if cluster details were never collected
    [[ -f "$details" ]] || return 0

    # NB: assignments stay on the 'local' line so a failing jq (e.g. on
    # malformed JSON) cannot abort the script under `set -e`.
    local eks_version=$(jq -r '.cluster.version // "N/A"' "$details" 2>/dev/null)
    local cluster_role_arn=$(jq -r '.cluster.roleArn // "N/A"' "$details" 2>/dev/null)
    # IAM role ARNs end in "role/<name>", so stripping up to the last '/'
    # yields the role name
    local cluster_role_name="${cluster_role_arn##*/}"
    local cluster_sg=$(jq -r '.cluster.resourcesVpcConfig.clusterSecurityGroupId // "N/A"' "$details" 2>/dev/null)
    local additional_sgs=$(jq -r '.cluster.resourcesVpcConfig.securityGroupIds[]?' "$details" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
    local vpc_id=$(jq -r '.cluster.resourcesVpcConfig.vpcId // "N/A"' "$details" 2>/dev/null)
    local subnets=$(jq -r '.cluster.resourcesVpcConfig.subnetIds[]?' "$details" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
    local public_access=$(jq -r '.cluster.resourcesVpcConfig.endpointPublicAccess // "N/A"' "$details" 2>/dev/null)
    local private_access=$(jq -r '.cluster.resourcesVpcConfig.endpointPrivateAccess // "N/A"' "$details" 2>/dev/null)

    # Cluster configuration table
    {
        echo "## 1. EKS Cluster Configuration"
        echo ""
        echo "| Property | Value |"
        echo "|----------|-------|"
        echo "| EKS Version | $eks_version |"
        echo "| Cluster IAM Role | $cluster_role_name |"
        echo "| Cluster IAM Role ARN | $cluster_role_arn |"
        echo "| Cluster Security Group | $cluster_sg |"
        if [[ -n "$additional_sgs" ]]; then
            echo "| Additional Security Groups | $additional_sgs |"
        fi
        echo "| VPC ID | $vpc_id |"
        echo "| Subnets | $subnets |"
        echo "| Public Endpoint Access | $public_access |"
        echo "| Private Endpoint Access | $private_access |"
        echo ""
    } >> "$summary_file"

    # Cluster IAM role policies (only when that role's policies were collected)
    local attached_file="$OUTPUT_DIR/iam/role-$cluster_role_name-attached-policies.json"
    local inline_file="$OUTPUT_DIR/iam/role-$cluster_role_name-inline-policies.json"
    if [[ -f "$attached_file" ]]; then
        {
            echo "### Cluster IAM Role Policies"
            echo ""
            echo "| Policy Type | Policy Name |"
            echo "|-------------|-------------|"
        } >> "$summary_file"

        # One table row per policy; empty jq output simply yields no rows
        jq -r '.AttachedPolicies[]?.PolicyName // empty' "$attached_file" 2>/dev/null |
            while IFS= read -r policy; do
                echo "| Attached | $policy |"
            done >> "$summary_file"

        if [[ -f "$inline_file" ]]; then
            jq -r '.PolicyNames[]? // empty' "$inline_file" 2>/dev/null |
                while IFS= read -r policy; do
                    echo "| Inline | $policy |"
                done >> "$summary_file"
        fi
        echo "" >> "$summary_file"
    fi

    # Node security group rules follow the cluster section
    extract_security_group_rules "$summary_file"
}

# Function to extract security group rules for summary
#
# Appends a "Node Security Group Rules" markdown section to $1 describing
# every security group JSON collected under $OUTPUT_DIR/security-groups:
# which node groups / ASGs use the group (derived from EC2 instance tags in
# cluster-instances.json) plus its ingress and egress rules as tables.
#
# Arguments:
#   $1 - path of the markdown summary file to append to
extract_security_group_rules() {
    local summary_file="$1"
    
    echo "### Node Security Group Rules" >> "$summary_file"
    echo "" >> "$summary_file"
    
    if [[ -d "$OUTPUT_DIR/security-groups" ]]; then
        for sg_file in "$OUTPUT_DIR/security-groups"/*.json; do
            # An unmatched glob leaves the literal pattern; -f skips it
            if [[ -f "$sg_file" ]]; then
                # Files are named "<sg-id>.json" by the collector
                local sg_id=$(basename "$sg_file" .json)
                local sg_name=$(jq -r '.SecurityGroups[0].GroupName // "N/A"' "$sg_file" 2>/dev/null)
                local sg_description=$(jq -r '.SecurityGroups[0].Description // "N/A"' "$sg_file" 2>/dev/null)
                
                echo "#### Node Security Group: $sg_id ($sg_name)" >> "$summary_file"
                echo "" >> "$summary_file"
                echo "**Description:** $sg_description" >> "$summary_file"
                
                # Find which node groups use this security group
                # (by scanning the tags of instances whose ENIs carry this SG)
                local using_nodegroups=""
                local using_asgs=""
                if [[ -f "$OUTPUT_DIR/cluster-instances.json" ]]; then
                    # Try to find managed node groups first
                    # (EKS tags their instances with "eks:nodegroup-name")
                    using_nodegroups=$(jq -r --arg sg_id "$sg_id" '
                        .Reservations[].Instances[] | 
                        select(.NetworkInterfaces[].Groups[].GroupId == $sg_id) |
                        .Tags[]? | select(.Key=="eks:nodegroup-name") | .Value' "$OUTPUT_DIR/cluster-instances.json" 2>/dev/null | sort -u | tr '\n' ', ' | sed 's/,$//')
                    
                    # If no managed node groups found, look for ASG names (self-managed)
                    if [[ -z "$using_nodegroups" ]]; then
                        using_asgs=$(jq -r --arg sg_id "$sg_id" '
                            .Reservations[].Instances[] | 
                            select(.NetworkInterfaces[].Groups[].GroupId == $sg_id) |
                            .Tags[]? | select(.Key=="aws:autoscaling:groupName") | .Value' "$OUTPUT_DIR/cluster-instances.json" 2>/dev/null | sort -u | tr '\n' ', ' | sed 's/,$//')
                    fi
                fi
                
                if [[ -n "$using_nodegroups" ]]; then
                    echo "**Used by Managed Node Groups:** $using_nodegroups" >> "$summary_file"
                elif [[ -n "$using_asgs" ]]; then
                    echo "**Used by Self-Managed Node Groups (ASGs):** $using_asgs" >> "$summary_file"
                else
                    echo "**Used by Node Groups:** Unknown" >> "$summary_file"
                fi
                echo "" >> "$summary_file"
                
                # Ingress Rules
                echo "**Ingress Rules:**" >> "$summary_file"
                echo "" >> "$summary_file"
                echo "| Protocol | Port Range | Source | Description |" >> "$summary_file"
                echo "|----------|------------|--------|-------------|" >> "$summary_file"
                
                # One "proto|from|to|source|description" line per rule source.
                # "All" substitutes for the null ports of protocol "-1" rules.
                # NOTE(review): for a permission with multiple IpRanges /
                # UserIdGroupPairs the [] iterations appear to emit a cross
                # product of lines — confirm if exact row counts matter.
                local ingress_rules=$(jq -r '.SecurityGroups[0].IpPermissions[]? | "\(.IpProtocol)|\(.FromPort // "All")|\(.ToPort // "All")|\(.IpRanges[]?.CidrIp // .UserIdGroupPairs[]?.GroupId // .PrefixListIds[]?.PrefixListId // "N/A")|\(.IpRanges[]?.Description // .UserIdGroupPairs[]?.Description // .PrefixListIds[]?.Description // "N/A")"' "$sg_file" 2>/dev/null)
                
                if [[ -n "$ingress_rules" ]]; then
                    # Split the pipe-delimited fields back apart; a '|' inside
                    # a rule description would shift the columns
                    while IFS='|' read -r protocol from_port to_port source description; do
                        # Collapse single-port rules to one number
                        local port_range="$from_port"
                        if [[ "$from_port" != "$to_port" && "$to_port" != "All" ]]; then
                            port_range="$from_port-$to_port"
                        fi
                        echo "| $protocol | $port_range | $source | $description |" >> "$summary_file"
                    done <<< "$ingress_rules"
                else
                    echo "| - | - | - | No ingress rules |" >> "$summary_file"
                fi
                
                echo "" >> "$summary_file"
                
                # Egress Rules (same formatting as ingress, from IpPermissionsEgress)
                echo "**Egress Rules:**" >> "$summary_file"
                echo "" >> "$summary_file"
                echo "| Protocol | Port Range | Destination | Description |" >> "$summary_file"
                echo "|----------|------------|-------------|-------------|" >> "$summary_file"
                
                local egress_rules=$(jq -r '.SecurityGroups[0].IpPermissionsEgress[]? | "\(.IpProtocol)|\(.FromPort // "All")|\(.ToPort // "All")|\(.IpRanges[]?.CidrIp // .UserIdGroupPairs[]?.GroupId // .PrefixListIds[]?.PrefixListId // "N/A")|\(.IpRanges[]?.Description // .UserIdGroupPairs[]?.Description // .PrefixListIds[]?.Description // "N/A")"' "$sg_file" 2>/dev/null)
                
                if [[ -n "$egress_rules" ]]; then
                    while IFS='|' read -r protocol from_port to_port destination description; do
                        local port_range="$from_port"
                        if [[ "$from_port" != "$to_port" && "$to_port" != "All" ]]; then
                            port_range="$from_port-$to_port"
                        fi
                        echo "| $protocol | $port_range | $destination | $description |" >> "$summary_file"
                    done <<< "$egress_rules"
                else
                    echo "| - | - | - | No egress rules |" >> "$summary_file"
                fi
                
                echo "" >> "$summary_file"
            fi
        done
    else
        echo "No node security group information collected." >> "$summary_file"
        echo "" >> "$summary_file"
    fi
}

# Helper: print the name of the first collected ASG whose name contains the
# given managed node group name (EKS-created ASGs are named like
# "eks-<nodegroup>-<uuid>"). Prints nothing when no match is found.
_find_nodegroup_asg() {
    local nodegroup="$1"
    local asg_file asg_name
    [[ -d "$OUTPUT_DIR/asgs" ]] || return 0
    for asg_file in "$OUTPUT_DIR/asgs"/*.json; do
        if [[ -f "$asg_file" && ! "$asg_file" =~ (policies|launch-template) ]]; then
            asg_name=$(basename "$asg_file" .json)
            # Literal substring match; the previous regex match could misfire
            # if the node group name contained regex metacharacters
            if [[ "$asg_name" == *"$nodegroup"* ]]; then
                echo "$asg_name"
                return 0
            fi
        fi
    done
    return 0
}

# Helper: print a comma-separated, de-duplicated list of the security group
# IDs attached to the running instances of the given managed node group.
# Prints nothing when the instance dump is missing or no instances match.
_nodegroup_instance_sgs() {
    local nodegroup="$1"
    [[ -f "$OUTPUT_DIR/cluster-instances.json" ]] || return 0
    jq -r --arg nodegroup "$nodegroup" '
        .Reservations[].Instances[] |
        select(.Tags[]? | select(.Key=="eks:nodegroup-name" and .Value==$nodegroup)) |
        .NetworkInterfaces[].Groups[].GroupId' "$OUTPUT_DIR/cluster-instances.json" 2>/dev/null | sort -u | tr '\n' ',' | sed 's/,$//'
}

# Function to extract managed node groups for summary
#
# Appends the "Managed Node Groups" markdown section to the summary file:
# an overview table of every managed node group followed by a per-group
# detail table (IAM role, subnets, disk size, security groups, associated
# ASG, attached policies). Reads JSON previously collected under
# $OUTPUT_DIR (managed-nodegroups-list.json, per-group describe output,
# and optional ASG / EC2 instance dumps).
#
# Arguments:
#   $1 - path of the markdown summary file to append to
extract_managed_nodegroups_summary() {
    local summary_file="$1"

    echo "## 2. Managed Node Groups" >> "$summary_file"
    echo "" >> "$summary_file"

    local list_file="$OUTPUT_DIR/managed-nodegroups-list.json"
    local nodegroups=""
    if [[ -f "$list_file" ]]; then
        nodegroups=$(jq -r '.nodegroups[]?' "$list_file" 2>/dev/null) || nodegroups=""
    fi

    # Missing list file and empty list render the same message
    if [[ -z "$nodegroups" ]]; then
        echo "No managed node groups found." >> "$summary_file"
        echo "" >> "$summary_file"
        return 0
    fi

    {
        echo "| Node Group | Status | Instance Types | Capacity Type | Min/Max/Desired | AMI Type | Node Role | Node Security Groups | Associated ASG |"
        echo "|------------|--------|----------------|---------------|-----------------|----------|-----------|---------------------|----------------|"
    } >> "$summary_file"

    # Overview table: one row per node group
    while IFS= read -r nodegroup; do
        local ng_file="$OUTPUT_DIR/managed-nodegroups/$nodegroup.json"
        if [[ -n "$nodegroup" && -f "$ng_file" ]]; then
            # 'local var=$(...)' deliberately masks jq's exit status so
            # malformed JSON cannot abort the script under `set -e`
            local status=$(jq -r '.nodegroup.status // "N/A"' "$ng_file" 2>/dev/null)
            local instance_types=$(jq -r '.nodegroup.instanceTypes[]?' "$ng_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
            local capacity_type=$(jq -r '.nodegroup.capacityType // "N/A"' "$ng_file" 2>/dev/null)
            local min_size=$(jq -r '.nodegroup.scalingConfig.minSize // "N/A"' "$ng_file" 2>/dev/null)
            local max_size=$(jq -r '.nodegroup.scalingConfig.maxSize // "N/A"' "$ng_file" 2>/dev/null)
            local desired_size=$(jq -r '.nodegroup.scalingConfig.desiredSize // "N/A"' "$ng_file" 2>/dev/null)
            local ami_type=$(jq -r '.nodegroup.amiType // "N/A"' "$ng_file" 2>/dev/null)
            # Strip the ARN prefix in the shell instead of spawning
            # 'xargs basename' (which also errored on empty input)
            local node_role=$(jq -r '.nodegroup.nodeRole // "N/A"' "$ng_file" 2>/dev/null)
            node_role="${node_role##*/}"

            local associated_asg=$(_find_nodegroup_asg "$nodegroup")
            [[ -n "$associated_asg" ]] || associated_asg="N/A"
            local nodegroup_security_groups=$(_nodegroup_instance_sgs "$nodegroup")
            [[ -n "$nodegroup_security_groups" ]] || nodegroup_security_groups="N/A"

            echo "| $nodegroup | $status | $instance_types | $capacity_type | $min_size/$max_size/$desired_size | $ami_type | $node_role | $nodegroup_security_groups | $associated_asg |" >> "$summary_file"
        fi
    done <<< "$nodegroups"
    echo "" >> "$summary_file"

    # Detailed managed node group information
    echo "### Managed Node Groups Details" >> "$summary_file"
    echo "" >> "$summary_file"

    while IFS= read -r nodegroup; do
        local ng_file="$OUTPUT_DIR/managed-nodegroups/$nodegroup.json"
        if [[ -n "$nodegroup" && -f "$ng_file" ]]; then
            {
                echo "#### $nodegroup"
                echo ""
                echo "| Property | Value |"
                echo "|----------|-------|"
            } >> "$summary_file"

            local node_role_arn=$(jq -r '.nodegroup.nodeRole // "N/A"' "$ng_file" 2>/dev/null)
            local node_role_name="${node_role_arn##*/}"
            local subnets=$(jq -r '.nodegroup.subnets[]?' "$ng_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
            local disk_size=$(jq -r '.nodegroup.diskSize // "N/A"' "$ng_file" 2>/dev/null)

            # Security groups: prefer what running instances report; fall
            # back to the associated ASG's launch template when no
            # instances are up
            local associated_asg=$(_find_nodegroup_asg "$nodegroup")
            local security_groups=$(_nodegroup_instance_sgs "$nodegroup")
            if [[ -z "$security_groups" && -n "$associated_asg" ]]; then
                local lt_file="$OUTPUT_DIR/asgs/$associated_asg-launch-template-version.json"
                if [[ -f "$lt_file" ]]; then
                    security_groups=$(jq -r '.LaunchTemplateVersions[0]?.LaunchTemplateData?.SecurityGroupIds[]? // empty' "$lt_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
                fi
            fi
            # Consistent "N/A" placeholder instead of an empty table cell
            [[ -n "$security_groups" ]] || security_groups="N/A"
            [[ -n "$associated_asg" ]] || associated_asg="N/A"

            {
                echo "| IAM Role | $node_role_name |"
                echo "| IAM Role ARN | $node_role_arn |"
                echo "| Subnets | $subnets |"
                echo "| Disk Size (GB) | $disk_size |"
                echo "| Security Groups | $security_groups |"
                echo "| Associated ASG | $associated_asg |"
            } >> "$summary_file"

            # Attached IAM policies, when the role's policies were collected
            local attached_file="$OUTPUT_DIR/iam/role-$node_role_name-attached-policies.json"
            if [[ -f "$attached_file" ]]; then
                local attached_policies=$(jq -r '.AttachedPolicies[]?.PolicyName // empty' "$attached_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
                echo "| Attached Policies | $attached_policies |" >> "$summary_file"
            fi

            echo "" >> "$summary_file"
        fi
    done <<< "$nodegroups"
}

# Helper for extract_selfmanaged_nodegroups_summary: succeed (return 0) when
# the given ASG name contains the name of any managed node group in the
# newline-separated list, i.e. the ASG is backed by an EKS managed node group.
_asg_belongs_to_managed_nodegroup() {
    local asg_name="$1"
    local managed_list="$2"
    local nodegroup
    while IFS= read -r nodegroup; do
        # Literal substring match; the previous regex match could misfire
        # if a node group name contained regex metacharacters
        if [[ -n "$nodegroup" && "$asg_name" == *"$nodegroup"* ]]; then
            return 0
        fi
    done <<< "$managed_list"
    return 1
}

# Function to extract self-managed nodegroup/ASG information for summary
#
# Appends the "Self-Managed Node Groups" markdown section to the summary
# file: an overview table plus per-ASG details for every Auto Scaling Group
# that is NOT associated with an EKS managed node group. Reads ASG, launch
# template, instance and IAM JSON previously collected under $OUTPUT_DIR.
#
# Arguments:
#   $1 - path of the markdown summary file to append to
extract_selfmanaged_nodegroups_summary() {
    local summary_file="$1"

    echo "## 3. Self-Managed Node Groups (Auto Scaling Groups)" >> "$summary_file"
    echo "" >> "$summary_file"

    if [[ ! -d "$OUTPUT_DIR/asgs" ]]; then
        echo "No Auto Scaling Groups found." >> "$summary_file"
        echo "" >> "$summary_file"
        return 0
    fi

    # Names of managed node groups, used to filter out EKS-created ASGs
    local managed_nodegroups=""
    if [[ -f "$OUTPUT_DIR/managed-nodegroups-list.json" ]]; then
        managed_nodegroups=$(jq -r '.nodegroups[]?' "$OUTPUT_DIR/managed-nodegroups-list.json" 2>/dev/null) || managed_nodegroups=""
    fi

    # Collect the ASGs that are truly self-managed
    local selfmanaged_asgs=()
    local asg_file asg_name
    for asg_file in "$OUTPUT_DIR/asgs"/*.json; do
        # Skip unmatched globs and the policies / launch-template side files
        if [[ -f "$asg_file" && ! "$asg_file" =~ (policies|launch-template) ]]; then
            asg_name=$(basename "$asg_file" .json)
            if ! _asg_belongs_to_managed_nodegroup "$asg_name" "$managed_nodegroups"; then
                selfmanaged_asgs+=("$asg_name")
            fi
        fi
    done

    if [[ ${#selfmanaged_asgs[@]} -eq 0 ]]; then
        echo "No truly self-managed node groups found. All ASGs are associated with managed node groups." >> "$summary_file"
        echo "" >> "$summary_file"
        return 0
    fi

    {
        echo "| ASG Name | Min/Max/Desired | Instance Type | IAM Role | Node Security Groups |"
        echo "|----------|-----------------|---------------|----------|---------------------|"
    } >> "$summary_file"

    # Overview table: one row per self-managed ASG
    for asg_name in "${selfmanaged_asgs[@]}"; do
        local asg_file="$OUTPUT_DIR/asgs/$asg_name.json"

        # 'local var=$(...)' deliberately masks jq's exit status so
        # malformed JSON cannot abort the script under `set -e`
        local min_size=$(jq -r '.AutoScalingGroups[0].MinSize // "N/A"' "$asg_file" 2>/dev/null)
        local max_size=$(jq -r '.AutoScalingGroups[0].MaxSize // "N/A"' "$asg_file" 2>/dev/null)
        local desired_size=$(jq -r '.AutoScalingGroups[0].DesiredCapacity // "N/A"' "$asg_file" 2>/dev/null)

        local lt_file="$OUTPUT_DIR/asgs/$asg_name-launch-template-version.json"
        local instance_type="N/A"
        local instance_profile="N/A"
        local sg_ids="N/A"

        # Prefer the launch template for type / profile / security groups
        if [[ -f "$lt_file" ]]; then
            instance_type=$(jq -r '.LaunchTemplateVersions[0]?.LaunchTemplateData?.InstanceType // "N/A"' "$lt_file" 2>/dev/null)
            instance_profile=$(jq -r '.LaunchTemplateVersions[0]?.LaunchTemplateData?.IamInstanceProfile?.Name // "N/A"' "$lt_file" 2>/dev/null)
            sg_ids=$(jq -r '.LaunchTemplateVersions[0]?.LaunchTemplateData?.SecurityGroupIds[]? // empty' "$lt_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
        fi

        # Fall back to the security groups reported by running instances
        if [[ "$sg_ids" == "N/A" || -z "$sg_ids" ]] && [[ -f "$OUTPUT_DIR/cluster-instances.json" ]]; then
            sg_ids=$(jq -r --arg asg_name "$asg_name" '
                .Reservations[].Instances[] |
                select(.Tags[]? | select(.Key=="aws:autoscaling:groupName" and .Value==$asg_name)) |
                .NetworkInterfaces[].Groups[].GroupId' "$OUTPUT_DIR/cluster-instances.json" 2>/dev/null | sort -u | tr '\n' ',' | sed 's/,$//')
        fi
        [[ -n "$sg_ids" ]] || sg_ids="N/A"

        # Resolve the IAM role through the collected instance profile
        local role_name="N/A"
        if [[ -f "$OUTPUT_DIR/iam/instance-profile-$instance_profile.json" ]]; then
            role_name=$(jq -r '.InstanceProfile.Roles[0]?.RoleName // "N/A"' "$OUTPUT_DIR/iam/instance-profile-$instance_profile.json" 2>/dev/null)
        fi

        echo "| $asg_name | $min_size/$max_size/$desired_size | $instance_type | $role_name | $sg_ids |" >> "$summary_file"
    done
    echo "" >> "$summary_file"

    # Detailed self-managed node group information
    echo "### Self-Managed Node Groups Details" >> "$summary_file"
    echo "" >> "$summary_file"

    for asg_name in "${selfmanaged_asgs[@]}"; do
        {
            echo "#### $asg_name"
            echo ""
            echo "| Property | Value |"
            echo "|----------|-------|"
        } >> "$summary_file"

        # Instance profile and role come from the launch template
        local lt_file="$OUTPUT_DIR/asgs/$asg_name-launch-template-version.json"
        if [[ -f "$lt_file" ]]; then
            local instance_profile=$(jq -r '.LaunchTemplateVersions[0]?.LaunchTemplateData?.IamInstanceProfile?.Name // "N/A"' "$lt_file" 2>/dev/null)
            local sg_ids=$(jq -r '.LaunchTemplateVersions[0]?.LaunchTemplateData?.SecurityGroupIds[]? // empty' "$lt_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
            # Consistent "N/A" placeholder instead of an empty table cell
            [[ -n "$sg_ids" ]] || sg_ids="N/A"

            echo "| Instance Profile | $instance_profile |" >> "$summary_file"
            echo "| Security Groups | $sg_ids |" >> "$summary_file"

            local profile_file="$OUTPUT_DIR/iam/instance-profile-$instance_profile.json"
            if [[ -f "$profile_file" ]]; then
                local role_name=$(jq -r '.InstanceProfile.Roles[0]?.RoleName // "N/A"' "$profile_file" 2>/dev/null)
                local role_arn=$(jq -r '.InstanceProfile.Roles[0]?.Arn // "N/A"' "$profile_file" 2>/dev/null)
                echo "| IAM Role | $role_name |" >> "$summary_file"
                echo "| IAM Role ARN | $role_arn |" >> "$summary_file"

                # Role policies
                local policies_file="$OUTPUT_DIR/iam/role-$role_name-attached-policies.json"
                if [[ -f "$policies_file" ]]; then
                    local attached_policies=$(jq -r '.AttachedPolicies[]?.PolicyName // empty' "$policies_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
                    echo "| Attached Policies | $attached_policies |" >> "$summary_file"
                fi
            fi
        fi
        echo "" >> "$summary_file"
    done
}

# Function to extract add-on information for summary
#
# Appends the "EKS Add-ons" markdown section to the summary file: an
# overview table of every collected add-on followed by a per-add-on detail
# table. Reads describe-addon JSON from $OUTPUT_DIR/addons and optional
# IRSA / configuration dumps from $OUTPUT_DIR/iam and
# $OUTPUT_DIR/addon-configs.
#
# Arguments:
#   $1 - path of the markdown summary file to append to
extract_addon_summary() {
    local summary_file="$1"

    echo "## 4. EKS Add-ons" >> "$summary_file"
    echo "" >> "$summary_file"

    if [[ ! -d "$OUTPUT_DIR/addons" ]]; then
        echo "No add-ons found or add-on information not collected." >> "$summary_file"
        echo "" >> "$summary_file"
        return 0
    fi

    {
        echo "| Add-on Name | Version | Status | Service Account Role | Policies | Configuration |"
        echo "|-------------|---------|--------|---------------------|----------|---------------|"
    } >> "$summary_file"

    # Overview table: one row per collected add-on
    local descriptor
    for descriptor in "$OUTPUT_DIR/addons"/*.json; do
        # Skip non-files (unmatched glob) and configuration-schema dumps
        if [[ ! -f "$descriptor" || "$descriptor" == *configuration-schema* ]]; then
            continue
        fi

        local file_tail="${descriptor##*/}"
        local addon_name="${file_tail%.json}"
        local addon_version=$(jq -r '.addon.addonVersion // "N/A"' "$descriptor" 2>/dev/null)
        local addon_status=$(jq -r '.addon.status // "N/A"' "$descriptor" 2>/dev/null)
        local service_account_role=$(jq -r '.addon.serviceAccountRoleArn // "N/A"' "$descriptor" 2>/dev/null)

        local role_name="None"
        local policies="N/A"
        local custom_config="Default"

        if [[ "$service_account_role" != "N/A" && "$service_account_role" != "null" ]]; then
            # Role ARNs end in "role/<name>"
            role_name="${service_account_role##*/}"

            # Get role policies if available
            local attached_file="$OUTPUT_DIR/iam/role-$role_name-attached-policies.json"
            if [[ -f "$attached_file" ]]; then
                policies=$(jq -r '.AttachedPolicies[]?.PolicyName // empty' "$attached_file" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
            fi
        fi

        # A collector-written "note" marks a default config; its absence
        # means a custom configuration was applied
        local config_file="$OUTPUT_DIR/addon-configs/$addon_name-current-config.json"
        if [[ -f "$config_file" ]]; then
            local config_note=$(jq -r '.note // empty' "$config_file" 2>/dev/null)
            if [[ -z "$config_note" ]]; then
                custom_config="Custom"
            fi
        fi

        echo "| $addon_name | $addon_version | $addon_status | $role_name | $policies | $custom_config |" >> "$summary_file"
    done
    echo "" >> "$summary_file"

    # Per-add-on detail tables
    echo "### Add-on Configuration Details" >> "$summary_file"
    echo "" >> "$summary_file"

    for descriptor in "$OUTPUT_DIR/addons"/*.json; do
        if [[ ! -f "$descriptor" || "$descriptor" == *configuration-schema* ]]; then
            continue
        fi

        local file_tail="${descriptor##*/}"
        local addon_name="${file_tail%.json}"
        local addon_version=$(jq -r '.addon.addonVersion // "N/A"' "$descriptor" 2>/dev/null)
        local addon_status=$(jq -r '.addon.status // "N/A"' "$descriptor" 2>/dev/null)
        local created_at=$(jq -r '.addon.createdAt // "N/A"' "$descriptor" 2>/dev/null)
        local modified_at=$(jq -r '.addon.modifiedAt // "N/A"' "$descriptor" 2>/dev/null)
        local service_account_role=$(jq -r '.addon.serviceAccountRoleArn // "N/A"' "$descriptor" 2>/dev/null)
        local resolve_conflicts=$(jq -r '.addon.resolveConflicts // "N/A"' "$descriptor" 2>/dev/null)

        {
            echo "#### $addon_name"
            echo ""
            echo "| Property | Value |"
            echo "|----------|-------|"
            echo "| Version | $addon_version |"
            echo "| Status | $addon_status |"
            echo "| Created At | $created_at |"
            echo "| Modified At | $modified_at |"
            echo "| Resolve Conflicts | $resolve_conflicts |"
        } >> "$summary_file"

        if [[ "$service_account_role" != "N/A" && "$service_account_role" != "null" ]]; then
            echo "| Service Account Role | $service_account_role |" >> "$summary_file"
        else
            echo "| Service Account Role | None configured |" >> "$summary_file"
        fi

        # Check if custom configuration exists
        local config_status="Default (no custom config)"
        local config_file="$OUTPUT_DIR/addon-configs/$addon_name-current-config.json"
        if [[ -f "$config_file" ]]; then
            local config_note=$(jq -r '.note // empty' "$config_file" 2>/dev/null)
            if [[ -z "$config_note" ]]; then
                config_status="Custom configuration applied"
            fi
        fi
        echo "| Configuration | $config_status |" >> "$summary_file"

        # Show only the first three available versions to keep the table short
        local versions_file="$OUTPUT_DIR/addon-configs/$addon_name-available-versions.json"
        if [[ -f "$versions_file" ]]; then
            local available_versions=$(jq -r '.addons[0].addonVersions[]?.addonVersion // empty' "$versions_file" 2>/dev/null | head -3 | tr '\n' ',' | sed 's/,$//')
            if [[ -n "$available_versions" ]]; then
                echo "| Available Versions | $available_versions |" >> "$summary_file"
            fi
        fi

        echo "" >> "$summary_file"
    done
}

# Function to extract deployment information for summary
extract_deployment_summary() {
    local summary_file="$1"
    
    echo "## 5. Kubernetes Deployments (Service Accounts & IRSA)" >> "$summary_file"
    echo "" >> "$summary_file"
    
    if [[ -f "$OUTPUT_DIR/kubernetes/nodes.yaml" ]]; then
        echo "**Note:** Deployment-level IAM roles and security groups require additional Kubernetes API access." >> "$summary_file"
        echo "To collect this information, run the following commands:" >> "$summary_file"
        echo "" >> "$summary_file"
        echo '```bash' >> "$summary_file"
        echo "# Get all service accounts with annotations (IRSA roles)" >> "$summary_file"
        echo 'kubectl get serviceaccounts --all-namespaces -o json | jq -r '"'"'.items[] | select(.metadata.annotations["eks.amazonaws.com/role-arn"]) | "\(.metadata.namespace)/\(.metadata.name): \(.metadata.annotations["eks.amazonaws.com/role-arn"])"'"'"'' >> "$summary_file"
        echo "" >> "$summary_file"
        echo "# Get deployments and their service accounts" >> "$summary_file"
        echo 'kubectl get deployments --all-namespaces -o json | jq -r '"'"'.items[] | "\(.metadata.namespace)/\(.metadata.name): \(.spec.template.spec.serviceAccountName // "default")"'"'"'' >> "$summary_file"
        echo "" >> "$summary_file"
        echo "# Get network policies (security group equivalent)" >> "$summary_file"
        echo "kubectl get networkpolicies --all-namespaces" >> "$summary_file"
        echo '```' >> "$summary_file"
        echo "" >> "$summary_file"
        
        # Extract deployment info first
        if [[ -f "$OUTPUT_DIR/kubernetes/deployments.json" ]]; then
            echo "### Deployments and Service Accounts" >> "$summary_file"
            echo "" >> "$summary_file"
            
            local deployment_info=$(jq -r '.items[] | "\(.metadata.namespace)|\(.metadata.name)|\(.spec.template.spec.serviceAccountName // "default")"' "$OUTPUT_DIR/kubernetes/deployments.json" 2>/dev/null)
            
            if [[ -n "$deployment_info" ]]; then
                echo "| Namespace | Deployment | Service Account |" >> "$summary_file"
                echo "|-----------|------------|-----------------|" >> "$summary_file"
                
                while IFS= read -r deploy_info; do
                    local namespace=$(echo "$deploy_info" | cut -d'|' -f1)
                    local deployment=$(echo "$deploy_info" | cut -d'|' -f2)
                    local sa=$(echo "$deploy_info" | cut -d'|' -f3)
                    echo "| $namespace | $deployment | $sa |" >> "$summary_file"
                done <<< "$deployment_info"
            else
                echo "No deployments found." >> "$summary_file"
            fi
            echo "" >> "$summary_file"
        fi
        
        # Extract IRSA info second
        if [[ -f "$OUTPUT_DIR/kubernetes/serviceaccounts.json" ]]; then
            echo "### Service Accounts with IRSA Roles" >> "$summary_file"
            echo "" >> "$summary_file"
            
            local irsa_accounts=$(jq -r '.items[] | select(.metadata.annotations["eks.amazonaws.com/role-arn"]) | "\(.metadata.namespace)|\(.metadata.name)|\(.metadata.annotations["eks.amazonaws.com/role-arn"])"' "$OUTPUT_DIR/kubernetes/serviceaccounts.json" 2>/dev/null)
            
            if [[ -n "$irsa_accounts" ]]; then
                echo "| Namespace | Service Account | IAM Role ARN | Policies |" >> "$summary_file"
                echo "|-----------|-----------------|--------------|----------|" >> "$summary_file"
                
                while IFS= read -r sa_info; do
                    local namespace=$(echo "$sa_info" | cut -d'|' -f1)
                    local sa_name=$(echo "$sa_info" | cut -d'|' -f2)
                    local role_arn=$(echo "$sa_info" | cut -d'|' -f3)
                    local role_name=$(basename "$role_arn" 2>/dev/null)
                    
                    local policies="N/A"
                    if [[ -f "$OUTPUT_DIR/iam/role-$role_name-attached-policies.json" ]]; then
                        policies=$(jq -r '.AttachedPolicies[]?.PolicyName // empty' "$OUTPUT_DIR/iam/role-$role_name-attached-policies.json" 2>/dev/null | tr '\n' ', ' | sed 's/,$//')
                    fi
                    
                    echo "| $namespace | $sa_name | $role_arn | $policies |" >> "$summary_file"
                done <<< "$irsa_accounts"
            else
                echo "No service accounts with IRSA roles found." >> "$summary_file"
            fi
            echo "" >> "$summary_file"
        fi
        
        # Network policies info
        if [[ -f "$OUTPUT_DIR/kubernetes/networkpolicies.json" ]]; then
            echo "### Network Policies" >> "$summary_file"
            
            local network_policies=$(jq -r '.items[] | "\(.metadata.namespace)/\(.metadata.name)"' "$OUTPUT_DIR/kubernetes/networkpolicies.json" 2>/dev/null)
            
            if [[ -n "$network_policies" ]]; then
                while IFS= read -r np_info; do
                    echo "- $np_info" >> "$summary_file"
                done <<< "$network_policies"
            else
                echo "No network policies found." >> "$summary_file"
            fi
            echo "" >> "$summary_file"
        fi
    else
        echo "Kubernetes information not available. Ensure kubectl is configured and accessible." >> "$summary_file"
    fi
    echo "" >> "$summary_file"
}

# Function to extract add-on resources and CRDs for summary
extract_addon_resources_summary() {
    local summary_file="$1"
    
    echo "## 6. Add-on Kubernetes Resources & CRDs" >> "$summary_file"
    echo "" >> "$summary_file"
    
    # Add-on Kubernetes Resources
    if [[ -d "$OUTPUT_DIR/addon-k8s-resources" ]]; then
        echo "### Add-on Kubernetes Resources" >> "$summary_file"
        echo "" >> "$summary_file"
        echo "| Add-on | Resource Type | File |" >> "$summary_file"
        echo "|--------|---------------|------|" >> "$summary_file"
        
        # List collected add-on resources
        for file in "$OUTPUT_DIR/addon-k8s-resources"/*.yaml; do
            if [[ -f "$file" ]]; then
                local filename=$(basename "$file")
                local addon=""
                local resource_type=""
                
                case "$filename" in
                    vpc-cni-*)
                        addon="VPC-CNI"
                        resource_type=$(echo "$filename" | sed 's/vpc-cni-//; s/.yaml//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                        ;;
                    coredns-*)
                        addon="CoreDNS"
                        resource_type=$(echo "$filename" | sed 's/coredns-//; s/.yaml//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                        ;;
                    kube-proxy-*)
                        addon="Kube-proxy"
                        resource_type=$(echo "$filename" | sed 's/kube-proxy-//; s/.yaml//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                        ;;
                    ebs-csi-*)
                        addon="EBS CSI Driver"
                        resource_type=$(echo "$filename" | sed 's/ebs-csi-.*-//; s/.yaml//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                        ;;
                    efs-csi-*)
                        addon="EFS CSI Driver"
                        resource_type=$(echo "$filename" | sed 's/efs-csi-.*-//; s/.yaml//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                        ;;
                    s3-csi-*)
                        addon="S3 CSI Driver"
                        resource_type=$(echo "$filename" | sed 's/s3-csi-.*-//; s/.yaml//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                        ;;
                esac
                
                if [[ -n "$addon" ]]; then
                    echo "| $addon | $resource_type | addon-k8s-resources/$filename |" >> "$summary_file"
                fi
            fi
        done
        echo "" >> "$summary_file"
    else
        echo "### Add-on Kubernetes Resources" >> "$summary_file"
        echo "No add-on Kubernetes resources collected (kubectl not available or no resources found)." >> "$summary_file"
        echo "" >> "$summary_file"
    fi
    
    # CRDs
    if [[ -d "$OUTPUT_DIR/crds" ]]; then
        echo "### Custom Resource Definitions (CRDs)" >> "$summary_file"
        echo "" >> "$summary_file"
        
        if [[ -f "$OUTPUT_DIR/crds/all-crds.yaml" ]]; then
            local crd_count=$(kubectl get crd --no-headers 2>/dev/null | wc -l || echo "0")
            echo "**Total CRDs in cluster:** $crd_count" >> "$summary_file"
            echo "" >> "$summary_file"
        fi
        
        echo "| Category | CRD Name | File |" >> "$summary_file"
        echo "|----------|----------|------|" >> "$summary_file"
        
        # List CSI-related CRDs
        for file in "$OUTPUT_DIR/crds"/*.storage.k8s.io.yaml "$OUTPUT_DIR/crds"/*.snapshot.storage.k8s.io.yaml; do
            if [[ -f "$file" ]]; then
                local filename=$(basename "$file")
                local crd_name=$(echo "$filename" | sed 's/.yaml$//')
                echo "| CSI Storage | $crd_name | crds/$filename |" >> "$summary_file"
            fi
        done
        
        # List AWS-related CRDs
        for file in "$OUTPUT_DIR/crds"/*aws*.yaml "$OUTPUT_DIR/crds"/*elbv2*.yaml; do
            if [[ -f "$file" ]]; then
                local filename=$(basename "$file")
                local crd_name=$(echo "$filename" | sed 's/.yaml$//')
                echo "| AWS Services | $crd_name | crds/$filename |" >> "$summary_file"
            fi
        done
        
        echo "" >> "$summary_file"
    else
        echo "### Custom Resource Definitions (CRDs)" >> "$summary_file"
        echo "No CRDs collected (kubectl not available)." >> "$summary_file"
        echo "" >> "$summary_file"
    fi
    
    # Custom Resources
    if [[ -d "$OUTPUT_DIR/custom-resources" ]]; then
        echo "### Custom Resources" >> "$summary_file"
        echo "" >> "$summary_file"
        echo "| Resource Type | File | Description |" >> "$summary_file"
        echo "|---------------|------|-------------|" >> "$summary_file"
        
        for file in "$OUTPUT_DIR/custom-resources"/*.yaml; do
            if [[ -f "$file" ]]; then
                local filename=$(basename "$file")
                local resource_type=$(echo "$filename" | sed 's/.yaml$//' | tr '-' ' ' | sed 's/\b\w/\U&/g')
                local description=""
                
                case "$filename" in
                    csidrivers.yaml) description="CSI driver configurations" ;;
                    csinodes.yaml) description="CSI node information" ;;
                    volumesnapshotclasses.yaml) description="Volume snapshot class definitions" ;;
                    volumesnapshots.yaml) description="Existing volume snapshots" ;;
                    targetgroupbindings.yaml) description="ALB target group bindings" ;;
                    ingressclassparams.yaml) description="ALB ingress class parameters" ;;
                    *) description="Custom resource instances" ;;
                esac
                
                echo "| $resource_type | custom-resources/$filename | $description |" >> "$summary_file"
            fi
        done
        echo "" >> "$summary_file"
    else
        echo "### Custom Resources" >> "$summary_file"
        echo "No custom resources collected (kubectl not available or no resources found)." >> "$summary_file"
        echo "" >> "$summary_file"
    fi
    
    echo "**Note:** These resources contain the actual runtime configurations of add-ons, including custom environment variables, resource limits, and ConfigMap settings that may differ from EKS managed add-on defaults." >> "$summary_file"
    echo "" >> "$summary_file"
}

# Function to generate summary report
generate_summary() {
    print_info "=== Generating Detailed Summary Report ==="
    
    local summary_file="$OUTPUT_DIR/SUMMARY.md"
    
    cat > "$summary_file" << EOF
# EKS Cluster Configuration Summary

**Cluster Name:** $CLUSTER_NAME  
**Region:** $REGION  
**Collection Date:** $(date)  
**AWS Profile:** ${PROFILE:-default}

---

EOF

    # Extract detailed information
    extract_cluster_summary "$summary_file"
    extract_managed_nodegroups_summary "$summary_file"
    extract_selfmanaged_nodegroups_summary "$summary_file"
    extract_addon_summary "$summary_file"
    extract_deployment_summary "$summary_file"
    extract_addon_resources_summary "$summary_file"
    
    cat >> "$summary_file" << EOF
---

## Next Steps

1. Review the collected configurations above
2. Document any custom settings or modifications
3. Use this information for cluster upgrades or migrations
4. Store this backup in a secure location
5. Review add-on Kubernetes resources in addon-k8s-resources/ for runtime configurations
6. Check CRDs and custom resources for extended cluster functionality
7. For deployment-level information, run the kubectl commands provided in section 5

## Collected Files Structure

\`\`\`
eks-nodegroup-config-YYYYMMDD-HHMMSS/
├── SUMMARY.md                     # This comprehensive summary
├── cluster-details.json          # EKS cluster configuration
├── cluster-addons.json           # Managed add-ons list
├── addons/                       # EKS managed add-on details
├── addon-configs/                # Add-on configuration schemas
├── addon-k8s-resources/          # Add-on Kubernetes resources (NEW)
├── crds/                         # Custom Resource Definitions (NEW)
├── custom-resources/             # Custom resource instances (NEW)
├── managed-nodegroups/           # Managed node group configs
├── asgs/                         # Auto Scaling Group configs
├── security-groups/              # Security group rules
├── iam/                          # IAM roles and policies
└── kubernetes/                   # Kubernetes cluster resources
\`\`\`

## Commands Used

All data was collected using AWS CLI commands with the following options:
\`\`\`
Region: $REGION
Profile: ${PROFILE:-default}
\`\`\`
EOF

    print_success "✓ Detailed summary report saved to SUMMARY.md"
}

# Main execution
main() {
    print_info "Starting EKS self-managed node group configuration collection"
    print_info "Cluster: $CLUSTER_NAME"
    print_info "Region: $REGION"
    print_info "Profile: ${PROFILE:-default}"
    print_info "Performance mode: Optimized for large clusters (use --detailed-nodes for full collection)"
    
    # Test AWS CLI access
    print_info "Testing AWS CLI access..."
    if ! aws sts get-caller-identity $AWS_CLI_OPTS &>/dev/null; then
        print_error "Cannot access AWS CLI. Please check your credentials and permissions."
        exit 1
    fi
    print_success "✓ AWS CLI access confirmed"
    
    # Collect all information
    collect_cluster_info
    collect_addon_configurations
    collect_addon_kubernetes_resources
    collect_crds
    collect_custom_resources
    collect_managed_nodegroups
    collect_asg_info
    collect_ec2_info
    collect_iam_info
    collect_k8s_info
    collect_irsa_iam_info
    generate_summary
    
    print_success "Configuration collection completed!"
    print_info "All files saved to: $OUTPUT_DIR"
    print_info "Review the SUMMARY.md file for an overview of collected data"
}

# Entry point: run main, forwarding all command-line arguments
main "$@"
