#!/bin/bash
# NSEMSolver.jl Supercomputer Job Submission Scripts
# Supports multiple HPC systems with optimized configurations

# Function to detect HPC system
# Identify the HPC system we are running on by pattern-matching the hostname.
# Outputs exactly one of: frontera, stampede, perlmutter, summit, cori, generic.
detect_hpc_system() {
    # Capture the hostname once instead of spawning `hostname` in every branch.
    local host
    host=$(hostname)

    # `case` keeps the match order of the original if/elif chain.
    case "$host" in
        *frontera*)   echo "frontera" ;;
        *stampede*)   echo "stampede" ;;
        *perlmutter*) echo "perlmutter" ;;
        *summit*)     echo "summit" ;;
        *cori*)       echo "cori" ;;
        *)            echo "generic" ;;
    esac
}

# Determine the target system once and announce it.
HPC_SYSTEM="$(detect_hpc_system)"
printf 'Detected HPC system: %s\n' "$HPC_SYSTEM"

# Function to submit Frontera jobs (TACC)
# Submit the Frontera (TACC) job set: a small development-queue run plus a
# large production strong-scaling study.
# The here-doc delimiters are quoted ('EOF'), so nothing is expanded here at
# submission time; $vars inside the batch scripts expand on the compute nodes.
# A failed sbatch is reported to stderr instead of being silently ignored.
submit_frontera_jobs() {
    echo "Submitting jobs for Frontera (TACC)..."
    
    # Small scaling study (development queue)
    sbatch << 'EOF' || echo "warning: failed to submit nsem-dev job" >&2
#!/bin/bash
#SBATCH -J nsem-dev
#SBATCH -o nsem-dev.%j
#SBATCH -e nsem-dev.%j
#SBATCH -p development
#SBATCH -N 4
#SBATCH --ntasks-per-node=56
#SBATCH -t 00:30:00
#SBATCH -A your_allocation

module load intel/19.1.1
module load impi/19.0.9
module load julia/1.9.0

export I_MPI_PIN_DOMAIN=omp
export OMP_NUM_THREADS=1

srun -N 4 -n 224 julia --project=. examples/hpc/hpc_scaling_demo.jl
EOF

    # Large production scaling study
    sbatch << 'EOF' || echo "warning: failed to submit nsem-large job" >&2
#!/bin/bash
#SBATCH -J nsem-large
#SBATCH -o nsem-large.%j
#SBATCH -e nsem-large.%j
#SBATCH -p normal
#SBATCH -N 32
#SBATCH --ntasks-per-node=56
#SBATCH -t 04:00:00
#SBATCH -A your_allocation

module load intel/19.1.1
module load impi/19.0.9
module load julia/1.9.0

export I_MPI_PIN_DOMAIN=omp
export OMP_NUM_THREADS=1

# Strong scaling study
for nodes in 4 8 16 32; do
    ntasks=$((nodes * 56))
    echo "Running with $nodes nodes ($ntasks tasks)..."
    srun -N $nodes -n $ntasks julia --project=. examples/hpc/hpc_scaling_demo.jl --problem-size=64
done

# Thermal convection at scale
srun -N 32 -n 1792 julia --project=. examples/hpc/parallel_thermal_convection.jl
EOF
}

# Function to submit Perlmutter jobs (NERSC)
# Submit the Perlmutter (NERSC) job set: a CPU-partition scaling study and a
# GPU-accelerated run.
# Here-doc delimiters are quoted ('EOF'), so batch-script variables expand on
# the compute nodes, not at submission time.
# A failed sbatch is reported to stderr instead of being silently ignored.
submit_perlmutter_jobs() {
    echo "Submitting jobs for Perlmutter (NERSC)..."
    
    # CPU-based scaling study
    sbatch << 'EOF' || echo "warning: failed to submit nsem-cpu job" >&2
#!/bin/bash
#SBATCH -J nsem-cpu
#SBATCH -o nsem-cpu.%j
#SBATCH -e nsem-cpu.%j
#SBATCH -q regular
#SBATCH -C cpu
#SBATCH -N 16
#SBATCH --ntasks-per-node=128
#SBATCH -t 02:00:00
#SBATCH -A your_repo

module load julia/1.9.0
module load cray-mpich/8.1.25

export JULIA_MPI_BINARY=system
export SLURM_CPU_BIND=cores

srun -N 16 -n 2048 julia --project=. examples/hpc/distributed_turbulent_flow.jl
EOF

    # GPU-accelerated version (if available)
    sbatch << 'EOF' || echo "warning: failed to submit nsem-gpu job" >&2
#!/bin/bash
#SBATCH -J nsem-gpu
#SBATCH -o nsem-gpu.%j
#SBATCH -e nsem-gpu.%j
#SBATCH -q regular
#SBATCH -C gpu
#SBATCH -N 4
#SBATCH --ntasks-per-node=4
#SBATCH --gpus-per-task=1
#SBATCH -t 01:00:00
#SBATCH -A your_repo

module load julia/1.9.0
module load cudatoolkit/11.7
module load cray-mpich/8.1.25

export JULIA_CUDA_USE_BINARYBUILDER=false
export SLURM_CPU_BIND=cores

srun -N 4 -n 16 julia --project=. examples/hpc/hpc_scaling_demo.jl --use-gpu
EOF
}

# Function to submit Summit jobs (ORNL)
# Submit the Summit (ORNL) job via LSF's bsub (Summit does not run Slurm).
# The quoted 'EOF' delimiter defers all expansion to the batch script itself.
# A failed bsub is reported to stderr instead of being silently ignored.
submit_summit_jobs() {
    echo "Submitting jobs for Summit (ORNL)..."
    
    bsub << 'EOF' || echo "warning: failed to submit nsem-summit job" >&2
#!/bin/bash
#BSUB -P your_project
#BSUB -W 02:00
#BSUB -nnodes 16
#BSUB -J nsem-summit
#BSUB -o nsem-summit.%J
#BSUB -e nsem-summit.%J

module load julia/1.9.0
module load spectrum-mpi/10.4.0

export OMPI_MCA_btl=^openib
export JULIA_MPI_BINARY=system

# Summit has 6 cores per GPU, 42 cores per node
jsrun -n 672 -a 1 -c 1 -r 42 julia --project=. examples/hpc/hpc_scaling_demo.jl

# GPU version with multiple GPUs per node
jsrun -n 96 -a 1 -c 7 -g 1 -r 6 julia --project=. examples/hpc/hpc_scaling_demo.jl --use-gpu
EOF
}

# Function to submit Stampede2 jobs (TACC)
# Submit the Stampede2 (TACC) scaling study on KNL nodes.
# The quoted 'EOF' delimiter defers all expansion to the batch script itself.
# A failed sbatch is reported to stderr instead of being silently ignored.
submit_stampede_jobs() {
    echo "Submitting jobs for Stampede2 (TACC)..."
    
    sbatch << 'EOF' || echo "warning: failed to submit nsem-stampede job" >&2
#!/bin/bash
#SBATCH -J nsem-stampede
#SBATCH -o nsem-stampede.%j
#SBATCH -e nsem-stampede.%j
#SBATCH -p normal
#SBATCH -N 8
#SBATCH -n 384
#SBATCH -t 02:00:00
#SBATCH -A your_allocation

module load intel/18.0.5
module load impi/18.0.5
module load julia/1.9.0

export I_MPI_PIN_DOMAIN=omp

# Scaling study on Stampede2 KNL nodes
srun -N 8 -n 384 julia --project=. examples/hpc/hpc_scaling_demo.jl
EOF
}

# Function to submit generic cluster jobs
# Submit a conservative default job for unrecognized Slurm clusters; module
# names inside the batch script are placeholders the user must adapt.
# The quoted 'EOF' delimiter defers all expansion to the batch script itself.
# A failed sbatch is reported to stderr instead of being silently ignored.
submit_generic_jobs() {
    echo "Submitting generic cluster jobs..."
    
    sbatch << 'EOF' || echo "warning: failed to submit nsem-generic job" >&2
#!/bin/bash
#SBATCH -J nsem-generic
#SBATCH -o nsem-generic.%j
#SBATCH -e nsem-generic.%j
#SBATCH -N 4
#SBATCH -n 128
#SBATCH -t 01:00:00

# Load modules (adjust for your system)
module load julia
module load openmpi

export JULIA_MPI_BINARY=system

srun julia --project=. examples/hpc/hpc_scaling_demo.jl
EOF
}

# Main execution
# Dispatch to the submission routine matching the detected system.
case "$HPC_SYSTEM" in
    frontera)
        submit_frontera_jobs
        ;;
    perlmutter)
        submit_perlmutter_jobs
        ;;
    summit)
        submit_summit_jobs
        ;;
    stampede)
        submit_stampede_jobs
        ;;
    cori)
        # Cori is close enough to Perlmutter to reuse its job set,
        # apart from module differences noted in the job scripts.
        echo "Submitting Cori jobs..."
        submit_perlmutter_jobs
        ;;
    *)
        submit_generic_jobs
        ;;
esac

echo "Jobs submitted for $HPC_SYSTEM"
echo
echo "Monitor job status with:"
# Summit uses LSF (bjobs); every other supported system runs Slurm (squeue).
if [[ "$HPC_SYSTEM" == "summit" ]]; then
    echo "  bjobs"
else
    echo "  squeue -u \$USER"
fi

# Print usage guidelines and per-system documentation links in one here-doc
# (quoted delimiter: nothing inside is expanded).
cat << 'GUIDELINES'

Scaling Study Job Submission Guidelines:
========================================
1. Adjust allocation names/project codes before submitting
2. Modify time limits based on problem size
3. Check queue limits and policies for your system
4. Monitor resource usage: memory, CPU, network
5. Use development queues for testing, production for large runs

Performance Tips:
- Start with small runs to validate setup
- Use 1000-10000 DOFs per MPI process for good efficiency
- Monitor communication patterns and load balancing
- Enable all compiler optimizations in production runs

For help with specific HPC systems, consult:
  Frontera: https://frontera-portal.tacc.utexas.edu/
  Perlmutter: https://docs.nersc.gov/systems/perlmutter/
  Summit: https://docs.olcf.ornl.gov/systems/summit_user_guide.html
GUIDELINES