# Ray autoscaler cluster config, rendered by SkyPilot for the Slurm provider.
cluster_name: {{cluster_name_on_cloud}}

# The maximum number of worker nodes to launch in addition to the head node.
max_workers: {{num_nodes - 1}}
# Let the autoscaler scale up by all requested workers at once (same value as
# max_workers).
upscaling_speed: {{num_nodes - 1}}
# Keep nodes alive for 60 minutes of idleness before scaling them down.
idle_timeout_minutes: 60

provider:
  # Use SkyPilot's external provisioner implementation for Slurm.
  type: external
  module: sky.provision.slurm

  # Target Slurm cluster and partition to provision nodes on.
  cluster: {{slurm_cluster}}
  partition: {{slurm_partition}}

  # SSH endpoint of the Slurm login node used by the provisioner.
  ssh:
    hostname: {{ssh_hostname}}
    port: {{ssh_port}}
    user: {{ssh_user}}
    private_key: {{slurm_private_key}}
{% if slurm_proxy_command is not none %}
    # NOTE(review): key is spelled 'proxycommand' here but 'ssh_proxy_command'
    # under the 'auth' section — confirm sky.provision.slurm expects this exact
    # spelling. Rendered via tojson so the command is a safely quoted string.
    proxycommand: {{slurm_proxy_command | tojson }}
{% endif %}

# SSH credentials Ray uses to reach the nodes.
auth:
  ssh_user: {{ssh_user}}
  # TODO(jwj): Modify this tmp workaround.
  # ssh_private_key: {{ssh_private_key}}
  ssh_private_key: {{slurm_private_key}}
  # Rendered via tojson as a quoted JSON string; unlike provider.ssh above this
  # key is emitted unconditionally, so it renders as `null` when the proxy
  # command is unset — presumably the consumer treats null as "no proxy";
  # verify against the provisioner.
  ssh_proxy_command: {{slurm_proxy_command | tojson }}

available_node_types:
  ray_head_default:
    # Resources are left empty; SkyPilot manages scheduling itself.
    resources: {}
    node_config:
      # From clouds/slurm.py::Slurm.make_deploy_resources_variables.
      instance_type: {{instance_type}}
      disk_size: {{disk_size}}
      cpus: {{cpus}}
      memory: {{memory}}
      accelerator_type: {{accelerator_type}}
      accelerator_count: {{accelerator_count}}

      # TODO: more configs that are required by the provisioner to create new
      # instances on Slurm: sky/provision/slurm/instance.py::run_instances
      # (the original comment referenced the FluffyCloud example template).

head_node_type: ray_head_default

# Files synced from the local machine to every node at provision time:
# the rendered ray YAML, the SkyPilot wheel, and any cloud credential files
# passed in via the `credentials` mapping.
# Format: `REMOTE_PATH : LOCAL_PATH`
file_mounts: {
  "{{sky_ray_yaml_remote_path}}": "{{sky_ray_yaml_local_path}}",
  "{{sky_remote_path}}/{{sky_wheel_hash}}": "{{sky_local_path}}",
{%- for remote_path, local_path in credentials.items() %}
  "{{remote_path}}": "{{local_path}}",
{%- endfor %}
}

# No rsync exclusion patterns.
rsync_exclude: []

# No extra node initialization commands.
initialization_commands: []

# List of shell commands to run to set up nodes.
# NOTE: these are very performance-sensitive. Each new item opens/closes an SSH
# connection, which is expensive. Try your best to co-locate commands into fewer
# items!
#
# Increment the following for catching performance bugs easier:
#   current num items (num SSH connections): 1
setup_commands:
  # The single item below is one multi-line shell command string:
  # - The Jinja loop first injects any cloud-specific initial setup commands.
  # - Lines 'sudo systemctl .. / sudo sed ..': disable `unattended-upgrades`
  #   to prevent apt-get from hanging. Run at the beginning, before other
  #   processes start, to avoid being blocked. (This is a temporary fix.)
  # - Lines 'sudo kill .. / sudo pkill .. / sudo dpkg --configure': clear any
  #   stale apt/dpkg locks left by a concurrent package-manager run.
  # - Line 'mkdir -p ..': create ~/.ssh/config in case it does not exist.
  # - The trailing Jinja variables inject commands that set up SkyPilot
  #   directories, install conda, and install the SkyPilot wheel.
  - {%- for initial_setup_command in initial_setup_commands %}
    {{ initial_setup_command }}
    {%- endfor %}
    sudo systemctl stop unattended-upgrades || true;
    sudo systemctl disable unattended-upgrades || true;
    sudo sed -i 's/Unattended-Upgrade "1"/Unattended-Upgrade "0"/g' /etc/apt/apt.conf.d/20auto-upgrades || true;
    sudo kill -9 `sudo lsof /var/lib/dpkg/lock-frontend | awk '{print $2}' | tail -n 1` || true;
    sudo pkill -9 apt-get;
    sudo pkill -9 dpkg;
    sudo dpkg --configure -a;
    mkdir -p ~/.ssh; touch ~/.ssh/config;
    {{ setup_sky_dirs_commands }}
    {{ conda_installation_commands }}
    {{ skypilot_wheel_installation_commands }}

# Legacy per-node-type config; unused but expected to be present.
head_node: {}
worker_nodes: {}

# These fields are required for external cloud providers.
head_setup_commands: []
worker_setup_commands: []
cluster_synced_files: []
# Canonical lowercase boolean (was `False`, a YAML 1.1-only spelling that
# yamllint's `truthy` rule rejects; both parse to the same value here).
file_mounts_sync_continuously: false
